diff --git a/venv/lib/python3.10/site-packages/numba/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..064ae983b76649c0dc5f9824b80efb5a3434a399 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/__pycache__/__main__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..036e1d955e199c10cacd8b3b87bc9ca05ee7498b Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/__pycache__/__main__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/__pycache__/_version.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/__pycache__/_version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d60d4c4fcbb1a9755ccaab7905762791375aa538 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/__pycache__/_version.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/__pycache__/extending.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/__pycache__/extending.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95f8408c00b56253fb9029e19dfa98ef0028087a Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/__pycache__/extending.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/__pycache__/runtests.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/__pycache__/runtests.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0fada1d11b8f424a500770155da170ca8f9d8187 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/__pycache__/runtests.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/printimpl.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/printimpl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab9f6a1fb5c7697c3ae00cf5f74f289cc10448ed Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/printimpl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/random.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/random.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff76dc4acf3522fbbfc8556ce563e5d5d66da4e5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/random.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/simulator_init.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/simulator_init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a1b18d428f20e20fe8cdd5226d3480b27e63a04 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/simulator_init.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/stubs.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/stubs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2bbb069bc787b59fb4a1ffe43d96a9db19b13511 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/stubs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/target.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/target.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fc91e366d83bbf6971064ffe84d1f8929e62da6 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/target.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/testing.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/testing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3314292ec4d6a24447ead6a8c04261084e48cdbc Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/testing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/types.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16c45de44b28eac989ba4ae4b21fe3a4da016bda Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/types.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/ufuncs.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/ufuncs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8997634ca4d4d0785c40ec51d0b863a0d57c8ed Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/ufuncs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/vector_types.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/vector_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..551c5446dda56552888e471be20b9c1cd393e8b2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/vector_types.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/vectorizers.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/vectorizers.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..69366015272c8f02b68b3f16132c734f5f2298ba Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/cuda/__pycache__/vectorizers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/misc/__pycache__/POST.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/POST.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a8fd72d8edea30cc9f4808400fa8e4105c3dc54 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/POST.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/misc/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..194a947258d310385dc8b7ace441f87fc0330c2c Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/misc/__pycache__/appdirs.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/appdirs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be03ee468758459be2795ac185d99db962a4c66a Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/appdirs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/misc/__pycache__/cffiimpl.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/cffiimpl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d65bda5ad60d3cdf180c7eb45a069aa3b9b9f976 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/cffiimpl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/misc/__pycache__/coverage_support.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/coverage_support.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d19c6742fe234ec34fc0f5f6b6a5a874678c31f Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/coverage_support.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/misc/__pycache__/dump_style.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/dump_style.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6c5ce98a7f29b688e72b46c70c5a7edbd61c4a5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/dump_style.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/misc/__pycache__/findlib.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/findlib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb1ee175301171d900a57a15982db75ff031afa2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/findlib.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/misc/__pycache__/firstlinefinder.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/firstlinefinder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f6c99dc4ab6ba16d42061bdcfcc92d874bd90ed Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/firstlinefinder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/misc/__pycache__/gdb_hook.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/gdb_hook.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd2076faa096644934a9ac8cb9d05edf918b12b7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/gdb_hook.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/misc/__pycache__/gdb_print_extension.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/gdb_print_extension.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0aa0b9b05ec005d95338f91844042c551f4a993 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/gdb_print_extension.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/misc/__pycache__/init_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/init_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f138be56542fddcaa26277d1630c6eb2792dbae8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/init_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/misc/__pycache__/inspection.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/inspection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec884ec2544c36941a54d251de1f4e5dabb72511 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/inspection.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/misc/__pycache__/literal.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/literal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9e76f7f6b20cf1bc6a55c0ed28433addc7816fa Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/literal.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/misc/__pycache__/llvm_pass_timings.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/llvm_pass_timings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b663ef08706293c0b2d418ed19fd26161cb88571 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/llvm_pass_timings.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/misc/__pycache__/mergesort.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/mergesort.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b09a6a491579960059660d87bcdf627bb46af15 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/mergesort.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/misc/__pycache__/numba_entry.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/numba_entry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b23b44f23b8ebf12797d76cacb5a7eac18c58c26 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/numba_entry.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/misc/__pycache__/numba_gdbinfo.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/numba_gdbinfo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67bbf836865eb6d1169321ed8f4594f336189904 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/numba_gdbinfo.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/misc/__pycache__/numba_sysinfo.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/numba_sysinfo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..deea7638642b770e1069a69af39786e943d8008f Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/numba_sysinfo.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/misc/__pycache__/quicksort.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/quicksort.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..3d9ffee167a015723432e91e9a610e7763d0d6bf Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/quicksort.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/misc/__pycache__/special.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/special.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6886657b44db361e12efb219c42687a2a127aefb Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/special.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/misc/__pycache__/timsort.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/timsort.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..642376f1820cba9b1a12ad21abf14af8633d7e0f Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/misc/__pycache__/timsort.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/misc/gdb_print_extension.py b/venv/lib/python3.10/site-packages/numba/misc/gdb_print_extension.py new file mode 100644 index 0000000000000000000000000000000000000000..982aacf26e55eb76b852e1fe4ce5ea95a432c45c --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/misc/gdb_print_extension.py @@ -0,0 +1,204 @@ +"""gdb printing extension for Numba types. +""" +import re + +try: + import gdb.printing + import gdb +except ImportError: + raise ImportError("GDB python support is not available.") + + +class NumbaArrayPrinter: + + def __init__(self, val): + self.val = val + + def to_string(self): + try: + import numpy as np + HAVE_NUMPY = True + except ImportError: + HAVE_NUMPY = False + + try: + NULL = 0x0 + + # Raw data references, these need unpacking/interpreting. + + # Member "data" is... + # DW_TAG_member of DIDerivedType, tag of DW_TAG_pointer_type + # encoding e.g. 
DW_ATE_float + data = self.val["data"] + + # Member "itemsize" is... + # DW_TAG_member of DIBasicType encoding DW_ATE_signed + itemsize = self.val["itemsize"] + + # Members "shape" and "strides" are... + # DW_TAG_member of DIDerivedType, the type is a DICompositeType + # (it's a Numba UniTuple) with tag: DW_TAG_array_type, i.e. it's an + # array repr, it has a basetype of e.g. DW_ATE_unsigned and also + # "elements" which are referenced with a DISubrange(count: ) + # to say how many elements are in the array. + rshp = self.val["shape"] + rstrides = self.val["strides"] + + # bool on whether the data is aligned. + is_aligned = False + + # type information decode, simple type: + ty_str = str(self.val.type) + if HAVE_NUMPY and ('aligned' in ty_str or 'Record' in ty_str): + ty_str = ty_str.replace('unaligned ','').strip() + matcher = re.compile(r"array\((Record.*), (.*), (.*)\)\ \(.*") + # NOTE: need to deal with "Alignment" else dtype size is wrong + arr_info = [x.strip() for x in matcher.match(ty_str).groups()] + dtype_str, ndim_str, order_str = arr_info + rstr = r'Record\((.*\[.*\]);([0-9]+);(True|False)' + rstr_match = re.match(rstr, dtype_str) + # balign is unused, it's the alignment + fields, balign, is_aligned_str = rstr_match.groups() + is_aligned = is_aligned_str == 'True' + field_dts = fields.split(',') + struct_entries = [] + for f in field_dts: + splitted = f.split('[') + name = splitted[0] + dt_part = splitted[1:] + if len(dt_part) > 1: + raise TypeError('Unsupported sub-type: %s' % f) + else: + dt_part = dt_part[0] + if "nestedarray" in dt_part: + raise TypeError('Unsupported sub-type: %s' % f) + dt_as_str = dt_part.split(';')[0].split('=')[1] + dtype = np.dtype(dt_as_str) + struct_entries.append((name, dtype)) + # The dtype is actually a record of some sort + dtype_str = struct_entries + else: # simple type + matcher = re.compile(r"array\((.*),(.*),(.*)\)\ \(.*") + arr_info = [x.strip() for x in matcher.match(ty_str).groups()] + dtype_str, ndim_str, 
order_str = arr_info + # fix up unichr dtype + if 'unichr x ' in dtype_str: + dtype_str = dtype_str[1:-1].replace('unichr x ', '', text, '') + + def begin_module_section(self, modname): + self.print('

', modname, '

') + self.print('') + + def write_supported_item(self, modname, itemname, typename, explained, + sources, alias): + self.print('
  • ') + self.print('{}.{}'.format( + modname, + itemname, + )) + self.print(': {}'.format(typename)) + self.print('
    ', explained, '
    ') + + self.print("
      ") + for tcls, source in sources.items(): + if source: + self.print("
    • ") + impl = source['name'] + sig = source['sig'] + filename = source['filename'] + lines = source['lines'] + self.print( + "

      defined by {}{} at {}:{}-{}

      ".format( + self.escape(impl), self.escape(sig), + self.escape(filename), lines[0], lines[1], + ), + ) + self.print('

      {}

      '.format( + self.escape(source['docstring'] or '') + )) + else: + self.print("
    • {}".format(self.escape(str(tcls)))) + self.print("
    • ") + self.print("
    ") + self.print('
  • ') + + def write_unsupported_item(self, modname, itemname): + self.print('
  • ') + self.print('{}.{}: UNSUPPORTED'.format( + modname, + itemname, + )) + self.print('
  • ') + + def write_statistic(self, stats): + self.print('

    {}

    '.format(stats.describe())) + + +class ReSTFormatter(Formatter): + """Formatter that output ReSTructured text format for Sphinx docs. + """ + def escape(self, text): + return text + + def title(self, text): + self.print(text) + self.print('=' * len(text)) + self.print() + + def begin_module_section(self, modname): + self.print(modname) + self.print('-' * len(modname)) + self.print() + + def end_module_section(self): + self.print() + + def write_supported_item(self, modname, itemname, typename, explained, + sources, alias): + self.print('.. function:: {}.{}'.format(modname, itemname)) + self.print(' :noindex:') + self.print() + + if alias: + self.print(" Alias to: ``{}``".format(alias)) + self.print() + + for tcls, source in sources.items(): + if source: + impl = source['name'] + sig = source['sig'] + filename = source['filename'] + lines = source['lines'] + source_link = github_url.format( + commit=commit, + path=filename, + firstline=lines[0], + lastline=lines[1], + ) + self.print( + " - defined by ``{}{}`` at `{}:{}-{} <{}>`_".format( + impl, sig, filename, lines[0], lines[1], source_link, + ), + ) + + else: + self.print(" - defined by ``{}``".format(str(tcls))) + self.print() + + def write_unsupported_item(self, modname, itemname): + pass + + def write_statistic(self, stat): + if stat.supported == 0: + self.print("This module is not supported.") + else: + msg = "Not showing {} unsupported functions." + self.print(msg.format(stat.unsupported)) + self.print() + self.print(stat.describe()) + self.print() + + +def _format_module_infos(formatter, package_name, mod_sequence, target=None): + """Format modules. 
+ """ + formatter.title('Listings for {}'.format(package_name)) + alias_map = {} # remember object seen to track alias + for mod in mod_sequence: + stat = _Stat() + modname = mod.__name__ + formatter.begin_module_section(formatter.escape(modname)) + for info in inspect_module(mod, target=target, alias=alias_map): + nbtype = info['numba_type'] + if nbtype is not None: + stat.supported += 1 + formatter.write_supported_item( + modname=formatter.escape(info['module'].__name__), + itemname=formatter.escape(info['name']), + typename=formatter.escape(str(nbtype)), + explained=formatter.escape(info['explained']), + sources=info['source_infos'], + alias=info.get('alias'), + ) + + else: + stat.unsupported += 1 + formatter.write_unsupported_item( + modname=formatter.escape(info['module'].__name__), + itemname=formatter.escape(info['name']), + ) + + formatter.write_statistic(stat) + formatter.end_module_section() + + +def write_listings(package_name, filename, output_format): + """Write listing information into a file. + + Parameters + ---------- + package_name : str + Name of the package to inspect. + filename : str + Output filename. Always overwrite. + output_format : str + Support formats are "html" and "rst". + """ + package = __import__(package_name) + if hasattr(package, '__path__'): + mods = list_modules_in_package(package) + else: + mods = [package] + + if output_format == 'html': + with open(filename + '.html', 'w') as fout: + fmtr = HTMLFormatter(fileobj=fout) + _format_module_infos(fmtr, package_name, mods) + elif output_format == 'rst': + with open(filename + '.rst', 'w') as fout: + fmtr = ReSTFormatter(fileobj=fout) + _format_module_infos(fmtr, package_name, mods) + else: + raise ValueError( + "Output format '{}' is not supported".format(output_format)) + + +program_description = """ +Inspect Numba support for a given top-level package. 
+""".strip() + + +def main(): + parser = argparse.ArgumentParser(description=program_description) + parser.add_argument( + 'package', metavar='package', type=str, + help='Package to inspect', + ) + parser.add_argument( + '--format', dest='format', default='html', + help='Output format; i.e. "html", "rst"', + ) + parser.add_argument( + '--file', dest='file', default='inspector_output', + help='Output filename. Defaults to "inspector_output."', + ) + + args = parser.parse_args() + package_name = args.package + output_format = args.format + filename = args.file + write_listings(package_name, filename, output_format) + + +if __name__ == '__main__': + main() diff --git a/venv/lib/python3.10/site-packages/numba/misc/init_utils.py b/venv/lib/python3.10/site-packages/numba/misc/init_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4b6dc9f0e7a9a98b993c56a15a05b37bbf68a9b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/misc/init_utils.py @@ -0,0 +1,44 @@ +"""Collection of miscellaneous initialization utilities.""" + +from collections import namedtuple + +version_info = namedtuple('version_info', + ('major minor patch short full ' + 'string tuple git_revision')) + + +def generate_version_info(version): + """Process a version string into a structured version_info object. + + Parameters + ---------- + version: str + a string describing the current version + + Returns + ------- + version_info: tuple + structured version information + + See also + -------- + Look at the definition of 'version_info' in this module for details. 
+ + """ + parts = version.split('.') + + def try_int(x): + try: + return int(x) + except ValueError: + return None + major = try_int(parts[0]) if len(parts) >= 1 else None + minor = try_int(parts[1]) if len(parts) >= 2 else None + patch = try_int(parts[2]) if len(parts) >= 3 else None + short = (major, minor) + full = (major, minor, patch) + string = version + tup = tuple(parts) + git_revision = tup[3] if len(tup) >= 4 else None + return version_info(major, minor, patch, short, full, string, tup, + git_revision) diff --git a/venv/lib/python3.10/site-packages/numba/misc/inspection.py b/venv/lib/python3.10/site-packages/numba/misc/inspection.py new file mode 100644 index 0000000000000000000000000000000000000000..a92c9430805ae1d4e388a1ca3222075fe1330847 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/misc/inspection.py @@ -0,0 +1,103 @@ +"""Miscellaneous inspection tools +""" +from tempfile import NamedTemporaryFile, TemporaryDirectory +import os +import warnings + +from numba.core.errors import NumbaWarning + + +def disassemble_elf_to_cfg(elf, mangled_symbol): + """ + Gets the CFG of the disassembly of an ELF object, elf, at mangled name, + mangled_symbol, and renders it appropriately depending on the execution + environment (terminal/notebook). + """ + try: + import r2pipe + except ImportError: + raise RuntimeError("r2pipe package needed for disasm CFG") + + def get_rendering(cmd=None): + from numba.pycc.platform import Toolchain # import local, circular ref + if cmd is None: + raise ValueError("No command given") + + with TemporaryDirectory() as tmpdir: + # Write ELF as a temporary file in the temporary dir, do not delete! 
+ with NamedTemporaryFile(delete=False, dir=tmpdir) as f: + f.write(elf) + f.flush() # force write, radare2 needs a binary blob on disk + + # Now try and link the ELF, this helps radare2 _a lot_ + linked = False + try: + raw_dso_name = f'{os.path.basename(f.name)}.so' + linked_dso = os.path.join(tmpdir, raw_dso_name) + tc = Toolchain() + tc.link_shared(linked_dso, (f.name,)) + obj_to_analyse = linked_dso + linked = True + except Exception as e: + # link failed, mention it to user, radare2 will still be able to + # analyse the object, but things like dwarf won't appear in the + # asm as comments. + msg = ('Linking the ELF object with the distutils toolchain ' + f'failed with: {e}. Disassembly will still work but ' + 'might be less accurate and will not use DWARF ' + 'information.') + warnings.warn(NumbaWarning(msg)) + obj_to_analyse = f.name + + # catch if r2pipe can actually talk to radare2 + try: + flags = ['-2', # close stderr to hide warnings + '-e io.cache=true', # fix relocations in disassembly + '-e scr.color=1', # 16 bit ANSI colour terminal + '-e asm.dwarf=true', # DWARF decode + '-e scr.utf8=true', # UTF8 output looks better + ] + r = r2pipe.open(obj_to_analyse, flags=flags) + r.cmd('aaaaaa') # analyse as much as possible + # If the elf is linked then it's necessary to seek as the + # DSO ctor/dtor is at the default position + if linked: + # r2 only matches up to 61 chars?! found this by experiment! + mangled_symbol_61char = mangled_symbol[:61] + # switch off demangle, the seek is on a mangled symbol + r.cmd('e bin.demangle=false') + # seek to the mangled symbol address + r.cmd(f's `is~ {mangled_symbol_61char}[1]`') + # switch demangling back on for output purposes + r.cmd('e bin.demangle=true') + data = r.cmd('%s' % cmd) # print graph + r.quit() + except Exception as e: + if "radare2 in PATH" in str(e): + msg = ("This feature requires 'radare2' to be " + "installed and available on the system see: " + "https://github.com/radareorg/radare2. 
" + "Cannot find 'radare2' in $PATH.") + raise RuntimeError(msg) + else: + raise e + return data + + class DisasmCFG(object): + + def _repr_svg_(self): + try: + import graphviz + except ImportError: + raise RuntimeError("graphviz package needed for disasm CFG") + jupyter_rendering = get_rendering(cmd='agfd') + # this just makes it read slightly better in jupyter notebooks + jupyter_rendering.replace('fontname="Courier",', + 'fontname="Courier",fontsize=6,') + src = graphviz.Source(jupyter_rendering) + return src.pipe('svg').decode('UTF-8') + + def __repr__(self): + return get_rendering(cmd='agf') + + return DisasmCFG() diff --git a/venv/lib/python3.10/site-packages/numba/misc/literal.py b/venv/lib/python3.10/site-packages/numba/misc/literal.py new file mode 100644 index 0000000000000000000000000000000000000000..2bc1225b7e4b8428854b5f39b26a1f0ab6766d30 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/misc/literal.py @@ -0,0 +1,24 @@ +from numba.core.extending import overload +from numba.core import types +from numba.misc.special import literally, literal_unroll +from numba.core.errors import TypingError + + +@overload(literally) +def _ov_literally(obj): + if isinstance(obj, (types.Literal, types.InitialValue)): + return lambda obj: obj + else: + m = "Invalid use of non-Literal type in literally({})".format(obj) + raise TypingError(m) + + +@overload(literal_unroll) +def literal_unroll_impl(container): + if isinstance(container, types.Poison): + m = f"Invalid use of non-Literal type in literal_unroll({container})" + raise TypingError(m) + + def impl(container): + return container + return impl diff --git a/venv/lib/python3.10/site-packages/numba/misc/llvm_pass_timings.py b/venv/lib/python3.10/site-packages/numba/misc/llvm_pass_timings.py new file mode 100644 index 0000000000000000000000000000000000000000..17c52dbf48360f68e780c5207bb83b5d4c7d73ba --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/misc/llvm_pass_timings.py @@ -0,0 +1,409 @@ +import 
re +import operator +import heapq +from collections import namedtuple +from collections.abc import Sequence +from contextlib import contextmanager +from functools import cached_property + +from numba.core import config + +import llvmlite.binding as llvm + + +class RecordLLVMPassTimings: + """A helper context manager to track LLVM pass timings. + """ + + __slots__ = ["_data"] + + def __enter__(self): + """Enables the pass timing in LLVM. + """ + llvm.set_time_passes(True) + return self + + def __exit__(self, exc_val, exc_type, exc_tb): + """Reset timings and save report internally. + """ + self._data = llvm.report_and_reset_timings() + llvm.set_time_passes(False) + return + + def get(self): + """Retrieve timing data for processing. + + Returns + ------- + timings: ProcessedPassTimings + """ + return ProcessedPassTimings(self._data) + + +PassTimingRecord = namedtuple( + "PassTimingRecord", + [ + "user_time", + "user_percent", + "system_time", + "system_percent", + "user_system_time", + "user_system_percent", + "wall_time", + "wall_percent", + "pass_name", + "instruction", + ], +) + + +def _adjust_timings(records): + """Adjust timing records because of truncated information. + + Details: The percent information can be used to improve the timing + information. 
+ + Returns + ------- + res: List[PassTimingRecord] + """ + total_rec = records[-1] + assert total_rec.pass_name == "Total" # guard for implementation error + + def make_adjuster(attr): + time_attr = f"{attr}_time" + percent_attr = f"{attr}_percent" + time_getter = operator.attrgetter(time_attr) + + def adjust(d): + """Compute percent x total_time = adjusted""" + total = time_getter(total_rec) + adjusted = total * d[percent_attr] * 0.01 + d[time_attr] = adjusted + return d + + return adjust + + # Make adjustment functions for each field + adj_fns = [ + make_adjuster(x) for x in ["user", "system", "user_system", "wall"] + ] + + # Extract dictionaries from the namedtuples + dicts = map(lambda x: x._asdict(), records) + + def chained(d): + # Chain the adjustment functions + for fn in adj_fns: + d = fn(d) + # Reconstruct the namedtuple + return PassTimingRecord(**d) + + return list(map(chained, dicts)) + + +class ProcessedPassTimings: + """A class for processing raw timing report from LLVM. + + The processing is done lazily so we don't waste time processing unused + timing information. + """ + + def __init__(self, raw_data): + self._raw_data = raw_data + + def __bool__(self): + return bool(self._raw_data) + + def get_raw_data(self): + """Returns the raw string data. + + Returns + ------- + res: str + """ + return self._raw_data + + def get_total_time(self): + """Compute the total time spend in all passes. + + Returns + ------- + res: float + """ + return self.list_records()[-1].wall_time + + def list_records(self): + """Get the processed data for the timing report. + + Returns + ------- + res: List[PassTimingRecord] + """ + return self._processed + + def list_top(self, n): + """Returns the top(n) most time-consuming (by wall-time) passes. + + Parameters + ---------- + n: int + This limits the maximum number of items to show. + This function will show the ``n`` most time-consuming passes. 
+ + Returns + ------- + res: List[PassTimingRecord] + Returns the top(n) most time-consuming passes in descending order. + """ + records = self.list_records() + key = operator.attrgetter("wall_time") + return heapq.nlargest(n, records[:-1], key) + + def summary(self, topn=5, indent=0): + """Return a string summarizing the timing information. + + Parameters + ---------- + topn: int; optional + This limits the maximum number of items to show. + This function will show the ``topn`` most time-consuming passes. + indent: int; optional + Set the indentation level. Defaults to 0 for no indentation. + + Returns + ------- + res: str + """ + buf = [] + prefix = " " * indent + + def ap(arg): + buf.append(f"{prefix}{arg}") + + ap(f"Total {self.get_total_time():.4f}s") + ap("Top timings:") + for p in self.list_top(topn): + ap(f" {p.wall_time:.4f}s ({p.wall_percent:5}%) {p.pass_name}") + return "\n".join(buf) + + @cached_property + def _processed(self): + """A cached property for lazily processing the data and returning it. + + See ``_process()`` for details. + """ + return self._process() + + def _process(self): + """Parses the raw string data from LLVM timing report and attempts + to improve the data by recomputing the times + (See `_adjust_timings()``). + """ + + def parse(raw_data): + """A generator that parses the raw_data line-by-line to extract + timing information for each pass. + """ + lines = raw_data.splitlines() + colheader = r"[a-zA-Z+ ]+" + # Take at least one column header. 
+ multicolheaders = fr"(?:\s*-+{colheader}-+)+" + + line_iter = iter(lines) + # find column headers + header_map = { + "User Time": "user", + "System Time": "system", + "User+System": "user_system", + "Wall Time": "wall", + "Instr": "instruction", + "Name": "pass_name", + } + for ln in line_iter: + m = re.match(multicolheaders, ln) + if m: + # Get all the column headers + raw_headers = re.findall(r"[a-zA-Z][a-zA-Z+ ]+", ln) + headers = [header_map[k.strip()] for k in raw_headers] + break + + assert headers[-1] == 'pass_name' + # compute the list of available attributes from the column headers + attrs = [] + n = r"\s*((?:[0-9]+\.)?[0-9]+)" + pat = "" + for k in headers[:-1]: + if k == "instruction": + pat += n + else: + attrs.append(f"{k}_time") + attrs.append(f"{k}_percent") + pat += rf"\s+(?:{n}\s*\({n}%\)|-+)" + + # put default value 0.0 to all missing attributes + missing = {} + for k in PassTimingRecord._fields: + if k not in attrs and k != 'pass_name': + missing[k] = 0.0 + # parse timings + pat += r"\s*(.*)" + for ln in line_iter: + m = re.match(pat, ln) + if m is not None: + raw_data = list(m.groups()) + data = {k: float(v) if v is not None else 0.0 + for k, v in zip(attrs, raw_data)} + data.update(missing) + pass_name = raw_data[-1] + rec = PassTimingRecord( + pass_name=pass_name, **data, + ) + yield rec + if rec.pass_name == "Total": + # "Total" means the report has ended + break + # Check that we have reach the end of the report + remaining = '\n'.join(line_iter) + if remaining: + raise ValueError( + f"unexpected text after parser finished:\n{remaining}" + ) + + # Parse raw data + records = list(parse(self._raw_data)) + return _adjust_timings(records) + + +NamedTimings = namedtuple("NamedTimings", ["name", "timings"]) + + +class PassTimingsCollection(Sequence): + """A collection of pass timings. + + This class implements the ``Sequence`` protocol for accessing the + individual timing records. 
+ """ + + def __init__(self, name): + self._name = name + self._records = [] + + @contextmanager + def record(self, name): + """Record new timings and append to this collection. + + Note: this is mainly for internal use inside the compiler pipeline. + + See also ``RecordLLVMPassTimings`` + + Parameters + ---------- + name: str + Name for the records. + """ + if config.LLVM_PASS_TIMINGS: + # Recording of pass timings is enabled + with RecordLLVMPassTimings() as timings: + yield + rec = timings.get() + # Only keep non-empty records + if rec: + self._append(name, rec) + else: + # Do nothing. Recording of pass timings is disabled. + yield + + def _append(self, name, timings): + """Append timing records + + Parameters + ---------- + name: str + Name for the records. + timings: ProcessedPassTimings + the timing records. + """ + self._records.append(NamedTimings(name, timings)) + + def get_total_time(self): + """Computes the sum of the total time across all contained timings. + + Returns + ------- + res: float or None + Returns the total number of seconds or None if no timings were + recorded + """ + if self._records: + return sum(r.timings.get_total_time() for r in self._records) + else: + return None + + def list_longest_first(self): + """Returns the timings in descending order of total time duration. + + Returns + ------- + res: List[ProcessedPassTimings] + """ + return sorted(self._records, + key=lambda x: x.timings.get_total_time(), + reverse=True) + + @property + def is_empty(self): + """ + """ + return not self._records + + def summary(self, topn=5): + """Return a string representing the summary of the timings. + + Parameters + ---------- + topn: int; optional, default=5. + This limits the maximum number of items to show. + This function will show the ``topn`` most time-consuming passes. 
+ + Returns + ------- + res: str + + See also ``ProcessedPassTimings.summary()`` + """ + if self.is_empty: + return "No pass timings were recorded" + else: + buf = [] + ap = buf.append + ap(f"Printing pass timings for {self._name}") + overall_time = self.get_total_time() + ap(f"Total time: {overall_time:.4f}") + for i, r in enumerate(self._records): + ap(f"== #{i} {r.name}") + percent = r.timings.get_total_time() / overall_time * 100 + ap(f" Percent: {percent:.1f}%") + ap(r.timings.summary(topn=topn, indent=1)) + return "\n".join(buf) + + def __getitem__(self, i): + """Get the i-th timing record. + + Returns + ------- + res: (name, timings) + A named tuple with two fields: + + - name: str + - timings: ProcessedPassTimings + """ + return self._records[i] + + def __len__(self): + """Length of this collection. + """ + return len(self._records) + + def __str__(self): + return self.summary() diff --git a/venv/lib/python3.10/site-packages/numba/misc/mergesort.py b/venv/lib/python3.10/site-packages/numba/misc/mergesort.py new file mode 100644 index 0000000000000000000000000000000000000000..cba5f3318b7ef55398df4f5e921e1f2c7a68aed3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/misc/mergesort.py @@ -0,0 +1,126 @@ +""" +The same algorithm as translated from numpy. +See numpy/core/src/npysort/mergesort.c.src. +The high-level numba code is adding a little overhead comparing to +the pure-C implementation in numpy. 
+""" +import numpy as np +from collections import namedtuple + +# Array size smaller than this will be sorted by insertion sort +SMALL_MERGESORT = 20 + + +MergesortImplementation = namedtuple('MergesortImplementation', [ + 'run_mergesort', +]) + + +def make_mergesort_impl(wrap, lt=None, is_argsort=False): + kwargs_lite = dict(no_cpython_wrapper=True, _nrt=False) + + # The less than + if lt is None: + @wrap(**kwargs_lite) + def lt(a, b): + return a < b + else: + lt = wrap(**kwargs_lite)(lt) + + if is_argsort: + @wrap(**kwargs_lite) + def lessthan(a, b, vals): + return lt(vals[a], vals[b]) + else: + @wrap(**kwargs_lite) + def lessthan(a, b, vals): + return lt(a, b) + + @wrap(**kwargs_lite) + def argmergesort_inner(arr, vals, ws): + """The actual mergesort function + + Parameters + ---------- + arr : array [read+write] + The values being sorted inplace. For argsort, this is the + indices. + vals : array [readonly] + ``None`` for normal sort. In argsort, this is the actual array values. + ws : array [write] + The workspace. 
Must be of size ``arr.size // 2`` + """ + if arr.size > SMALL_MERGESORT: + # Merge sort + mid = arr.size // 2 + + argmergesort_inner(arr[:mid], vals, ws) + argmergesort_inner(arr[mid:], vals, ws) + + # Copy left half into workspace so we don't overwrite it + for i in range(mid): + ws[i] = arr[i] + + # Merge + left = ws[:mid] + right = arr[mid:] + out = arr + + i = j = k = 0 + while i < left.size and j < right.size: + if not lessthan(right[j], left[i], vals): + out[k] = left[i] + i += 1 + else: + out[k] = right[j] + j += 1 + k += 1 + + # Leftovers + while i < left.size: + out[k] = left[i] + i += 1 + k += 1 + + while j < right.size: + out[k] = right[j] + j += 1 + k += 1 + else: + # Insertion sort + i = 1 + while i < arr.size: + j = i + while j > 0 and lessthan(arr[j], arr[j - 1], vals): + arr[j - 1], arr[j] = arr[j], arr[j - 1] + j -= 1 + i += 1 + + # The top-level entry points + + @wrap(no_cpython_wrapper=True) + def mergesort(arr): + "Inplace" + ws = np.empty(arr.size // 2, dtype=arr.dtype) + argmergesort_inner(arr, None, ws) + return arr + + + @wrap(no_cpython_wrapper=True) + def argmergesort(arr): + "Out-of-place" + idxs = np.arange(arr.size) + ws = np.empty(arr.size // 2, dtype=idxs.dtype) + argmergesort_inner(idxs, arr, ws) + return idxs + + return MergesortImplementation( + run_mergesort=(argmergesort if is_argsort else mergesort) + ) + + +def make_jit_mergesort(*args, **kwargs): + from numba import njit + # NOTE: wrap with njit to allow recursion + # because @register_jitable => @overload doesn't support recursion + return make_mergesort_impl(njit, *args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/numba/misc/numba_entry.py b/venv/lib/python3.10/site-packages/numba/misc/numba_entry.py new file mode 100644 index 0000000000000000000000000000000000000000..b95f9dd06b44a23b84ac7efc508f41718eb40b92 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/misc/numba_entry.py @@ -0,0 +1,72 @@ +import sys +import argparse +import os +import subprocess 
+import json + +from .numba_sysinfo import display_sysinfo, get_sysinfo +from .numba_gdbinfo import display_gdbinfo + + +def make_parser(): + parser = argparse.ArgumentParser() + parser.add_argument('--annotate', help='Annotate source', + action='store_true') + parser.add_argument('--dump-llvm', action="store_true", + help='Print generated llvm assembly') + parser.add_argument('--dump-optimized', action='store_true', + help='Dump the optimized llvm assembly') + parser.add_argument('--dump-assembly', action='store_true', + help='Dump the LLVM generated assembly') + parser.add_argument('--annotate-html', nargs=1, + help='Output source annotation as html') + parser.add_argument('-s', '--sysinfo', action="store_true", + help='Output system information for bug reporting') + parser.add_argument('-g', '--gdbinfo', action="store_true", + help='Output system information about gdb') + parser.add_argument('--sys-json', nargs=1, + help='Saves the system info dict as a json file') + parser.add_argument('filename', nargs='?', help='Python source filename') + return parser + + +def main(): + parser = make_parser() + args = parser.parse_args() + + if args.sysinfo: + print("System info:") + display_sysinfo() + + if args.gdbinfo: + print("GDB info:") + display_gdbinfo() + + if args.sysinfo or args.gdbinfo: + sys.exit(0) + + if args.sys_json: + info = get_sysinfo() + info.update({'Start': info['Start'].isoformat()}) + info.update({'Start UTC': info['Start UTC'].isoformat()}) + with open(args.sys_json[0], 'w') as f: + json.dump(info, f, indent=4) + sys.exit(0) + + os.environ['NUMBA_DUMP_ANNOTATION'] = str(int(args.annotate)) + if args.annotate_html is not None: + try: + from jinja2 import Template + except ImportError: + raise ImportError("Please install the 'jinja2' package") + os.environ['NUMBA_DUMP_HTML'] = str(args.annotate_html[0]) + os.environ['NUMBA_DUMP_LLVM'] = str(int(args.dump_llvm)) + os.environ['NUMBA_DUMP_OPTIMIZED'] = str(int(args.dump_optimized)) + 
os.environ['NUMBA_DUMP_ASSEMBLY'] = str(int(args.dump_assembly)) + + if args.filename: + cmd = [sys.executable, args.filename] + subprocess.call(cmd) + else: + print("numba: error: the following arguments are required: filename") + sys.exit(1) diff --git a/venv/lib/python3.10/site-packages/numba/misc/numba_gdbinfo.py b/venv/lib/python3.10/site-packages/numba/misc/numba_gdbinfo.py new file mode 100644 index 0000000000000000000000000000000000000000..db92cd24b6c120368b0560497bd28d75bba5c37d --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/misc/numba_gdbinfo.py @@ -0,0 +1,161 @@ +"""Module for displaying information about Numba's gdb set up""" +from collections import namedtuple +import os +import re +import subprocess +from textwrap import dedent +from numba import config + +# Container for the output of the gdb info data collection +_fields = ('binary_loc, extension_loc, py_ver, np_ver, supported') +_gdb_info = namedtuple('_gdb_info', _fields) + + +class _GDBTestWrapper(): + """Wraps the gdb binary and has methods for checking what the gdb binary + has support for (Python and NumPy).""" + + def __init__(self,): + gdb_binary = config.GDB_BINARY + if gdb_binary is None: + msg = ("No valid binary could be found for gdb named: " + f"{config.GDB_BINARY}") + raise ValueError(msg) + self._gdb_binary = gdb_binary + + def _run_cmd(self, cmd=()): + gdb_call = [self.gdb_binary, '-q',] + for x in cmd: + gdb_call.append('-ex') + gdb_call.append(x) + gdb_call.extend(['-ex', 'q']) + return subprocess.run(gdb_call, capture_output=True, timeout=10, + text=True) + + @property + def gdb_binary(self): + return self._gdb_binary + + @classmethod + def success(cls, status): + return status.returncode == 0 + + def check_launch(self): + """Checks that gdb will launch ok""" + return self._run_cmd() + + def check_python(self): + cmd = ("python from __future__ import print_function; " + "import sys; print(sys.version_info[:2])") + return self._run_cmd((cmd,)) + + def 
check_numpy(self): + cmd = ("python from __future__ import print_function; " + "import types; import numpy; " + "print(isinstance(numpy, types.ModuleType))") + return self._run_cmd((cmd,)) + + def check_numpy_version(self): + cmd = ("python from __future__ import print_function; " + "import types; import numpy;" + "print(numpy.__version__)") + return self._run_cmd((cmd,)) + + +def collect_gdbinfo(): + """Prints information to stdout about the gdb setup that Numba has found""" + + # State flags: + gdb_state = None + gdb_has_python = False + gdb_has_numpy = False + gdb_python_version = 'No Python support' + gdb_python_numpy_version = "No NumPy support" + + # There are so many ways for gdb to not be working as expected. Surround + # the "is it working" tests with try/except and if there's an exception + # store it for processing later. + try: + # Check gdb exists + gdb_wrapper = _GDBTestWrapper() + + # Check gdb works + status = gdb_wrapper.check_launch() + if not gdb_wrapper.success(status): + msg = (f"gdb at '{gdb_wrapper.gdb_binary}' does not appear to work." + f"\nstdout: {status.stdout}\nstderr: {status.stderr}") + raise ValueError(msg) + gdb_state = gdb_wrapper.gdb_binary + except Exception as e: + gdb_state = f"Testing gdb binary failed. 
Reported Error: {e}" + else: + # Got this far, so gdb works, start checking what it supports + status = gdb_wrapper.check_python() + if gdb_wrapper.success(status): + version_match = re.match(r'\((\d+),\s+(\d+)\)', + status.stdout.strip()) + if version_match is not None: + pymajor, pyminor = version_match.groups() + gdb_python_version = f"{pymajor}.{pyminor}" + gdb_has_python = True + + status = gdb_wrapper.check_numpy() + if gdb_wrapper.success(status): + if "Traceback" not in status.stderr.strip(): + if status.stdout.strip() == 'True': + gdb_has_numpy = True + gdb_python_numpy_version = "Unknown" + # NumPy is present find the version + status = gdb_wrapper.check_numpy_version() + if gdb_wrapper.success(status): + if "Traceback" not in status.stderr.strip(): + gdb_python_numpy_version = \ + status.stdout.strip() + + # Work out what level of print-extension support is present in this gdb + if gdb_has_python: + if gdb_has_numpy: + print_ext_supported = "Full (Python and NumPy supported)" + else: + print_ext_supported = "Partial (Python only, no NumPy support)" + else: + print_ext_supported = "None" + + # Work out print ext location + print_ext_file = "gdb_print_extension.py" + print_ext_path = os.path.join(os.path.dirname(__file__), print_ext_file) + + # return! + return _gdb_info(gdb_state, print_ext_path, gdb_python_version, + gdb_python_numpy_version, print_ext_supported) + + +def display_gdbinfo(sep_pos=45): + """Displays the information collected by collect_gdbinfo. 
+ """ + gdb_info = collect_gdbinfo() + print('-' * 80) + fmt = f'%-{sep_pos}s : %-s' + # Display the information + print(fmt % ("Binary location", gdb_info.binary_loc)) + print(fmt % ("Print extension location", gdb_info.extension_loc)) + print(fmt % ("Python version", gdb_info.py_ver)) + print(fmt % ("NumPy version", gdb_info.np_ver)) + print(fmt % ("Numba printing extension support", gdb_info.supported)) + + print("") + print("To load the Numba gdb printing extension, execute the following " + "from the gdb prompt:") + print(f"\nsource {gdb_info.extension_loc}\n") + print('-' * 80) + warn = """ + ============================================================= + IMPORTANT: Before sharing you should remove any information + in the above that you wish to keep private e.g. paths. + ============================================================= + """ + print(dedent(warn)) + + +if __name__ == '__main__': + display_gdbinfo() diff --git a/venv/lib/python3.10/site-packages/numba/misc/numba_sysinfo.py b/venv/lib/python3.10/site-packages/numba/misc/numba_sysinfo.py new file mode 100644 index 0000000000000000000000000000000000000000..75ff27491edd39648d5d93545133824adaea1756 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/misc/numba_sysinfo.py @@ -0,0 +1,698 @@ +import json +import locale +import multiprocessing +import os +import platform +import textwrap +import sys +from contextlib import redirect_stdout +from datetime import datetime +from io import StringIO +from subprocess import check_output, PIPE, CalledProcessError +import numpy as np +import llvmlite.binding as llvmbind +from llvmlite import __version__ as llvmlite_version +from numba import cuda as cu, __version__ as version_number +from numba.cuda import cudadrv +from numba.cuda.cudadrv.driver import driver as cudriver +from numba.cuda.cudadrv.runtime import runtime as curuntime +from numba.core import config + +_psutil_import = False +try: + import psutil +except ImportError: + pass +else: + 
_psutil_import = True + +__all__ = ['get_sysinfo', 'display_sysinfo'] + +# Keys of a `sysinfo` dictionary + +# Time info +_start, _start_utc, _runtime = 'Start', 'Start UTC', 'Runtime' +_numba_version = 'Numba Version' +# Hardware info +_machine = 'Machine' +_cpu_name, _cpu_count = 'CPU Name', 'CPU Count' +_cpus_allowed, _cpus_list = 'CPUs Allowed', 'List CPUs Allowed' +_cpu_features = 'CPU Features' +_cfs_quota, _cfs_period = 'CFS Quota', 'CFS Period', +_cfs_restrict = 'CFS Restriction' +_mem_total, _mem_available = 'Mem Total', 'Mem Available' +# OS info +_platform_name, _platform_release = 'Platform Name', 'Platform Release' +_os_name, _os_version = 'OS Name', 'OS Version' +_os_spec_version = 'OS Specific Version' +_libc_version = 'Libc Version' +# Python info +_python_comp = 'Python Compiler' +_python_impl = 'Python Implementation' +_python_version = 'Python Version' +_python_locale = 'Python Locale' +# LLVM info +_llvmlite_version = 'llvmlite Version' +_llvm_version = 'LLVM Version' +# CUDA info +_cu_target_impl = 'CUDA Target Impl' +_cu_dev_init = 'CUDA Device Init' +_cu_drv_ver = 'CUDA Driver Version' +_cu_rt_ver = 'CUDA Runtime Version' +_cu_nvidia_bindings = 'NVIDIA CUDA Bindings' +_cu_nvidia_bindings_used = 'NVIDIA CUDA Bindings In Use' +_cu_detect_out, _cu_lib_test = 'CUDA Detect Output', 'CUDA Lib Test' +_cu_mvc_available = 'NVIDIA CUDA Minor Version Compatibility Available' +_cu_mvc_needed = 'NVIDIA CUDA Minor Version Compatibility Needed' +_cu_mvc_in_use = 'NVIDIA CUDA Minor Version Compatibility In Use' +# NumPy info +_numpy_version = 'NumPy Version' +_numpy_supported_simd_features = 'NumPy Supported SIMD features' +_numpy_supported_simd_dispatch = 'NumPy Supported SIMD dispatch' +_numpy_supported_simd_baseline = 'NumPy Supported SIMD baseline' +_numpy_AVX512_SKX_detected = 'NumPy AVX512_SKX detected' +# SVML info +_svml_state, _svml_loaded = 'SVML State', 'SVML Lib Loaded' +_llvm_svml_patched = 'LLVM SVML Patched' +_svml_operational = 'SVML 
Operational' +# Threading layer info +_tbb_thread, _tbb_error = 'TBB Threading', 'TBB Threading Error' +_openmp_thread, _openmp_error = 'OpenMP Threading', 'OpenMP Threading Error' +_openmp_vendor = 'OpenMP vendor' +_wkq_thread, _wkq_error = 'Workqueue Threading', 'Workqueue Threading Error' +# Numba info +_numba_env_vars = 'Numba Env Vars' +# Conda info +_conda_build_ver, _conda_env_ver = 'Conda Build', 'Conda Env' +_conda_platform, _conda_python_ver = 'Conda Platform', 'Conda Python Version' +_conda_root_writable = 'Conda Root Writable' +# Packages info +_inst_pkg = 'Installed Packages' +# Psutil info +_psutil = 'Psutil Available' +# Errors and warnings +_errors = 'Errors' +_warnings = 'Warnings' + +# Error and warning log +_error_log = [] +_warning_log = [] + + +def get_os_spec_info(os_name): + # Linux man page for `/proc`: + # http://man7.org/linux/man-pages/man5/proc.5.html + + # Windows documentation for `wmic OS`: + # https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/cim-operatingsystem + + # MacOS man page for `sysctl`: + # https://www.unix.com/man-page/osx/3/sysctl/ + # MacOS man page for `vm_stat`: + # https://www.unix.com/man-page/osx/1/vm_stat/ + + class CmdBufferOut(tuple): + buffer_output_flag = True + + class CmdReadFile(tuple): + read_file_flag = True + + shell_params = { + 'Linux': { + 'cmd': ( + CmdReadFile(('/sys/fs/cgroup/cpuacct/cpu.cfs_quota_us',)), + CmdReadFile(('/sys/fs/cgroup/cpuacct/cpu.cfs_period_us',)), + ), + 'cmd_optional': ( + CmdReadFile(('/proc/meminfo',)), + CmdReadFile(('/proc/self/status',)), + ), + 'kwds': { + # output string fragment -> result dict key + 'MemTotal:': _mem_total, + 'MemAvailable:': _mem_available, + 'Cpus_allowed:': _cpus_allowed, + 'Cpus_allowed_list:': _cpus_list, + '/sys/fs/cgroup/cpuacct/cpu.cfs_quota_us': _cfs_quota, + '/sys/fs/cgroup/cpuacct/cpu.cfs_period_us': _cfs_period, + }, + }, + 'Windows': { + 'cmd': (), + 'cmd_optional': ( + CmdBufferOut(('wmic', 'OS', 'get', 'TotalVirtualMemorySize')), 
+ CmdBufferOut(('wmic', 'OS', 'get', 'FreeVirtualMemory')), + ), + 'kwds': { + # output string fragment -> result dict key + 'TotalVirtualMemorySize': _mem_total, + 'FreeVirtualMemory': _mem_available, + }, + }, + 'Darwin': { + 'cmd': (), + 'cmd_optional': ( + ('sysctl', 'hw.memsize'), + ('vm_stat'), + ), + 'kwds': { + # output string fragment -> result dict key + 'hw.memsize:': _mem_total, + 'free:': _mem_available, + }, + 'units': { + _mem_total: 1, # Size is given in bytes. + _mem_available: 4096, # Size is given in 4kB pages. + }, + }, + } + + os_spec_info = {} + params = shell_params.get(os_name, {}) + cmd_selected = params.get('cmd', ()) + + if _psutil_import: + vm = psutil.virtual_memory() + os_spec_info.update({ + _mem_total: vm.total, + _mem_available: vm.available, + }) + p = psutil.Process() + cpus_allowed = p.cpu_affinity() if hasattr(p, 'cpu_affinity') else [] + if cpus_allowed: + os_spec_info[_cpus_allowed] = len(cpus_allowed) + os_spec_info[_cpus_list] = ' '.join(str(n) for n in cpus_allowed) + + else: + _warning_log.append( + "Warning (psutil): psutil cannot be imported. " + "For more accuracy, consider installing it.") + # Fallback to internal heuristics + cmd_selected += params.get('cmd_optional', ()) + + # Assuming the shell cmd returns a unique (k, v) pair per line + # or a unique (k, v) pair spread over several lines: + # Gather output in a list of strings containing a keyword and some value. 
+ output = [] + for cmd in cmd_selected: + if hasattr(cmd, 'read_file_flag'): + # Open file within Python + if os.path.exists(cmd[0]): + try: + with open(cmd[0], 'r') as f: + out = f.readlines() + if out: + out[0] = ' '.join((cmd[0], out[0])) + output.extend(out) + except OSError as e: + _error_log.append(f'Error (file read): {e}') + continue + else: + _warning_log.append('Warning (no file): {}'.format(cmd[0])) + continue + else: + # Spawn a subprocess + try: + out = check_output(cmd, stderr=PIPE) + except (OSError, CalledProcessError) as e: + _error_log.append(f'Error (subprocess): {e}') + continue + if hasattr(cmd, 'buffer_output_flag'): + out = b' '.join(line for line in out.splitlines()) + b'\n' + output.extend(out.decode().splitlines()) + + # Extract (k, output) pairs by searching for keywords in output + kwds = params.get('kwds', {}) + for line in output: + match = kwds.keys() & line.split() + if match and len(match) == 1: + k = kwds[match.pop()] + os_spec_info[k] = line + elif len(match) > 1: + print(f'Ambiguous output: {line}') + + # Try to extract something meaningful from output string + def format(): + # CFS restrictions + split = os_spec_info.get(_cfs_quota, '').split() + if split: + os_spec_info[_cfs_quota] = float(split[-1]) + split = os_spec_info.get(_cfs_period, '').split() + if split: + os_spec_info[_cfs_period] = float(split[-1]) + if os_spec_info.get(_cfs_quota, -1) != -1: + cfs_quota = os_spec_info.get(_cfs_quota, '') + cfs_period = os_spec_info.get(_cfs_period, '') + runtime_amount = cfs_quota / cfs_period + os_spec_info[_cfs_restrict] = runtime_amount + + def format_optional(): + # Memory + units = {_mem_total: 1024, _mem_available: 1024} + units.update(params.get('units', {})) + for k in (_mem_total, _mem_available): + digits = ''.join(d for d in os_spec_info.get(k, '') if d.isdigit()) + os_spec_info[k] = int(digits or 0) * units[k] + # Accessible CPUs + split = os_spec_info.get(_cpus_allowed, '').split() + if split: + n = split[-1] + n = 
n.split(',')[-1] + os_spec_info[_cpus_allowed] = str(bin(int(n or 0, 16))).count('1') + split = os_spec_info.get(_cpus_list, '').split() + if split: + os_spec_info[_cpus_list] = split[-1] + + try: + format() + if not _psutil_import: + format_optional() + except Exception as e: + _error_log.append(f'Error (format shell output): {e}') + + # Call OS specific functions + os_specific_funcs = { + 'Linux': { + _libc_version: lambda: ' '.join(platform.libc_ver()) + }, + 'Windows': { + _os_spec_version: lambda: ' '.join( + s for s in platform.win32_ver()), + }, + 'Darwin': { + _os_spec_version: lambda: ''.join( + i or ' ' for s in tuple(platform.mac_ver()) for i in s), + }, + } + key_func = os_specific_funcs.get(os_name, {}) + os_spec_info.update({k: f() for k, f in key_func.items()}) + return os_spec_info + + +def get_sysinfo(): + + # Gather the information that shouldn't raise exceptions + sys_info = { + _start: datetime.now(), + _start_utc: datetime.utcnow(), + _machine: platform.machine(), + _cpu_name: llvmbind.get_host_cpu_name(), + _cpu_count: multiprocessing.cpu_count(), + _platform_name: platform.platform(aliased=True), + _platform_release: platform.release(), + _os_name: platform.system(), + _os_version: platform.version(), + _python_comp: platform.python_compiler(), + _python_impl: platform.python_implementation(), + _python_version: platform.python_version(), + _numba_env_vars: {k: v for (k, v) in os.environ.items() + if k.startswith('NUMBA_')}, + _numba_version: version_number, + _llvm_version: '.'.join(str(i) for i in llvmbind.llvm_version_info), + _llvmlite_version: llvmlite_version, + _psutil: _psutil_import, + } + + # CPU features + try: + feature_map = llvmbind.get_host_cpu_features() + except RuntimeError as e: + _error_log.append(f'Error (CPU features): {e}') + else: + features = sorted([key for key, value in feature_map.items() if value]) + sys_info[_cpu_features] = ' '.join(features) + + # Python locale + # On MacOSX, getdefaultlocale can raise. 
Check again if Py > 3.7.5 + try: + # If $LANG is unset, getdefaultlocale() can return (None, None), make + # sure we can encode this as strings by casting explicitly. + sys_info[_python_locale] = '.'.join([str(i) for i in + locale.getdefaultlocale()]) + except Exception as e: + _error_log.append(f'Error (locale): {e}') + + # CUDA information + try: + sys_info[_cu_target_impl] = cu.implementation + except AttributeError: + # On the offchance an out-of-tree target did not set the + # implementation, we can try to continue + pass + + try: + cu.list_devices()[0] # will a device initialise? + except Exception as e: + sys_info[_cu_dev_init] = False + msg_not_found = "CUDA driver library cannot be found" + msg_disabled_by_user = "CUDA is disabled" + msg_end = " or no CUDA enabled devices are present." + msg_generic_problem = "CUDA device initialisation problem." + msg = getattr(e, 'msg', None) + if msg is not None: + if msg_not_found in msg: + err_msg = msg_not_found + msg_end + elif msg_disabled_by_user in msg: + err_msg = msg_disabled_by_user + msg_end + else: + err_msg = msg_generic_problem + " Message:" + msg + else: + err_msg = msg_generic_problem + " " + str(e) + # Best effort error report + _warning_log.append("Warning (cuda): %s\nException class: %s" % + (err_msg, str(type(e)))) + else: + try: + sys_info[_cu_dev_init] = True + + output = StringIO() + with redirect_stdout(output): + cu.detect() + sys_info[_cu_detect_out] = output.getvalue() + output.close() + + cu_drv_ver = cudriver.get_version() + cu_rt_ver = curuntime.get_version() + sys_info[_cu_drv_ver] = '%s.%s' % cu_drv_ver + sys_info[_cu_rt_ver] = '%s.%s' % cu_rt_ver + + output = StringIO() + with redirect_stdout(output): + cudadrv.libs.test() + sys_info[_cu_lib_test] = output.getvalue() + output.close() + + try: + from cuda import cuda # noqa: F401 + nvidia_bindings_available = True + except ImportError: + nvidia_bindings_available = False + sys_info[_cu_nvidia_bindings] = nvidia_bindings_available + + 
nv_binding_used = bool(cudadrv.driver.USE_NV_BINDING) + sys_info[_cu_nvidia_bindings_used] = nv_binding_used + + try: + from ptxcompiler import compile_ptx # noqa: F401 + from cubinlinker import CubinLinker # noqa: F401 + sys_info[_cu_mvc_available] = True + except ImportError: + sys_info[_cu_mvc_available] = False + + sys_info[_cu_mvc_needed] = cu_rt_ver > cu_drv_ver + sys_info[_cu_mvc_in_use] = bool( + config.CUDA_ENABLE_MINOR_VERSION_COMPATIBILITY) + except Exception as e: + _warning_log.append( + "Warning (cuda): Probing CUDA failed " + "(device and driver present, runtime problem?)\n" + f"(cuda) {type(e)}: {e}") + + # NumPy information + sys_info[_numpy_version] = np.version.full_version + try: + # NOTE: These consts were added in NumPy 1.20 + from numpy.core._multiarray_umath import (__cpu_features__, + __cpu_dispatch__, + __cpu_baseline__,) + except ImportError: + sys_info[_numpy_AVX512_SKX_detected] = False + else: + feat_filtered = [k for k, v in __cpu_features__.items() if v] + sys_info[_numpy_supported_simd_features] = feat_filtered + sys_info[_numpy_supported_simd_dispatch] = __cpu_dispatch__ + sys_info[_numpy_supported_simd_baseline] = __cpu_baseline__ + sys_info[_numpy_AVX512_SKX_detected] = \ + __cpu_features__.get("AVX512_SKX", False) + + # SVML information + # Replicate some SVML detection logic from numba.__init__ here. + # If SVML load fails in numba.__init__ the splitting of the logic + # here will help diagnosing the underlying issue. 
+ svml_lib_loaded = True + try: + if sys.platform.startswith('linux'): + llvmbind.load_library_permanently("libsvml.so") + elif sys.platform.startswith('darwin'): + llvmbind.load_library_permanently("libsvml.dylib") + elif sys.platform.startswith('win'): + llvmbind.load_library_permanently("svml_dispmd") + else: + svml_lib_loaded = False + except Exception: + svml_lib_loaded = False + func = getattr(llvmbind.targets, "has_svml", None) + sys_info[_llvm_svml_patched] = func() if func else False + sys_info[_svml_state] = config.USING_SVML + sys_info[_svml_loaded] = svml_lib_loaded + sys_info[_svml_operational] = all(( + sys_info[_svml_state], + sys_info[_svml_loaded], + sys_info[_llvm_svml_patched], + )) + + # Check which threading backends are available. + def parse_error(e, backend): + # parses a linux based error message, this is to provide feedback + # and hide user paths etc + try: + path, problem, symbol = [x.strip() for x in e.msg.split(':')] + extn_dso = os.path.split(path)[1] + if backend in extn_dso: + return "%s: %s" % (problem, symbol) + except Exception: + pass + return "Unknown import problem." + + try: + # check import is ok, this means the DSO linkage is working + from numba.np.ufunc import tbbpool # NOQA + # check that the version is compatible, this is a check performed at + # runtime (well, compile time), it will also ImportError if there's + # a problem. + from numba.np.ufunc.parallel import _check_tbb_version_compatible + _check_tbb_version_compatible() + sys_info[_tbb_thread] = True + except ImportError as e: + # might be a missing symbol due to e.g. 
tbb libraries missing + sys_info[_tbb_thread] = False + sys_info[_tbb_error] = parse_error(e, 'tbbpool') + + try: + from numba.np.ufunc import omppool + sys_info[_openmp_thread] = True + sys_info[_openmp_vendor] = omppool.openmp_vendor + except ImportError as e: + sys_info[_openmp_thread] = False + sys_info[_openmp_error] = parse_error(e, 'omppool') + + try: + from numba.np.ufunc import workqueue # NOQA + sys_info[_wkq_thread] = True + except ImportError as e: + sys_info[_wkq_thread] = True + sys_info[_wkq_error] = parse_error(e, 'workqueue') + + # Look for conda and installed packages information + cmd = ('conda', 'info', '--json') + try: + conda_out = check_output(cmd) + except Exception as e: + _warning_log.append(f'Warning: Conda not available.\n Error was {e}\n') + # Conda is not available, try pip list to list installed packages + cmd = (sys.executable, '-m', 'pip', 'list') + try: + reqs = check_output(cmd) + except Exception as e: + _error_log.append(f'Error (pip): {e}') + else: + sys_info[_inst_pkg] = reqs.decode().splitlines() + + else: + jsond = json.loads(conda_out.decode()) + keys = { + 'conda_build_version': _conda_build_ver, + 'conda_env_version': _conda_env_ver, + 'platform': _conda_platform, + 'python_version': _conda_python_ver, + 'root_writable': _conda_root_writable, + } + for conda_k, sysinfo_k in keys.items(): + sys_info[sysinfo_k] = jsond.get(conda_k, 'N/A') + + # Get info about packages in current environment + cmd = ('conda', 'list') + try: + conda_out = check_output(cmd) + except CalledProcessError as e: + _error_log.append(f'Error (conda): {e}') + else: + data = conda_out.decode().splitlines() + sys_info[_inst_pkg] = [l for l in data if not l.startswith('#')] + + sys_info.update(get_os_spec_info(sys_info[_os_name])) + sys_info[_errors] = _error_log + sys_info[_warnings] = _warning_log + sys_info[_runtime] = (datetime.now() - sys_info[_start]).total_seconds() + return sys_info + + +def display_sysinfo(info=None, sep_pos=45): + class 
DisplayMap(dict): + display_map_flag = True + + class DisplaySeq(tuple): + display_seq_flag = True + + class DisplaySeqMaps(tuple): + display_seqmaps_flag = True + + if info is None: + info = get_sysinfo() + + fmt = f'%-{sep_pos}s : %-s' + MB = 1024**2 + template = ( + ("-" * 80,), + ("__Time Stamp__",), + ("Report started (local time)", info.get(_start, '?')), + ("UTC start time", info.get(_start_utc, '?')), + ("Running time (s)", info.get(_runtime, '?')), + ("",), + ("__Hardware Information__",), + ("Machine", info.get(_machine, '?')), + ("CPU Name", info.get(_cpu_name, '?')), + ("CPU Count", info.get(_cpu_count, '?')), + ("Number of accessible CPUs", info.get(_cpus_allowed, '?')), + ("List of accessible CPUs cores", info.get(_cpus_list, '?')), + ("CFS Restrictions (CPUs worth of runtime)", + info.get(_cfs_restrict, 'None')), + ("",), + ("CPU Features", '\n'.join( + ' ' * (sep_pos + 3) + l if i else l + for i, l in enumerate( + textwrap.wrap( + info.get(_cpu_features, '?'), + width=79 - sep_pos + ) + ) + )), + ("",), + ("Memory Total (MB)", info.get(_mem_total, 0) // MB or '?'), + ("Memory Available (MB)" + if info.get(_os_name, '') != 'Darwin' or info.get(_psutil, False) + else "Free Memory (MB)", info.get(_mem_available, 0) // MB or '?'), + ("",), + ("__OS Information__",), + ("Platform Name", info.get(_platform_name, '?')), + ("Platform Release", info.get(_platform_release, '?')), + ("OS Name", info.get(_os_name, '?')), + ("OS Version", info.get(_os_version, '?')), + ("OS Specific Version", info.get(_os_spec_version, '?')), + ("Libc Version", info.get(_libc_version, '?')), + ("",), + ("__Python Information__",), + DisplayMap({k: v for k, v in info.items() if k.startswith('Python')}), + ("",), + ("__Numba Toolchain Versions__",), + ("Numba Version", info.get(_numba_version, '?')), + ("llvmlite Version", info.get(_llvmlite_version, '?')), + ("",), + ("__LLVM Information__",), + ("LLVM Version", info.get(_llvm_version, '?')), + ("",), + ("__CUDA Information__",), 
+ ("CUDA Target Implementation", info.get(_cu_target_impl, '?')), + ("CUDA Device Initialized", info.get(_cu_dev_init, '?')), + ("CUDA Driver Version", info.get(_cu_drv_ver, '?')), + ("CUDA Runtime Version", info.get(_cu_rt_ver, '?')), + ("CUDA NVIDIA Bindings Available", info.get(_cu_nvidia_bindings, '?')), + ("CUDA NVIDIA Bindings In Use", + info.get(_cu_nvidia_bindings_used, '?')), + ("CUDA Minor Version Compatibility Available", + info.get(_cu_mvc_available, '?')), + ("CUDA Minor Version Compatibility Needed", + info.get(_cu_mvc_needed, '?')), + ("CUDA Minor Version Compatibility In Use", + info.get(_cu_mvc_in_use, '?')), + ("CUDA Detect Output:",), + (info.get(_cu_detect_out, "None"),), + ("CUDA Libraries Test Output:",), + (info.get(_cu_lib_test, "None"),), + ("",), + ("__NumPy Information__",), + ("NumPy Version", info.get(_numpy_version, '?')), + ("NumPy Supported SIMD features", + DisplaySeq(info.get(_numpy_supported_simd_features, []) + or ('None found.',))), + ("NumPy Supported SIMD dispatch", + DisplaySeq(info.get(_numpy_supported_simd_dispatch, []) + or ('None found.',))), + ("NumPy Supported SIMD baseline", + DisplaySeq(info.get(_numpy_supported_simd_baseline, []) + or ('None found.',))), + ("NumPy AVX512_SKX support detected", + info.get(_numpy_AVX512_SKX_detected, '?')), + ("",), + ("__SVML Information__",), + ("SVML State, config.USING_SVML", info.get(_svml_state, '?')), + ("SVML Library Loaded", info.get(_svml_loaded, '?')), + ("llvmlite Using SVML Patched LLVM", info.get(_llvm_svml_patched, '?')), + ("SVML Operational", info.get(_svml_operational, '?')), + ("",), + ("__Threading Layer Information__",), + ("TBB Threading Layer Available", info.get(_tbb_thread, '?')), + ("+-->TBB imported successfully." 
if info.get(_tbb_thread, '?') + else f"+--> Disabled due to {info.get(_tbb_error, '?')}",), + ("OpenMP Threading Layer Available", info.get(_openmp_thread, '?')), + (f"+-->Vendor: {info.get(_openmp_vendor, '?')}" + if info.get(_openmp_thread, False) + else f"+--> Disabled due to {info.get(_openmp_error, '?')}",), + ("Workqueue Threading Layer Available", info.get(_wkq_thread, '?')), + ("+-->Workqueue imported successfully." if info.get(_wkq_thread, False) + else f"+--> Disabled due to {info.get(_wkq_error, '?')}",), + ("",), + ("__Numba Environment Variable Information__",), + (DisplayMap(info.get(_numba_env_vars, {})) or ('None found.',)), + ("",), + ("__Conda Information__",), + (DisplayMap({k: v for k, v in info.items() + if k.startswith('Conda')}) or ("Conda not available.",)), + ("",), + ("__Installed Packages__",), + DisplaySeq(info.get(_inst_pkg, ("Couldn't retrieve packages info.",))), + ("",), + ("__Error log__" if info.get(_errors, []) + else "No errors reported.",), + DisplaySeq(info.get(_errors, [])), + ("",), + ("__Warning log__" if info.get(_warnings, []) + else "No warnings reported.",), + DisplaySeq(info.get(_warnings, [])), + ("-" * 80,), + ("If requested, please copy and paste the information between\n" + "the dashed (----) lines, or from a given specific section as\n" + "appropriate.\n\n" + "=============================================================\n" + "IMPORTANT: Please ensure that you are happy with sharing the\n" + "contents of the information present, any information that you\n" + "wish to keep private you should remove before sharing.\n" + "=============================================================\n",), + ) + for t in template: + if hasattr(t, 'display_seq_flag'): + print(*t, sep='\n') + elif hasattr(t, 'display_map_flag'): + print(*tuple(fmt % (k, v) for (k, v) in t.items()), sep='\n') + elif hasattr(t, 'display_seqmaps_flag'): + for d in t: + print(*tuple(fmt % ('\t' + k, v) for (k, v) in d.items()), + sep='\n', end='\n') + elif 
len(t) == 2: + print(fmt % t) + else: + print(*t) + + +if __name__ == '__main__': + display_sysinfo() diff --git a/venv/lib/python3.10/site-packages/numba/misc/quicksort.py b/venv/lib/python3.10/site-packages/numba/misc/quicksort.py new file mode 100644 index 0000000000000000000000000000000000000000..e54ca698416c0c269fe9bdbb770b13c42a8a2c46 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/misc/quicksort.py @@ -0,0 +1,261 @@ +import collections + +import numpy as np + +from numba.core import types, config + + +QuicksortImplementation = collections.namedtuple( + 'QuicksortImplementation', + (# The compile function itself + 'compile', + # All subroutines exercised by test_sort + 'partition', 'partition3', 'insertion_sort', + # The top-level function + 'run_quicksort', + )) + + +Partition = collections.namedtuple('Partition', ('start', 'stop')) + +# Under this size, switch to a simple insertion sort +SMALL_QUICKSORT = 15 + +MAX_STACK = 100 + + +def make_quicksort_impl(wrap, lt=None, is_argsort=False, is_list=False, is_np_array=False): + + if config.USE_LEGACY_TYPE_SYSTEM: + intp = types.intp + else: + intp = types.py_int + zero = intp(0) + + # Two subroutines to make the core algorithm generic wrt. argsort + # or normal sorting. Note the genericity may make basic sort() + # slightly slower (~5%) + if is_argsort: + if is_list: + @wrap + def make_res(A): + return [x for x in range(len(A))] + else: + @wrap + def make_res(A): + return np.arange(A.size) + + @wrap + def GET(A, idx_or_val): + return A[idx_or_val] + + else: + @wrap + def make_res(A): + return A + + @wrap + def GET(A, idx_or_val): + return idx_or_val + + def default_lt(a, b): + """ + Trivial comparison function between two keys. + """ + return a < b + + LT = wrap(lt if lt is not None else default_lt) + + @wrap + def insertion_sort(A, R, low, high): + """ + Insertion sort A[low:high + 1]. Note the inclusive bounds. 
+ """ + assert low >= 0 + if high <= low: + return + + for i in range(low + 1, high + 1): + k = R[i] + v = GET(A, k) + # Insert v into A[low:i] + j = i + while j > low and LT(v, GET(A, R[j - 1])): + # Make place for moving A[i] downwards + R[j] = R[j - 1] + j -= 1 + R[j] = k + + @wrap + def partition(A, R, low, high): + """ + Partition A[low:high + 1] around a chosen pivot. The pivot's index + is returned. + """ + assert low >= 0 + assert high > low + + mid = (low + high) >> 1 + # NOTE: the pattern of swaps below for the pivot choice and the + # partitioning gives good results (i.e. regular O(n log n)) + # on sorted, reverse-sorted, and uniform arrays. Subtle changes + # risk breaking this property. + + # median of three {low, middle, high} + if LT(GET(A, R[mid]), GET(A, R[low])): + R[low], R[mid] = R[mid], R[low] + if LT(GET(A, R[high]), GET(A, R[mid])): + R[high], R[mid] = R[mid], R[high] + if LT(GET(A, R[mid]), GET(A, R[low])): + R[low], R[mid] = R[mid], R[low] + pivot = GET(A, R[mid]) + + # Temporarily stash the pivot at the end + R[high], R[mid] = R[mid], R[high] + i = low + j = high - 1 + while True: + while i < high and LT(GET(A, R[i]), pivot): + i += 1 + while j >= low and LT(pivot, GET(A, R[j])): + j -= 1 + if i >= j: + break + R[i], R[j] = R[j], R[i] + i += 1 + j -= 1 + # Put the pivot back in its final place (all items before `i` + # are smaller than the pivot, all items at/after `i` are larger) + R[i], R[high] = R[high], R[i] + return i + + @wrap + def partition3(A, low, high): + """ + Three-way partition [low, high) around a chosen pivot. 
+ A tuple (lt, gt) is returned such that: + - all elements in [low, lt) are < pivot + - all elements in [lt, gt] are == pivot + - all elements in (gt, high] are > pivot + """ + mid = (low + high) >> 1 + # median of three {low, middle, high} + if LT(A[mid], A[low]): + A[low], A[mid] = A[mid], A[low] + if LT(A[high], A[mid]): + A[high], A[mid] = A[mid], A[high] + if LT(A[mid], A[low]): + A[low], A[mid] = A[mid], A[low] + pivot = A[mid] + + A[low], A[mid] = A[mid], A[low] + lt = low + gt = high + i = low + 1 + while i <= gt: + if LT(A[i], pivot): + A[lt], A[i] = A[i], A[lt] + lt += 1 + i += 1 + elif LT(pivot, A[i]): + A[gt], A[i] = A[i], A[gt] + gt -= 1 + else: + i += 1 + return lt, gt + + @wrap + def run_quicksort1(A): + R = make_res(A) + + if len(A) < 2: + return R + + stack = [Partition(zero, zero)] * MAX_STACK + stack[0] = Partition(zero, len(A) - 1) + n = 1 + + while n > 0: + n -= 1 + low, high = stack[n] + # Partition until it becomes more efficient to do an insertion sort + while high - low >= SMALL_QUICKSORT: + assert n < MAX_STACK + i = partition(A, R, low, high) + # Push largest partition on the stack + if high - i > i - low: + # Right is larger + if high > i: + stack[n] = Partition(i + 1, high) + n += 1 + high = i - 1 + else: + if i > low: + stack[n] = Partition(low, i - 1) + n += 1 + low = i + 1 + + insertion_sort(A, R, low, high) + + return R + + if is_np_array: + @wrap + def run_quicksort(A): + if A.ndim == 1: + return run_quicksort1(A) + else: + for idx in np.ndindex(A.shape[:-1]): + run_quicksort1(A[idx]) + return A + else: + @wrap + def run_quicksort(A): + return run_quicksort1(A) + + # Unused quicksort implementation based on 3-way partitioning; the + # partitioning scheme turns out exhibiting bad behaviour on sorted arrays. 
+ @wrap + def _run_quicksort(A): + stack = [Partition(zero, zero)] * 100 + stack[0] = Partition(zero, len(A) - 1) + n = 1 + + while n > 0: + n -= 1 + low, high = stack[n] + # Partition until it becomes more efficient to do an insertion sort + while high - low >= SMALL_QUICKSORT: + assert n < MAX_STACK + l, r = partition3(A, low, high) + # One trivial (empty) partition => iterate on the other + if r == high: + high = l - 1 + elif l == low: + low = r + 1 + # Push largest partition on the stack + elif high - r > l - low: + # Right is larger + stack[n] = Partition(r + 1, high) + n += 1 + high = l - 1 + else: + stack[n] = Partition(low, l - 1) + n += 1 + low = r + 1 + + insertion_sort(A, low, high) + + + return QuicksortImplementation(wrap, + partition, partition3, insertion_sort, + run_quicksort) + + +def make_py_quicksort(*args, **kwargs): + return make_quicksort_impl((lambda f: f), *args, **kwargs) + +def make_jit_quicksort(*args, **kwargs): + from numba.core.extending import register_jitable + return make_quicksort_impl((lambda f: register_jitable(f)), + *args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/numba/misc/special.py b/venv/lib/python3.10/site-packages/numba/misc/special.py new file mode 100644 index 0000000000000000000000000000000000000000..36a44cebe74ba00ab82b88150c2cd11b5ae9a25e --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/misc/special.py @@ -0,0 +1,104 @@ +import numpy as np + +from numba.core.typing.typeof import typeof +from numba.core.typing.asnumbatype import as_numba_type + + +def pndindex(*args): + """ Provides an n-dimensional parallel iterator that generates index tuples + for each iteration point. Sequentially, pndindex is identical to np.ndindex. + """ + return np.ndindex(*args) + + +class prange(object): + """ Provides a 1D parallel iterator that generates a sequence of integers. + In non-parallel contexts, prange is identical to range. 
+ """ + def __new__(cls, *args): + return range(*args) + + +def _gdb_python_call_gen(func_name, *args): + # generates a call to a function containing a compiled in gdb command, + # this is to make `numba.gdb*` work in the interpreter. + import numba + fn = getattr(numba, func_name) + argstr = ','.join(['"%s"' for _ in args]) % args + defn = """def _gdb_func_injection():\n\t%s(%s)\n + """ % (func_name, argstr) + l = {} + exec(defn, {func_name: fn}, l) + return numba.njit(l['_gdb_func_injection']) + + +def gdb(*args): + """ + Calling this function will invoke gdb and attach it to the current process + at the call site. Arguments are strings in the gdb command language syntax + which will be executed by gdb once initialisation has occurred. + """ + _gdb_python_call_gen('gdb', *args)() + + +def gdb_breakpoint(): + """ + Calling this function will inject a breakpoint at the call site that is + recognised by both `gdb` and `gdb_init`, this is to allow breaking at + multiple points. gdb will stop in the user defined code just after the frame + employed by the breakpoint returns. + """ + _gdb_python_call_gen('gdb_breakpoint')() + + +def gdb_init(*args): + """ + Calling this function will invoke gdb and attach it to the current process + at the call site, then continue executing the process under gdb's control. + Arguments are strings in the gdb command language syntax which will be + executed by gdb once initialisation has occurred. + """ + _gdb_python_call_gen('gdb_init', *args)() + + +def literally(obj): + """Forces Numba to interpret *obj* as an Literal value. + + *obj* must be either a literal or an argument of the caller function, where + the argument must be bound to a literal. The literal requirement + propagates up the call stack. + + This function is intercepted by the compiler to alter the compilation + behavior to wrap the corresponding function parameters as ``Literal``. + It has **no effect** outside of nopython-mode (interpreter, and objectmode). 
+ + The current implementation detects literal arguments in two ways: + + 1. Scans for uses of ``literally`` via a compiler pass. + 2. ``literally`` is overloaded to raise ``numba.errors.ForceLiteralArg`` + to signal the dispatcher to treat the corresponding parameter + differently. This mode is to support indirect use (via a function call). + + The execution semantic of this function is equivalent to an identity + function. + + See :ghfile:`numba/tests/test_literal_dispatch.py` for examples. + """ + return obj + + +def literal_unroll(container): + return container + + +__all__ = [ + 'typeof', + 'as_numba_type', + 'prange', + 'pndindex', + 'gdb', + 'gdb_breakpoint', + 'gdb_init', + 'literally', + 'literal_unroll', +] diff --git a/venv/lib/python3.10/site-packages/numba/misc/timsort.py b/venv/lib/python3.10/site-packages/numba/misc/timsort.py new file mode 100644 index 0000000000000000000000000000000000000000..ba1560e399ec73fbaad95c2060097a617b9415c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/misc/timsort.py @@ -0,0 +1,943 @@ +""" +Timsort implementation. Mostly adapted from CPython's listobject.c. + +For more information, see listsort.txt in CPython's source tree. +""" + + +import collections + +from numba.core import types + + +TimsortImplementation = collections.namedtuple( + 'TimsortImplementation', + (# The compile function itself + 'compile', + # All subroutines exercised by test_sort + 'count_run', 'binarysort', 'gallop_left', 'gallop_right', + 'merge_init', 'merge_append', 'merge_pop', + 'merge_compute_minrun', 'merge_lo', 'merge_hi', 'merge_at', + 'merge_force_collapse', 'merge_collapse', + # The top-level functions + 'run_timsort', 'run_timsort_with_values' + )) + + +# The maximum number of entries in a MergeState's pending-runs stack. +# This is enough to sort arrays of size up to about +# 32 * phi ** MAX_MERGE_PENDING +# where phi ~= 1.618. 85 is ridiculously large enough, good for an array +# with 2**64 elements. 
+# NOTE this implementation doesn't depend on it (the stack is dynamically +# allocated), but it's still good to check as an invariant. +MAX_MERGE_PENDING = 85 + +# When we get into galloping mode, we stay there until both runs win less +# often than MIN_GALLOP consecutive times. See listsort.txt for more info. +MIN_GALLOP = 7 + +# Start size for temp arrays. +MERGESTATE_TEMP_SIZE = 256 + +# A mergestate is a named tuple with the following members: +# - *min_gallop* is an integer controlling when we get into galloping mode +# - *keys* is a temp list for merging keys +# - *values* is a temp list for merging values, if needed +# - *pending* is a stack of pending runs to be merged +# - *n* is the current stack length of *pending* + +MergeState = collections.namedtuple( + 'MergeState', ('min_gallop', 'keys', 'values', 'pending', 'n')) + + +MergeRun = collections.namedtuple('MergeRun', ('start', 'size')) + + +def make_timsort_impl(wrap, make_temp_area): + + make_temp_area = wrap(make_temp_area) + intp = types.intp + zero = intp(0) + + @wrap + def has_values(keys, values): + return values is not keys + + @wrap + def merge_init(keys): + """ + Initialize a MergeState for a non-keyed sort. + """ + temp_size = min(len(keys) // 2 + 1, MERGESTATE_TEMP_SIZE) + temp_keys = make_temp_area(keys, temp_size) + temp_values = temp_keys + pending = [MergeRun(zero, zero)] * MAX_MERGE_PENDING + return MergeState(intp(MIN_GALLOP), temp_keys, temp_values, pending, zero) + + @wrap + def merge_init_with_values(keys, values): + """ + Initialize a MergeState for a keyed sort. + """ + temp_size = min(len(keys) // 2 + 1, MERGESTATE_TEMP_SIZE) + temp_keys = make_temp_area(keys, temp_size) + temp_values = make_temp_area(values, temp_size) + pending = [MergeRun(zero, zero)] * MAX_MERGE_PENDING + return MergeState(intp(MIN_GALLOP), temp_keys, temp_values, pending, zero) + + @wrap + def merge_append(ms, run): + """ + Append a run on the merge stack. 
+ """ + n = ms.n + assert n < MAX_MERGE_PENDING + ms.pending[n] = run + return MergeState(ms.min_gallop, ms.keys, ms.values, ms.pending, n + 1) + + @wrap + def merge_pop(ms): + """ + Pop the top run from the merge stack. + """ + return MergeState(ms.min_gallop, ms.keys, ms.values, ms.pending, ms.n - 1) + + @wrap + def merge_getmem(ms, need): + """ + Ensure enough temp memory for 'need' items is available. + """ + alloced = len(ms.keys) + if need <= alloced: + return ms + # Over-allocate + while alloced < need: + alloced = alloced << 1 + # Don't realloc! That can cost cycles to copy the old data, but + # we don't care what's in the block. + temp_keys = make_temp_area(ms.keys, alloced) + if has_values(ms.keys, ms.values): + temp_values = make_temp_area(ms.values, alloced) + else: + temp_values = temp_keys + return MergeState(ms.min_gallop, temp_keys, temp_values, ms.pending, ms.n) + + @wrap + def merge_adjust_gallop(ms, new_gallop): + """ + Modify the MergeState's min_gallop. + """ + return MergeState(intp(new_gallop), ms.keys, ms.values, ms.pending, ms.n) + + + @wrap + def LT(a, b): + """ + Trivial comparison function between two keys. This is factored out to + make it clear where comparisons occur. + """ + return a < b + + @wrap + def binarysort(keys, values, lo, hi, start): + """ + binarysort is the best method for sorting small arrays: it does + few compares, but can do data movement quadratic in the number of + elements. + [lo, hi) is a contiguous slice of a list, and is sorted via + binary insertion. This sort is stable. + On entry, must have lo <= start <= hi, and that [lo, start) is already + sorted (pass start == lo if you don't know!). + """ + assert lo <= start and start <= hi + _has_values = has_values(keys, values) + if lo == start: + start += 1 + while start < hi: + pivot = keys[start] + # Bisect to find where to insert `pivot` + # NOTE: bisection only wins over linear search if the comparison + # function is much more expensive than simply moving data. 
+ l = lo + r = start + # Invariants: + # pivot >= all in [lo, l). + # pivot < all in [r, start). + # The second is vacuously true at the start. + while l < r: + p = l + ((r - l) >> 1) + if LT(pivot, keys[p]): + r = p + else: + l = p+1 + + # The invariants still hold, so pivot >= all in [lo, l) and + # pivot < all in [l, start), so pivot belongs at l. Note + # that if there are elements equal to pivot, l points to the + # first slot after them -- that's why this sort is stable. + # Slide over to make room (aka memmove()). + for p in range(start, l, -1): + keys[p] = keys[p - 1] + keys[l] = pivot + if _has_values: + pivot_val = values[start] + for p in range(start, l, -1): + values[p] = values[p - 1] + values[l] = pivot_val + + start += 1 + + + @wrap + def count_run(keys, lo, hi): + """ + Return the length of the run beginning at lo, in the slice [lo, hi). + lo < hi is required on entry. "A run" is the longest ascending sequence, with + + lo[0] <= lo[1] <= lo[2] <= ... + + or the longest descending sequence, with + + lo[0] > lo[1] > lo[2] > ... + + A tuple (length, descending) is returned, where boolean *descending* + is set to 0 in the former case, or to 1 in the latter. + For its intended use in a stable mergesort, the strictness of the defn of + "descending" is needed so that the caller can safely reverse a descending + sequence without violating stability (strict > ensures there are no equal + elements to get out of order). 
+ """ + assert lo < hi + if lo + 1 == hi: + # Trivial 1-long run + return 1, False + if LT(keys[lo + 1], keys[lo]): + # Descending run + for k in range(lo + 2, hi): + if not LT(keys[k], keys[k - 1]): + return k - lo, True + return hi - lo, True + else: + # Ascending run + for k in range(lo + 2, hi): + if LT(keys[k], keys[k - 1]): + return k - lo, False + return hi - lo, False + + + @wrap + def gallop_left(key, a, start, stop, hint): + """ + Locate the proper position of key in a sorted vector; if the vector contains + an element equal to key, return the position immediately to the left of + the leftmost equal element. [gallop_right() does the same except returns + the position to the right of the rightmost equal element (if any).] + + "a" is a sorted vector with stop elements, starting at a[start]. + stop must be > start. + + "hint" is an index at which to begin the search, start <= hint < stop. + The closer hint is to the final result, the faster this runs. + + The return value is the int k in start..stop such that + + a[k-1] < key <= a[k] + + pretending that a[start-1] is minus infinity and a[stop] is plus infinity. + IOW, key belongs at index k; or, IOW, the first k elements of a should + precede key, and the last stop-start-k should follow key. + + See listsort.txt for info on the method. 
+ """ + assert stop > start + assert hint >= start and hint < stop + n = stop - start + + # First, gallop from the hint to find a "good" subinterval for bisecting + lastofs = 0 + ofs = 1 + if LT(a[hint], key): + # a[hint] < key => gallop right, until + # a[hint + lastofs] < key <= a[hint + ofs] + maxofs = stop - hint + while ofs < maxofs: + if LT(a[hint + ofs], key): + lastofs = ofs + ofs = (ofs << 1) + 1 + if ofs <= 0: + # Int overflow + ofs = maxofs + else: + # key <= a[hint + ofs] + break + if ofs > maxofs: + ofs = maxofs + # Translate back to offsets relative to a[0] + lastofs += hint + ofs += hint + else: + # key <= a[hint] => gallop left, until + # a[hint - ofs] < key <= a[hint - lastofs] + maxofs = hint - start + 1 + while ofs < maxofs: + if LT(a[hint - ofs], key): + break + else: + # key <= a[hint - ofs] + lastofs = ofs + ofs = (ofs << 1) + 1 + if ofs <= 0: + # Int overflow + ofs = maxofs + if ofs > maxofs: + ofs = maxofs + # Translate back to positive offsets relative to a[0] + lastofs, ofs = hint - ofs, hint - lastofs + + assert start - 1 <= lastofs and lastofs < ofs and ofs <= stop + # Now a[lastofs] < key <= a[ofs], so key belongs somewhere to the + # right of lastofs but no farther right than ofs. Do a binary + # search, with invariant a[lastofs-1] < key <= a[ofs]. + lastofs += 1 + while lastofs < ofs: + m = lastofs + ((ofs - lastofs) >> 1) + if LT(a[m], key): + # a[m] < key + lastofs = m + 1 + else: + # key <= a[m] + ofs = m + # Now lastofs == ofs, so a[ofs - 1] < key <= a[ofs] + return ofs + + + @wrap + def gallop_right(key, a, start, stop, hint): + """ + Exactly like gallop_left(), except that if key already exists in a[start:stop], + finds the position immediately to the right of the rightmost equal value. 
+ + The return value is the int k in start..stop such that + + a[k-1] <= key < a[k] + + The code duplication is massive, but this is enough different given that + we're sticking to "<" comparisons that it's much harder to follow if + written as one routine with yet another "left or right?" flag. + """ + assert stop > start + assert hint >= start and hint < stop + n = stop - start + + # First, gallop from the hint to find a "good" subinterval for bisecting + lastofs = 0 + ofs = 1 + if LT(key, a[hint]): + # key < a[hint] => gallop left, until + # a[hint - ofs] <= key < a[hint - lastofs] + maxofs = hint - start + 1 + while ofs < maxofs: + if LT(key, a[hint - ofs]): + lastofs = ofs + ofs = (ofs << 1) + 1 + if ofs <= 0: + # Int overflow + ofs = maxofs + else: + # a[hint - ofs] <= key + break + if ofs > maxofs: + ofs = maxofs + # Translate back to positive offsets relative to a[0] + lastofs, ofs = hint - ofs, hint - lastofs + else: + # a[hint] <= key -- gallop right, until + # a[hint + lastofs] <= key < a[hint + ofs] + maxofs = stop - hint + while ofs < maxofs: + if LT(key, a[hint + ofs]): + break + else: + # a[hint + ofs] <= key + lastofs = ofs + ofs = (ofs << 1) + 1 + if ofs <= 0: + # Int overflow + ofs = maxofs + if ofs > maxofs: + ofs = maxofs + # Translate back to offsets relative to a[0] + lastofs += hint + ofs += hint + + assert start - 1 <= lastofs and lastofs < ofs and ofs <= stop + # Now a[lastofs] <= key < a[ofs], so key belongs somewhere to the + # right of lastofs but no farther right than ofs. Do a binary + # search, with invariant a[lastofs-1] <= key < a[ofs]. 
+ lastofs += 1 + while lastofs < ofs: + m = lastofs + ((ofs - lastofs) >> 1) + if LT(key, a[m]): + # key < a[m] + ofs = m + else: + # a[m] <= key + lastofs = m + 1 + # Now lastofs == ofs, so a[ofs - 1] <= key < a[ofs] + return ofs + + + @wrap + def merge_compute_minrun(n): + """ + Compute a good value for the minimum run length; natural runs shorter + than this are boosted artificially via binary insertion. + + If n < 64, return n (it's too small to bother with fancy stuff). + Else if n is an exact power of 2, return 32. + Else return an int k, 32 <= k <= 64, such that n/k is close to, but + strictly less than, an exact power of 2. + + See listsort.txt for more info. + """ + r = 0 + assert n >= 0 + while n >= 64: + r |= n & 1 + n >>= 1 + return n + r + + + @wrap + def sortslice_copy(dest_keys, dest_values, dest_start, + src_keys, src_values, src_start, + nitems): + """ + Upwards memcpy(). + """ + assert src_start >= 0 + assert dest_start >= 0 + for i in range(nitems): + dest_keys[dest_start + i] = src_keys[src_start + i] + if has_values(src_keys, src_values): + for i in range(nitems): + dest_values[dest_start + i] = src_values[src_start + i] + + @wrap + def sortslice_copy_down(dest_keys, dest_values, dest_start, + src_keys, src_values, src_start, + nitems): + """ + Downwards memcpy(). + """ + assert src_start >= 0 + assert dest_start >= 0 + for i in range(nitems): + dest_keys[dest_start - i] = src_keys[src_start - i] + if has_values(src_keys, src_values): + for i in range(nitems): + dest_values[dest_start - i] = src_values[src_start - i] + + + # Disable this for debug or perf comparison + DO_GALLOP = 1 + + @wrap + def merge_lo(ms, keys, values, ssa, na, ssb, nb): + """ + Merge the na elements starting at ssa with the nb elements starting at + ssb = ssa + na in a stable way, in-place. na and nb must be > 0, + and should have na <= nb. See listsort.txt for more info. + + An updated MergeState is returned (with possibly a different min_gallop + or larger temp arrays). 
+ + NOTE: compared to CPython's timsort, the requirement that + "Must also have that keys[ssa + na - 1] belongs at the end of the merge" + + is removed. This makes the code a bit simpler and easier to reason about. + """ + assert na > 0 and nb > 0 and na <= nb + assert ssb == ssa + na + # First copy [ssa, ssa + na) into the temp space + ms = merge_getmem(ms, na) + sortslice_copy(ms.keys, ms.values, 0, + keys, values, ssa, + na) + a_keys = ms.keys + a_values = ms.values + b_keys = keys + b_values = values + dest = ssa + ssa = 0 + + _has_values = has_values(a_keys, a_values) + min_gallop = ms.min_gallop + + # Now start merging into the space left from [ssa, ...) + + while nb > 0 and na > 0: + # Do the straightforward thing until (if ever) one run + # appears to win consistently. + acount = 0 + bcount = 0 + + while True: + if LT(b_keys[ssb], a_keys[ssa]): + keys[dest] = b_keys[ssb] + if _has_values: + values[dest] = b_values[ssb] + dest += 1 + ssb += 1 + nb -= 1 + if nb == 0: + break + # It's a B run + bcount += 1 + acount = 0 + if bcount >= min_gallop: + break + else: + keys[dest] = a_keys[ssa] + if _has_values: + values[dest] = a_values[ssa] + dest += 1 + ssa += 1 + na -= 1 + if na == 0: + break + # It's a A run + acount += 1 + bcount = 0 + if acount >= min_gallop: + break + + # One run is winning so consistently that galloping may + # be a huge win. So try that, and continue galloping until + # (if ever) neither run appears to be winning consistently + # anymore. 
+ if DO_GALLOP and na > 0 and nb > 0: + min_gallop += 1 + + while acount >= MIN_GALLOP or bcount >= MIN_GALLOP: + # As long as we gallop without leaving this loop, make + # the heuristic more likely + min_gallop -= min_gallop > 1 + + # Gallop in A to find where keys[ssb] should end up + k = gallop_right(b_keys[ssb], a_keys, ssa, ssa + na, ssa) + # k is an index, make it a size + k -= ssa + acount = k + if k > 0: + # Copy everything from A before k + sortslice_copy(keys, values, dest, + a_keys, a_values, ssa, + k) + dest += k + ssa += k + na -= k + if na == 0: + # Finished merging + break + # Copy keys[ssb] + keys[dest] = b_keys[ssb] + if _has_values: + values[dest] = b_values[ssb] + dest += 1 + ssb += 1 + nb -= 1 + if nb == 0: + # Finished merging + break + + # Gallop in B to find where keys[ssa] should end up + k = gallop_left(a_keys[ssa], b_keys, ssb, ssb + nb, ssb) + # k is an index, make it a size + k -= ssb + bcount = k + if k > 0: + # Copy everything from B before k + # NOTE: source and dest are the same buffer, but the + # destination index is below the source index + sortslice_copy(keys, values, dest, + b_keys, b_values, ssb, + k) + dest += k + ssb += k + nb -= k + if nb == 0: + # Finished merging + break + # Copy keys[ssa] + keys[dest] = a_keys[ssa] + if _has_values: + values[dest] = a_values[ssa] + dest += 1 + ssa += 1 + na -= 1 + if na == 0: + # Finished merging + break + + # Penalize it for leaving galloping mode + min_gallop += 1 + + # Merge finished, now handle the remaining areas + if nb == 0: + # Only A remaining to copy at the end of the destination area + sortslice_copy(keys, values, dest, + a_keys, a_values, ssa, + na) + else: + assert na == 0 + assert dest == ssb + # B's tail is already at the right place, do nothing + + return merge_adjust_gallop(ms, min_gallop) + + + @wrap + def merge_hi(ms, keys, values, ssa, na, ssb, nb): + """ + Merge the na elements starting at ssa with the nb elements starting at + ssb = ssa + na in a stable way, 
in-place. na and nb must be > 0, + and should have na >= nb. See listsort.txt for more info. + + An updated MergeState is returned (with possibly a different min_gallop + or larger temp arrays). + + NOTE: compared to CPython's timsort, the requirement that + "Must also have that keys[ssa + na - 1] belongs at the end of the merge" + + is removed. This makes the code a bit simpler and easier to reason about. + """ + assert na > 0 and nb > 0 and na >= nb + assert ssb == ssa + na + # First copy [ssb, ssb + nb) into the temp space + ms = merge_getmem(ms, nb) + sortslice_copy(ms.keys, ms.values, 0, + keys, values, ssb, + nb) + a_keys = keys + a_values = values + b_keys = ms.keys + b_values = ms.values + + # Now start merging *in descending order* into the space left + # from [..., ssb + nb). + dest = ssb + nb - 1 + ssb = nb - 1 + ssa = ssa + na - 1 + + _has_values = has_values(b_keys, b_values) + min_gallop = ms.min_gallop + + while nb > 0 and na > 0: + # Do the straightforward thing until (if ever) one run + # appears to win consistently. + acount = 0 + bcount = 0 + + while True: + if LT(b_keys[ssb], a_keys[ssa]): + # We merge in descending order, so copy the larger value + keys[dest] = a_keys[ssa] + if _has_values: + values[dest] = a_values[ssa] + dest -= 1 + ssa -= 1 + na -= 1 + if na == 0: + break + # It's a A run + acount += 1 + bcount = 0 + if acount >= min_gallop: + break + else: + keys[dest] = b_keys[ssb] + if _has_values: + values[dest] = b_values[ssb] + dest -= 1 + ssb -= 1 + nb -= 1 + if nb == 0: + break + # It's a B run + bcount += 1 + acount = 0 + if bcount >= min_gallop: + break + + # One run is winning so consistently that galloping may + # be a huge win. So try that, and continue galloping until + # (if ever) neither run appears to be winning consistently + # anymore. 
+ if DO_GALLOP and na > 0 and nb > 0: + min_gallop += 1 + + while acount >= MIN_GALLOP or bcount >= MIN_GALLOP: + # As long as we gallop without leaving this loop, make + # the heuristic more likely + min_gallop -= min_gallop > 1 + + # Gallop in A to find where keys[ssb] should end up + k = gallop_right(b_keys[ssb], a_keys, ssa - na + 1, ssa + 1, ssa) + # k is an index, make it a size from the end + k = ssa + 1 - k + acount = k + if k > 0: + # Copy everything from A after k. + # Destination and source are the same buffer, and destination + # index is greater, so copy from the end to the start. + sortslice_copy_down(keys, values, dest, + a_keys, a_values, ssa, + k) + dest -= k + ssa -= k + na -= k + if na == 0: + # Finished merging + break + # Copy keys[ssb] + keys[dest] = b_keys[ssb] + if _has_values: + values[dest] = b_values[ssb] + dest -= 1 + ssb -= 1 + nb -= 1 + if nb == 0: + # Finished merging + break + + # Gallop in B to find where keys[ssa] should end up + k = gallop_left(a_keys[ssa], b_keys, ssb - nb + 1, ssb + 1, ssb) + # k is an index, make it a size from the end + k = ssb + 1 - k + bcount = k + if k > 0: + # Copy everything from B before k + sortslice_copy_down(keys, values, dest, + b_keys, b_values, ssb, + k) + dest -= k + ssb -= k + nb -= k + if nb == 0: + # Finished merging + break + # Copy keys[ssa] + keys[dest] = a_keys[ssa] + if _has_values: + values[dest] = a_values[ssa] + dest -= 1 + ssa -= 1 + na -= 1 + if na == 0: + # Finished merging + break + + # Penalize it for leaving galloping mode + min_gallop += 1 + + # Merge finished, now handle the remaining areas + if na == 0: + # Only B remaining to copy at the front of the destination area + sortslice_copy(keys, values, dest - nb + 1, + b_keys, b_values, ssb - nb + 1, + nb) + else: + assert nb == 0 + assert dest == ssa + # A's front is already at the right place, do nothing + + return merge_adjust_gallop(ms, min_gallop) + + + @wrap + def merge_at(ms, keys, values, i): + """ + Merge the two runs at 
stack indices i and i+1. + + An updated MergeState is returned. + """ + n = ms.n + assert n >= 2 + assert i >= 0 + assert i == n - 2 or i == n - 3 + + ssa, na = ms.pending[i] + ssb, nb = ms.pending[i + 1] + assert na > 0 and nb > 0 + assert ssa + na == ssb + + # Record the length of the combined runs; if i is the 3rd-last + # run now, also slide over the last run (which isn't involved + # in this merge). The current run i+1 goes away in any case. + ms.pending[i] = MergeRun(ssa, na + nb) + if i == n - 3: + ms.pending[i + 1] = ms.pending[i + 2] + ms = merge_pop(ms) + + # Where does b start in a? Elements in a before that can be + # ignored (already in place). + k = gallop_right(keys[ssb], keys, ssa, ssa + na, ssa) + # [k, ssa + na) remains to be merged + na -= k - ssa + ssa = k + if na == 0: + return ms + + # Where does a end in b? Elements in b after that can be + # ignored (already in place). + k = gallop_left(keys[ssa + na - 1], keys, ssb, ssb + nb, ssb + nb - 1) + # [ssb, k) remains to be merged + nb = k - ssb + + # Merge what remains of the runs, using a temp array with + # min(na, nb) elements. + if na <= nb: + return merge_lo(ms, keys, values, ssa, na, ssb, nb) + else: + return merge_hi(ms, keys, values, ssa, na, ssb, nb) + + + @wrap + def merge_collapse(ms, keys, values): + """ + Examine the stack of runs waiting to be merged, merging adjacent runs + until the stack invariants are re-established: + + 1. len[-3] > len[-2] + len[-1] + 2. len[-2] > len[-1] + + An updated MergeState is returned. + + See listsort.txt for more info. 
+ """ + while ms.n > 1: + pending = ms.pending + n = ms.n - 2 + if ((n > 0 and pending[n-1].size <= pending[n].size + pending[n+1].size) or + (n > 1 and pending[n-2].size <= pending[n-1].size + pending[n].size)): + if pending[n - 1].size < pending[n + 1].size: + # Merge smaller one first + n -= 1 + ms = merge_at(ms, keys, values, n) + elif pending[n].size < pending[n + 1].size: + ms = merge_at(ms, keys, values, n) + else: + break + return ms + + @wrap + def merge_force_collapse(ms, keys, values): + """ + Regardless of invariants, merge all runs on the stack until only one + remains. This is used at the end of the mergesort. + + An updated MergeState is returned. + """ + while ms.n > 1: + pending = ms.pending + n = ms.n - 2 + if n > 0: + if pending[n - 1].size < pending[n + 1].size: + # Merge the smaller one first + n -= 1 + ms = merge_at(ms, keys, values, n) + return ms + + + @wrap + def reverse_slice(keys, values, start, stop): + """ + Reverse a slice, in-place. + """ + i = start + j = stop - 1 + while i < j: + keys[i], keys[j] = keys[j], keys[i] + i += 1 + j -= 1 + if has_values(keys, values): + i = start + j = stop - 1 + while i < j: + values[i], values[j] = values[j], values[i] + i += 1 + j -= 1 + + + @wrap + def run_timsort_with_mergestate(ms, keys, values): + """ + Run timsort with the mergestate. + """ + nremaining = len(keys) + if nremaining < 2: + return + + # March over the array once, left to right, finding natural runs, + # and extending short natural runs to minrun elements. + minrun = merge_compute_minrun(nremaining) + + lo = zero + while nremaining > 0: + n, desc = count_run(keys, lo, lo + nremaining) + if desc: + # Descending run => reverse + reverse_slice(keys, values, lo, lo + n) + # If short, extend to min(minrun, nremaining) + if n < minrun: + force = min(minrun, nremaining) + binarysort(keys, values, lo, lo + force, lo + n) + n = force + # Push run onto stack, and maybe merge. 
+ ms = merge_append(ms, MergeRun(lo, n)) + ms = merge_collapse(ms, keys, values) + # Advance to find next run. + lo += n + nremaining -= n + + # All initial runs have been discovered, now finish merging. + ms = merge_force_collapse(ms, keys, values) + assert ms.n == 1 + assert ms.pending[0] == (0, len(keys)) + + + @wrap + def run_timsort(keys): + """ + Run timsort over the given keys. + """ + values = keys + run_timsort_with_mergestate(merge_init(keys), keys, values) + + + @wrap + def run_timsort_with_values(keys, values): + """ + Run timsort over the given keys and values. + """ + run_timsort_with_mergestate(merge_init_with_values(keys, values), + keys, values) + + return TimsortImplementation( + wrap, + count_run, binarysort, gallop_left, gallop_right, + merge_init, merge_append, merge_pop, + merge_compute_minrun, merge_lo, merge_hi, merge_at, + merge_force_collapse, merge_collapse, + run_timsort, run_timsort_with_values) + + +def make_py_timsort(*args): + return make_timsort_impl((lambda f: f), *args) + +def make_jit_timsort(*args): + from numba import jit + return make_timsort_impl((lambda f: jit(nopython=True)(f)), + *args) diff --git a/venv/lib/python3.10/site-packages/numba/np/__init__.py b/venv/lib/python3.10/site-packages/numba/np/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/numba/np/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ae1847c856b29f2293b67ec38273f59de1c7a50 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/__pycache__/arraymath.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/__pycache__/arraymath.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..392414f31a6e3779dd46eb1a24685065c5e01e9d Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/__pycache__/arraymath.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/__pycache__/extensions.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/__pycache__/extensions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d374cc38d762c160efade27677353d83344f3354 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/__pycache__/extensions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/__pycache__/linalg.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/__pycache__/linalg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ca7f7d8ceb513a9d31994e13a0f90a1dc56ca81 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/__pycache__/linalg.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/__pycache__/npdatetime.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/__pycache__/npdatetime.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e010a46a967cc2e67e3414217715490af5a29867 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/__pycache__/npdatetime.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/__pycache__/npdatetime_helpers.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/__pycache__/npdatetime_helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e6bf4861e212a18899287698ff4e8429826dd43 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/__pycache__/npdatetime_helpers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/__pycache__/npyfuncs.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/numba/np/__pycache__/npyfuncs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b2c44ac4f5c97bc46afcf01f426b2d3d65bab7a Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/__pycache__/npyfuncs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/__pycache__/npyimpl.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/__pycache__/npyimpl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4193618ff946155dea3733ba980b8e6dd9dc6903 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/__pycache__/npyimpl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/__pycache__/numpy_support.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/__pycache__/numpy_support.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..326e5eab0cf838b85e91d2411e4c711a79ae9ed8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/__pycache__/numpy_support.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/__pycache__/ufunc_db.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/__pycache__/ufunc_db.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7edcc2031f3dfdb820db47152342814a7e4299a9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/__pycache__/ufunc_db.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/arraymath.py b/venv/lib/python3.10/site-packages/numba/np/arraymath.py new file mode 100644 index 0000000000000000000000000000000000000000..1d4d2ef15e8d106c407655027088d578ffc28c5a --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/arraymath.py @@ -0,0 +1,10 @@ +import sys +from numba.core.utils import _RedirectSubpackage +from numba.core import config + +if config.USE_LEGACY_TYPE_SYSTEM: + 
sys.modules[__name__] = _RedirectSubpackage(locals(), + "numba.np.old_arraymath") +else: + sys.modules[__name__] = _RedirectSubpackage(locals(), + "numba.np.new_arraymath") diff --git a/venv/lib/python3.10/site-packages/numba/np/arrayobj.py b/venv/lib/python3.10/site-packages/numba/np/arrayobj.py new file mode 100644 index 0000000000000000000000000000000000000000..712668fa8a995aa9000e05be75f1581e92e75a5e --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/arrayobj.py @@ -0,0 +1,7034 @@ +""" +Implementation of operations on Array objects and objects supporting +the buffer protocol. +""" + +import functools +import math +import operator +import textwrap + +from llvmlite import ir +from llvmlite.ir import Constant + +import numpy as np + +from numba import pndindex, literal_unroll +from numba.core import types, typing, errors, cgutils, extending, config +from numba.np.numpy_support import (as_dtype, from_dtype, carray, farray, + is_contiguous, is_fortran, + check_is_integer, type_is_scalar, + lt_complex, lt_floats) +from numba.np.numpy_support import type_can_asarray, is_nonelike, numpy_version +from numba.core.imputils import (lower_builtin, lower_getattr, + lower_getattr_generic, + lower_setattr_generic, + lower_cast, lower_constant, + iternext_impl, impl_ret_borrowed, + impl_ret_new_ref, impl_ret_untracked, + RefType) +from numba.core.typing import signature +from numba.core.types import StringLiteral +from numba.core.extending import (register_jitable, overload, overload_method, + intrinsic, overload_attribute) +from numba.misc import quicksort, mergesort +from numba.cpython import slicing +from numba.cpython.unsafe.tuple import tuple_setitem, build_full_slice_tuple +from numba.core.extending import overload_classmethod +from numba.core.typing.npydecl import (parse_dtype as ty_parse_dtype, + parse_shape as ty_parse_shape, + _parse_nested_sequence, + _sequence_of_arrays, + _choose_concatenation_layout) + + +def set_range_metadata(builder, load, 
lower_bound, upper_bound): + """ + Set the "range" metadata on a load instruction. + Note the interval is in the form [lower_bound, upper_bound). + """ + range_operands = [Constant(load.type, lower_bound), + Constant(load.type, upper_bound)] + md = builder.module.add_metadata(range_operands) + load.set_metadata("range", md) + + +def mark_positive(builder, load): + """ + Mark the result of a load instruction as positive (or zero). + """ + upper_bound = (1 << (load.type.width - 1)) - 1 + set_range_metadata(builder, load, 0, upper_bound) + + +def make_array(array_type): + """ + Return the Structure representation of the given *array_type* + (an instance of types.ArrayCompatible). + + Note this does not call __array_wrap__ in case a new array structure + is being created (rather than populated). + """ + real_array_type = array_type.as_array + base = cgutils.create_struct_proxy(real_array_type) + ndim = real_array_type.ndim + + class ArrayStruct(base): + + def _make_refs(self, ref): + sig = signature(real_array_type, array_type) + try: + array_impl = self._context.get_function('__array__', sig) + except NotImplementedError: + return super(ArrayStruct, self)._make_refs(ref) + + # Return a wrapped structure and its unwrapped reference + datamodel = self._context.data_model_manager[array_type] + be_type = self._get_be_type(datamodel) + if ref is None: + outer_ref = cgutils.alloca_once(self._builder, be_type, + zfill=True) + else: + outer_ref = ref + # NOTE: __array__ is called with a pointer and expects a pointer + # in return! + ref = array_impl(self._builder, (outer_ref,)) + return outer_ref, ref + + @property + def shape(self): + """ + Override .shape to inform LLVM that its elements are all positive. 
+ """ + builder = self._builder + if ndim == 0: + return base.__getattr__(self, "shape") + + # Unfortunately, we can't use llvm.assume as its presence can + # seriously pessimize performance, + # *and* the range metadata currently isn't improving anything here, + # see https://llvm.org/bugs/show_bug.cgi?id=23848 ! + ptr = self._get_ptr_by_name("shape") + dims = [] + for i in range(ndim): + dimptr = cgutils.gep_inbounds(builder, ptr, 0, i) + load = builder.load(dimptr) + dims.append(load) + mark_positive(builder, load) + + return cgutils.pack_array(builder, dims) + + return ArrayStruct + + +def get_itemsize(context, array_type): + """ + Return the item size for the given array or buffer type. + """ + llty = context.get_data_type(array_type.dtype) + return context.get_abi_sizeof(llty) + + +def load_item(context, builder, arrayty, ptr): + """ + Load the item at the given array pointer. + """ + align = None if arrayty.aligned else 1 + return context.unpack_value(builder, arrayty.dtype, ptr, + align=align) + + +def store_item(context, builder, arrayty, val, ptr): + """ + Store the item at the given array pointer. + """ + align = None if arrayty.aligned else 1 + return context.pack_value(builder, arrayty.dtype, val, ptr, align=align) + + +def fix_integer_index(context, builder, idxty, idx, size): + """ + Fix the integer index' type and value for the given dimension size. + """ + if idxty.signed: + ind = context.cast(builder, idx, idxty, types.intp) + ind = slicing.fix_index(builder, ind, size) + else: + ind = context.cast(builder, idx, idxty, types.uintp) + return ind + + +def normalize_index(context, builder, idxty, idx): + """ + Normalize the index type and value. 0-d arrays are converted to scalars. 
+ """ + if isinstance(idxty, types.Array) and idxty.ndim == 0: + assert isinstance(idxty.dtype, types.Integer) + idxary = make_array(idxty)(context, builder, idx) + idxval = load_item(context, builder, idxty, idxary.data) + return idxty.dtype, idxval + else: + return idxty, idx + + +def normalize_indices(context, builder, index_types, indices): + """ + Same as normalize_index(), but operating on sequences of + index types and values. + """ + if len(indices): + index_types, indices = zip(*[normalize_index(context, builder, idxty, + idx) + for idxty, idx in zip(index_types, indices) + ]) + return index_types, indices + + +def populate_array(array, data, shape, strides, itemsize, meminfo, + parent=None): + """ + Helper function for populating array structures. + This avoids forgetting to set fields. + + *shape* and *strides* can be Python tuples or LLVM arrays. + """ + context = array._context + builder = array._builder + datamodel = array._datamodel + # doesn't matter what this array type instance is, it's just to get the + # fields for the datamodel of the standard array type in this context + standard_array = types.Array(types.float64, 1, 'C') + standard_array_type_datamodel = context.data_model_manager[standard_array] + required_fields = set(standard_array_type_datamodel._fields) + datamodel_fields = set(datamodel._fields) + # Make sure that the presented array object has a data model that is close + # enough to an array for this function to proceed. 
+ if (required_fields & datamodel_fields) != required_fields: + missing = required_fields - datamodel_fields + msg = (f"The datamodel for type {array._fe_type} is missing " + f"field{'s' if len(missing) > 1 else ''} {missing}.") + raise ValueError(msg) + + if meminfo is None: + meminfo = Constant(context.get_value_type( + datamodel.get_type('meminfo')), None) + + intp_t = context.get_value_type(types.intp) + if isinstance(shape, (tuple, list)): + shape = cgutils.pack_array(builder, shape, intp_t) + if isinstance(strides, (tuple, list)): + strides = cgutils.pack_array(builder, strides, intp_t) + if isinstance(itemsize, int): + itemsize = intp_t(itemsize) + + attrs = dict(shape=shape, + strides=strides, + data=data, + itemsize=itemsize, + meminfo=meminfo,) + + # Set `parent` attribute + if parent is None: + attrs['parent'] = Constant(context.get_value_type( + datamodel.get_type('parent')), None) + else: + attrs['parent'] = parent + # Calc num of items from shape + nitems = context.get_constant(types.intp, 1) + unpacked_shape = cgutils.unpack_tuple(builder, shape, shape.type.count) + # (note empty shape => 0d array therefore nitems = 1) + for axlen in unpacked_shape: + nitems = builder.mul(nitems, axlen, flags=['nsw']) + attrs['nitems'] = nitems + + # Make sure that we have all the fields + got_fields = set(attrs.keys()) + if got_fields != required_fields: + raise ValueError("missing {0}".format(required_fields - got_fields)) + + # Set field value + for k, v in attrs.items(): + setattr(array, k, v) + + return array + + +def update_array_info(aryty, array): + """ + Update some auxiliary information in *array* after some of its fields + were changed. `itemsize` and `nitems` are updated. 
+ """ + context = array._context + builder = array._builder + + # Calc num of items from shape + nitems = context.get_constant(types.intp, 1) + unpacked_shape = cgutils.unpack_tuple(builder, array.shape, aryty.ndim) + for axlen in unpacked_shape: + nitems = builder.mul(nitems, axlen, flags=['nsw']) + array.nitems = nitems + + array.itemsize = context.get_constant(types.intp, + get_itemsize(context, aryty)) + + +def normalize_axis(func_name, arg_name, ndim, axis): + """Constrain axis values to valid positive values.""" + raise NotImplementedError() + + +@overload(normalize_axis) +def normalize_axis_overloads(func_name, arg_name, ndim, axis): + if not isinstance(func_name, StringLiteral): + raise errors.TypingError("func_name must be a str literal.") + if not isinstance(arg_name, StringLiteral): + raise errors.TypingError("arg_name must be a str literal.") + + msg = ( + f"{func_name.literal_value}: Argument {arg_name.literal_value} " + "out of bounds for dimensions of the array" + ) + + def impl(func_name, arg_name, ndim, axis): + if axis < 0: + axis += ndim + if axis < 0 or axis >= ndim: + raise ValueError(msg) + + return axis + + return impl + + +@lower_builtin('getiter', types.Buffer) +def getiter_array(context, builder, sig, args): + [arrayty] = sig.args + [array] = args + + iterobj = context.make_helper(builder, sig.return_type) + + zero = context.get_constant(types.intp, 0) + indexptr = cgutils.alloca_once_value(builder, zero) + + iterobj.index = indexptr + iterobj.array = array + + # Incref array + if context.enable_nrt: + context.nrt.incref(builder, arrayty, array) + + res = iterobj._getvalue() + + # Note: a decref on the iterator will dereference all internal MemInfo* + out = impl_ret_new_ref(context, builder, sig.return_type, res) + return out + + +def _getitem_array_single_int(context, builder, return_type, aryty, ary, idx): + """ Evaluate `ary[idx]`, where idx is a single int. 
""" + # optimized form of _getitem_array_generic + shapes = cgutils.unpack_tuple(builder, ary.shape, count=aryty.ndim) + strides = cgutils.unpack_tuple(builder, ary.strides, count=aryty.ndim) + offset = builder.mul(strides[0], idx) + dataptr = cgutils.pointer_add(builder, ary.data, offset) + view_shapes = shapes[1:] + view_strides = strides[1:] + + if isinstance(return_type, types.Buffer): + # Build array view + retary = make_view(context, builder, aryty, ary, return_type, + dataptr, view_shapes, view_strides) + return retary._getvalue() + else: + # Load scalar from 0-d result + assert not view_shapes + return load_item(context, builder, aryty, dataptr) + + +@lower_builtin('iternext', types.ArrayIterator) +@iternext_impl(RefType.BORROWED) +def iternext_array(context, builder, sig, args, result): + [iterty] = sig.args + [iter] = args + arrayty = iterty.array_type + + iterobj = context.make_helper(builder, iterty, value=iter) + ary = make_array(arrayty)(context, builder, value=iterobj.array) + + nitems, = cgutils.unpack_tuple(builder, ary.shape, count=1) + + index = builder.load(iterobj.index) + is_valid = builder.icmp_signed('<', index, nitems) + result.set_valid(is_valid) + + with builder.if_then(is_valid): + value = _getitem_array_single_int( + context, builder, iterty.yield_type, arrayty, ary, index + ) + result.yield_(value) + nindex = cgutils.increment_index(builder, index) + builder.store(nindex, iterobj.index) + + +# ------------------------------------------------------------------------------ +# Basic indexing (with integers and slices only) + +def basic_indexing(context, builder, aryty, ary, index_types, indices, + boundscheck=None): + """ + Perform basic indexing on the given array. + A (data pointer, shapes, strides) tuple is returned describing + the corresponding view. 
+ """ + zero = context.get_constant(types.intp, 0) + one = context.get_constant(types.intp, 1) + + shapes = cgutils.unpack_tuple(builder, ary.shape, aryty.ndim) + strides = cgutils.unpack_tuple(builder, ary.strides, aryty.ndim) + + output_indices = [] + output_shapes = [] + output_strides = [] + + num_newaxes = len([idx for idx in index_types if is_nonelike(idx)]) + ax = 0 + for indexval, idxty in zip(indices, index_types): + if idxty is types.ellipsis: + # Fill up missing dimensions at the middle + n_missing = aryty.ndim - len(indices) + 1 + num_newaxes + for i in range(n_missing): + output_indices.append(zero) + output_shapes.append(shapes[ax]) + output_strides.append(strides[ax]) + ax += 1 + continue + # Regular index value + if isinstance(idxty, types.SliceType): + slice = context.make_helper(builder, idxty, value=indexval) + slicing.guard_invalid_slice(context, builder, idxty, slice) + slicing.fix_slice(builder, slice, shapes[ax]) + output_indices.append(slice.start) + sh = slicing.get_slice_length(builder, slice) + st = slicing.fix_stride(builder, slice, strides[ax]) + output_shapes.append(sh) + output_strides.append(st) + elif isinstance(idxty, types.Integer): + ind = fix_integer_index(context, builder, idxty, indexval, + shapes[ax]) + if boundscheck: + cgutils.do_boundscheck(context, builder, ind, shapes[ax], ax) + output_indices.append(ind) + elif is_nonelike(idxty): + output_shapes.append(one) + output_strides.append(zero) + ax -= 1 + else: + raise NotImplementedError("unexpected index type: %s" % (idxty,)) + ax += 1 + + # Fill up missing dimensions at the end + assert ax <= aryty.ndim + while ax < aryty.ndim: + output_shapes.append(shapes[ax]) + output_strides.append(strides[ax]) + ax += 1 + + # No need to check wraparound, as negative indices were already + # fixed in the loop above. 
+ dataptr = cgutils.get_item_pointer(context, builder, aryty, ary, + output_indices, + wraparound=False, boundscheck=False) + return (dataptr, output_shapes, output_strides) + + +def make_view(context, builder, aryty, ary, return_type, + data, shapes, strides): + """ + Build a view over the given array with the given parameters. + """ + retary = make_array(return_type)(context, builder) + populate_array(retary, + data=data, + shape=shapes, + strides=strides, + itemsize=ary.itemsize, + meminfo=ary.meminfo, + parent=ary.parent) + return retary + + +def _getitem_array_generic(context, builder, return_type, aryty, ary, + index_types, indices): + """ + Return the result of indexing *ary* with the given *indices*, + returning either a scalar or a view. + """ + dataptr, view_shapes, view_strides = \ + basic_indexing(context, builder, aryty, ary, index_types, indices, + boundscheck=context.enable_boundscheck) + + if isinstance(return_type, types.Buffer): + # Build array view + retary = make_view(context, builder, aryty, ary, return_type, + dataptr, view_shapes, view_strides) + return retary._getvalue() + else: + # Load scalar from 0-d result + assert not view_shapes + return load_item(context, builder, aryty, dataptr) + + +@lower_builtin(operator.getitem, types.Buffer, types.Integer) +@lower_builtin(operator.getitem, types.Buffer, types.SliceType) +def getitem_arraynd_intp(context, builder, sig, args): + """ + Basic indexing with an integer or a slice. + """ + aryty, idxty = sig.args + ary, idx = args + + assert aryty.ndim >= 1 + ary = make_array(aryty)(context, builder, ary) + + res = _getitem_array_generic(context, builder, sig.return_type, + aryty, ary, (idxty,), (idx,)) + return impl_ret_borrowed(context, builder, sig.return_type, res) + + +@lower_builtin(operator.getitem, types.Buffer, types.BaseTuple) +def getitem_array_tuple(context, builder, sig, args): + """ + Basic or advanced indexing with a tuple. 
+ """ + aryty, tupty = sig.args + ary, tup = args + ary = make_array(aryty)(context, builder, ary) + + index_types = tupty.types + indices = cgutils.unpack_tuple(builder, tup, count=len(tupty)) + + index_types, indices = normalize_indices(context, builder, + index_types, indices) + + if any(isinstance(ty, types.Array) for ty in index_types): + # Advanced indexing + return fancy_getitem(context, builder, sig, args, + aryty, ary, index_types, indices) + + res = _getitem_array_generic(context, builder, sig.return_type, + aryty, ary, index_types, indices) + return impl_ret_borrowed(context, builder, sig.return_type, res) + + +@lower_builtin(operator.setitem, types.Buffer, types.Any, types.Any) +def setitem_array(context, builder, sig, args): + """ + array[a] = scalar_or_array + array[a,..,b] = scalar_or_array + """ + aryty, idxty, valty = sig.args + ary, idx, val = args + + if isinstance(idxty, types.BaseTuple): + index_types = idxty.types + indices = cgutils.unpack_tuple(builder, idx, count=len(idxty)) + else: + index_types = (idxty,) + indices = (idx,) + + ary = make_array(aryty)(context, builder, ary) + + # First try basic indexing to see if a single array location is denoted. 
+ index_types, indices = normalize_indices(context, builder, + index_types, indices) + try: + dataptr, shapes, strides = \ + basic_indexing(context, builder, aryty, ary, index_types, indices, + boundscheck=context.enable_boundscheck) + except NotImplementedError: + use_fancy_indexing = True + else: + use_fancy_indexing = bool(shapes) + + if use_fancy_indexing: + # Index describes a non-trivial view => use generic slice assignment + # (NOTE: this also handles scalar broadcasting) + return fancy_setslice(context, builder, sig, args, + index_types, indices) + + # Store source value the given location + val = context.cast(builder, val, valty, aryty.dtype) + store_item(context, builder, aryty, val, dataptr) + + +@lower_builtin(len, types.Buffer) +def array_len(context, builder, sig, args): + (aryty,) = sig.args + (ary,) = args + arystty = make_array(aryty) + ary = arystty(context, builder, ary) + shapeary = ary.shape + res = builder.extract_value(shapeary, 0) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +@lower_builtin("array.item", types.Array) +def array_item(context, builder, sig, args): + aryty, = sig.args + ary, = args + ary = make_array(aryty)(context, builder, ary) + + nitems = ary.nitems + with builder.if_then(builder.icmp_signed('!=', nitems, nitems.type(1)), + likely=False): + msg = "item(): can only convert an array of size 1 to a Python scalar" + context.call_conv.return_user_exc(builder, ValueError, (msg,)) + + return load_item(context, builder, aryty, ary.data) + + +if numpy_version < (2, 0): + @lower_builtin("array.itemset", types.Array, types.Any) + def array_itemset(context, builder, sig, args): + aryty, valty = sig.args + ary, val = args + assert valty == aryty.dtype + ary = make_array(aryty)(context, builder, ary) + + nitems = ary.nitems + with builder.if_then(builder.icmp_signed('!=', nitems, nitems.type(1)), + likely=False): + msg = "itemset(): can only write to an array of size 1" + 
context.call_conv.return_user_exc(builder, ValueError, (msg,)) + + store_item(context, builder, aryty, val, ary.data) + return context.get_dummy_value() + + +# ------------------------------------------------------------------------------ +# Advanced / fancy indexing + + +class Indexer(object): + """ + Generic indexer interface, for generating indices over a fancy indexed + array on a single dimension. + """ + + def prepare(self): + """ + Prepare the indexer by initializing any required variables, basic + blocks... + """ + raise NotImplementedError + + def get_size(self): + """ + Return this dimension's size as an integer. + """ + raise NotImplementedError + + def get_shape(self): + """ + Return this dimension's shape as a tuple. + """ + raise NotImplementedError + + def get_index_bounds(self): + """ + Return a half-open [lower, upper) range of indices this dimension + is guaranteed not to step out of. + """ + raise NotImplementedError + + def loop_head(self): + """ + Start indexation loop. Return a (index, count) tuple. + *index* is an integer LLVM value representing the index over this + dimension. + *count* is either an integer LLVM value representing the current + iteration count, or None if this dimension should be omitted from + the indexation result. + """ + raise NotImplementedError + + def loop_tail(self): + """ + Finish indexation loop. + """ + raise NotImplementedError + + +class EntireIndexer(Indexer): + """ + Compute indices along an entire array dimension. 
+ """ + + def __init__(self, context, builder, aryty, ary, dim): + self.context = context + self.builder = builder + self.aryty = aryty + self.ary = ary + self.dim = dim + self.ll_intp = self.context.get_value_type(types.intp) + + def prepare(self): + builder = self.builder + self.size = builder.extract_value(self.ary.shape, self.dim) + self.index = cgutils.alloca_once(builder, self.ll_intp) + self.bb_start = builder.append_basic_block() + self.bb_end = builder.append_basic_block() + + def get_size(self): + return self.size + + def get_shape(self): + return (self.size,) + + def get_index_bounds(self): + # [0, size) + return (self.ll_intp(0), self.size) + + def loop_head(self): + builder = self.builder + # Initialize loop variable + self.builder.store(Constant(self.ll_intp, 0), self.index) + builder.branch(self.bb_start) + builder.position_at_end(self.bb_start) + cur_index = builder.load(self.index) + with builder.if_then(builder.icmp_signed('>=', cur_index, self.size), + likely=False): + builder.branch(self.bb_end) + return cur_index, cur_index + + def loop_tail(self): + builder = self.builder + next_index = cgutils.increment_index(builder, builder.load(self.index)) + builder.store(next_index, self.index) + builder.branch(self.bb_start) + builder.position_at_end(self.bb_end) + + +class IntegerIndexer(Indexer): + """ + Compute indices from a single integer. + """ + + def __init__(self, context, builder, idx): + self.context = context + self.builder = builder + self.idx = idx + self.ll_intp = self.context.get_value_type(types.intp) + + def prepare(self): + pass + + def get_size(self): + return Constant(self.ll_intp, 1) + + def get_shape(self): + return () + + def get_index_bounds(self): + # [idx, idx+1) + return (self.idx, self.builder.add(self.idx, self.get_size())) + + def loop_head(self): + return self.idx, None + + def loop_tail(self): + pass + + +class IntegerArrayIndexer(Indexer): + """ + Compute indices from an array of integer indices. 
+ """ + + def __init__(self, context, builder, idxty, idxary, size): + self.context = context + self.builder = builder + self.idxty = idxty + self.idxary = idxary + self.size = size + assert idxty.ndim == 1 + self.ll_intp = self.context.get_value_type(types.intp) + + def prepare(self): + builder = self.builder + self.idx_size = cgutils.unpack_tuple(builder, self.idxary.shape)[0] + self.idx_index = cgutils.alloca_once(builder, self.ll_intp) + self.bb_start = builder.append_basic_block() + self.bb_end = builder.append_basic_block() + + def get_size(self): + return self.idx_size + + def get_shape(self): + return (self.idx_size,) + + def get_index_bounds(self): + # Pessimal heuristic, as we don't want to scan for the min and max + return (self.ll_intp(0), self.size) + + def loop_head(self): + builder = self.builder + # Initialize loop variable + self.builder.store(Constant(self.ll_intp, 0), self.idx_index) + builder.branch(self.bb_start) + builder.position_at_end(self.bb_start) + cur_index = builder.load(self.idx_index) + with builder.if_then( + builder.icmp_signed('>=', cur_index, self.idx_size), + likely=False + ): + builder.branch(self.bb_end) + # Load the actual index from the array of indices + index = _getitem_array_single_int( + self.context, builder, self.idxty.dtype, self.idxty, self.idxary, + cur_index + ) + index = fix_integer_index(self.context, builder, + self.idxty.dtype, index, self.size) + return index, cur_index + + def loop_tail(self): + builder = self.builder + next_index = cgutils.increment_index(builder, + builder.load(self.idx_index)) + builder.store(next_index, self.idx_index) + builder.branch(self.bb_start) + builder.position_at_end(self.bb_end) + + +class BooleanArrayIndexer(Indexer): + """ + Compute indices from an array of boolean predicates. 
+ """ + + def __init__(self, context, builder, idxty, idxary): + self.context = context + self.builder = builder + self.idxty = idxty + self.idxary = idxary + assert idxty.ndim == 1 + self.ll_intp = self.context.get_value_type(types.intp) + self.zero = Constant(self.ll_intp, 0) + + def prepare(self): + builder = self.builder + self.size = cgutils.unpack_tuple(builder, self.idxary.shape)[0] + self.idx_index = cgutils.alloca_once(builder, self.ll_intp) + self.count = cgutils.alloca_once(builder, self.ll_intp) + self.bb_start = builder.append_basic_block() + self.bb_tail = builder.append_basic_block() + self.bb_end = builder.append_basic_block() + + def get_size(self): + builder = self.builder + count = cgutils.alloca_once_value(builder, self.zero) + # Sum all true values + with cgutils.for_range(builder, self.size) as loop: + c = builder.load(count) + pred = _getitem_array_single_int( + self.context, builder, self.idxty.dtype, + self.idxty, self.idxary, loop.index + ) + c = builder.add(c, builder.zext(pred, c.type)) + builder.store(c, count) + + return builder.load(count) + + def get_shape(self): + return (self.get_size(),) + + def get_index_bounds(self): + # Pessimal heuristic, as we don't want to scan for the + # first and last true items + return (self.ll_intp(0), self.size) + + def loop_head(self): + builder = self.builder + # Initialize loop variable + self.builder.store(self.zero, self.idx_index) + self.builder.store(self.zero, self.count) + builder.branch(self.bb_start) + builder.position_at_end(self.bb_start) + cur_index = builder.load(self.idx_index) + cur_count = builder.load(self.count) + with builder.if_then(builder.icmp_signed('>=', cur_index, self.size), + likely=False): + builder.branch(self.bb_end) + # Load the predicate and branch if false + pred = _getitem_array_single_int( + self.context, builder, self.idxty.dtype, self.idxty, self.idxary, + cur_index + ) + with builder.if_then(builder.not_(pred)): + builder.branch(self.bb_tail) + # Increment the 
count for next iteration + next_count = cgutils.increment_index(builder, cur_count) + builder.store(next_count, self.count) + return cur_index, cur_count + + def loop_tail(self): + builder = self.builder + builder.branch(self.bb_tail) + builder.position_at_end(self.bb_tail) + next_index = cgutils.increment_index(builder, + builder.load(self.idx_index)) + builder.store(next_index, self.idx_index) + builder.branch(self.bb_start) + builder.position_at_end(self.bb_end) + + +class SliceIndexer(Indexer): + """ + Compute indices along a slice. + """ + + def __init__(self, context, builder, aryty, ary, dim, idxty, slice): + self.context = context + self.builder = builder + self.aryty = aryty + self.ary = ary + self.dim = dim + self.idxty = idxty + self.slice = slice + self.ll_intp = self.context.get_value_type(types.intp) + self.zero = Constant(self.ll_intp, 0) + + def prepare(self): + builder = self.builder + # Fix slice for the dimension's size + self.dim_size = builder.extract_value(self.ary.shape, self.dim) + slicing.guard_invalid_slice(self.context, builder, self.idxty, + self.slice) + slicing.fix_slice(builder, self.slice, self.dim_size) + self.is_step_negative = cgutils.is_neg_int(builder, self.slice.step) + # Create loop entities + self.index = cgutils.alloca_once(builder, self.ll_intp) + self.count = cgutils.alloca_once(builder, self.ll_intp) + self.bb_start = builder.append_basic_block() + self.bb_end = builder.append_basic_block() + + def get_size(self): + return slicing.get_slice_length(self.builder, self.slice) + + def get_shape(self): + return (self.get_size(),) + + def get_index_bounds(self): + lower, upper = slicing.get_slice_bounds(self.builder, self.slice) + return lower, upper + + def loop_head(self): + builder = self.builder + # Initialize loop variable + self.builder.store(self.slice.start, self.index) + self.builder.store(self.zero, self.count) + builder.branch(self.bb_start) + builder.position_at_end(self.bb_start) + cur_index = 
builder.load(self.index) + cur_count = builder.load(self.count) + is_finished = builder.select(self.is_step_negative, + builder.icmp_signed('<=', cur_index, + self.slice.stop), + builder.icmp_signed('>=', cur_index, + self.slice.stop)) + with builder.if_then(is_finished, likely=False): + builder.branch(self.bb_end) + return cur_index, cur_count + + def loop_tail(self): + builder = self.builder + next_index = builder.add(builder.load(self.index), self.slice.step, + flags=['nsw']) + builder.store(next_index, self.index) + next_count = cgutils.increment_index(builder, builder.load(self.count)) + builder.store(next_count, self.count) + builder.branch(self.bb_start) + builder.position_at_end(self.bb_end) + + +class FancyIndexer(object): + """ + Perform fancy indexing on the given array. + """ + + def __init__(self, context, builder, aryty, ary, index_types, indices): + self.context = context + self.builder = builder + self.aryty = aryty + self.shapes = cgutils.unpack_tuple(builder, ary.shape, aryty.ndim) + self.strides = cgutils.unpack_tuple(builder, ary.strides, aryty.ndim) + self.ll_intp = self.context.get_value_type(types.intp) + self.newaxes = [] + + indexers = [] + num_newaxes = len([idx for idx in index_types if is_nonelike(idx)]) + + ax = 0 # keeps track of position of original axes + new_ax = 0 # keeps track of position for inserting new axes + for indexval, idxty in zip(indices, index_types): + if idxty is types.ellipsis: + # Fill up missing dimensions at the middle + n_missing = aryty.ndim - len(indices) + 1 + num_newaxes + for i in range(n_missing): + indexer = EntireIndexer(context, builder, aryty, ary, ax) + indexers.append(indexer) + ax += 1 + new_ax += 1 + continue + + # Regular index value + if isinstance(idxty, types.SliceType): + slice = context.make_helper(builder, idxty, indexval) + indexer = SliceIndexer(context, builder, aryty, ary, ax, + idxty, slice) + indexers.append(indexer) + elif isinstance(idxty, types.Integer): + ind = 
fix_integer_index(context, builder, idxty, indexval, + self.shapes[ax]) + indexer = IntegerIndexer(context, builder, ind) + indexers.append(indexer) + elif isinstance(idxty, types.Array): + idxary = make_array(idxty)(context, builder, indexval) + if isinstance(idxty.dtype, types.Integer): + indexer = IntegerArrayIndexer(context, builder, + idxty, idxary, + self.shapes[ax]) + elif isinstance(idxty.dtype, types.Boolean): + indexer = BooleanArrayIndexer(context, builder, + idxty, idxary) + else: + assert 0 + indexers.append(indexer) + elif is_nonelike(idxty): + self.newaxes.append(new_ax) + ax -= 1 + else: + raise AssertionError("unexpected index type: %s" % (idxty,)) + ax += 1 + new_ax += 1 + + # Fill up missing dimensions at the end + assert ax <= aryty.ndim, (ax, aryty.ndim) + while ax < aryty.ndim: + indexer = EntireIndexer(context, builder, aryty, ary, ax) + indexers.append(indexer) + ax += 1 + + assert len(indexers) == aryty.ndim, (len(indexers), aryty.ndim) + self.indexers = indexers + + def prepare(self): + for i in self.indexers: + i.prepare() + + one = self.context.get_constant(types.intp, 1) + + # Compute the resulting shape given by the indices + res_shape = [i.get_shape() for i in self.indexers] + + # At every position where newaxis/None is present insert + # one as a constant shape in the resulting list of shapes. + for i in self.newaxes: + res_shape.insert(i, (one,)) + + # Store the shape as a tuple, we can't do a simple + # tuple(res_shape) here since res_shape is a list + # of tuples which may be differently sized. + self.indexers_shape = sum(res_shape, ()) + + def get_shape(self): + """ + Get the resulting data shape as Python tuple. + """ + return self.indexers_shape + + def get_offset_bounds(self, strides, itemsize): + """ + Get a half-open [lower, upper) range of byte offsets spanned by + the indexer with the given strides and itemsize. The indexer is + guaranteed to not go past those bounds. 
+ """ + assert len(strides) == self.aryty.ndim + builder = self.builder + is_empty = cgutils.false_bit + zero = self.ll_intp(0) + one = self.ll_intp(1) + lower = zero + upper = zero + for indexer, shape, stride in zip(self.indexers, self.indexers_shape, + strides): + is_empty = builder.or_(is_empty, + builder.icmp_unsigned('==', shape, zero)) + # Compute [lower, upper) indices on this dimension + lower_index, upper_index = indexer.get_index_bounds() + lower_offset = builder.mul(stride, lower_index) + upper_offset = builder.mul(stride, builder.sub(upper_index, one)) + # Adjust total interval + is_downwards = builder.icmp_signed('<', stride, zero) + lower = builder.add(lower, + builder.select(is_downwards, + upper_offset, + lower_offset)) + upper = builder.add(upper, + builder.select(is_downwards, + lower_offset, + upper_offset)) + # Make interval half-open + upper = builder.add(upper, itemsize) + # Adjust for empty shape + lower = builder.select(is_empty, zero, lower) + upper = builder.select(is_empty, zero, upper) + return lower, upper + + def begin_loops(self): + indices, counts = zip(*(i.loop_head() for i in self.indexers)) + return indices, counts + + def end_loops(self): + for i in reversed(self.indexers): + i.loop_tail() + + +def fancy_getitem(context, builder, sig, args, + aryty, ary, index_types, indices): + + shapes = cgutils.unpack_tuple(builder, ary.shape) + strides = cgutils.unpack_tuple(builder, ary.strides) + data = ary.data + + indexer = FancyIndexer(context, builder, aryty, ary, + index_types, indices) + indexer.prepare() + + # Construct output array + out_ty = sig.return_type + out_shapes = indexer.get_shape() + + out = _empty_nd_impl(context, builder, out_ty, out_shapes) + out_data = out.data + out_idx = cgutils.alloca_once_value(builder, + context.get_constant(types.intp, 0)) + + # Loop on source and copy to destination + indices, _ = indexer.begin_loops() + + # No need to check for wraparound, as the indexers all ensure + # a positive index is 
returned. + ptr = cgutils.get_item_pointer2(context, builder, data, shapes, strides, + aryty.layout, indices, wraparound=False, + boundscheck=context.enable_boundscheck) + val = load_item(context, builder, aryty, ptr) + + # Since the destination is C-contiguous, no need for multi-dimensional + # indexing. + cur = builder.load(out_idx) + ptr = builder.gep(out_data, [cur]) + store_item(context, builder, out_ty, val, ptr) + next_idx = cgutils.increment_index(builder, cur) + builder.store(next_idx, out_idx) + + indexer.end_loops() + + return impl_ret_new_ref(context, builder, out_ty, out._getvalue()) + + +@lower_builtin(operator.getitem, types.Buffer, types.Array) +def fancy_getitem_array(context, builder, sig, args): + """ + Advanced or basic indexing with an array. + """ + aryty, idxty = sig.args + ary, idx = args + ary = make_array(aryty)(context, builder, ary) + if idxty.ndim == 0: + # 0-d array index acts as a basic integer index + idxty, idx = normalize_index(context, builder, idxty, idx) + res = _getitem_array_generic(context, builder, sig.return_type, + aryty, ary, (idxty,), (idx,)) + return impl_ret_borrowed(context, builder, sig.return_type, res) + else: + # Advanced indexing + return fancy_getitem(context, builder, sig, args, + aryty, ary, (idxty,), (idx,)) + + +def offset_bounds_from_strides(context, builder, arrty, arr, shapes, strides): + """ + Compute a half-open range [lower, upper) of byte offsets from the + array's data pointer, that bound the in-memory extent of the array. 
+ + This mimics offset_bounds_from_strides() from + numpy/core/src/private/mem_overlap.c + """ + itemsize = arr.itemsize + zero = itemsize.type(0) + one = zero.type(1) + if arrty.layout in 'CF': + # Array is contiguous: contents are laid out sequentially + # starting from arr.data and upwards + lower = zero + upper = builder.mul(itemsize, arr.nitems) + else: + # Non-contiguous array: need to examine strides + lower = zero + upper = zero + for i in range(arrty.ndim): + # Compute the largest byte offset on this dimension + # max_axis_offset = strides[i] * (shapes[i] - 1) + # (shapes[i] == 0 is catered for by the empty array case below) + max_axis_offset = builder.mul(strides[i], + builder.sub(shapes[i], one)) + is_upwards = builder.icmp_signed('>=', max_axis_offset, zero) + # Expand either upwards or downwards depending on stride + upper = builder.select(is_upwards, + builder.add(upper, max_axis_offset), upper) + lower = builder.select(is_upwards, + lower, builder.add(lower, max_axis_offset)) + # Return a half-open range + upper = builder.add(upper, itemsize) + # Adjust for empty arrays + is_empty = builder.icmp_signed('==', arr.nitems, zero) + upper = builder.select(is_empty, zero, upper) + lower = builder.select(is_empty, zero, lower) + + return lower, upper + + +def compute_memory_extents(context, builder, lower, upper, data): + """ + Given [lower, upper) byte offsets and a base data pointer, + compute the memory pointer bounds as pointer-sized integers. + """ + data_ptr_as_int = builder.ptrtoint(data, lower.type) + start = builder.add(data_ptr_as_int, lower) + end = builder.add(data_ptr_as_int, upper) + return start, end + + +def get_array_memory_extents(context, builder, arrty, arr, shapes, strides, + data): + """ + Compute a half-open range [start, end) of pointer-sized integers + which fully contain the array data. 
+ """ + lower, upper = offset_bounds_from_strides(context, builder, arrty, arr, + shapes, strides) + return compute_memory_extents(context, builder, lower, upper, data) + + +def extents_may_overlap(context, builder, a_start, a_end, b_start, b_end): + """ + Whether two memory extents [a_start, a_end) and [b_start, b_end) + may overlap. + """ + # Comparisons are unsigned, since we are really comparing pointers + may_overlap = builder.and_( + builder.icmp_unsigned('<', a_start, b_end), + builder.icmp_unsigned('<', b_start, a_end), + ) + return may_overlap + + +def maybe_copy_source(context, builder, use_copy, + srcty, src, src_shapes, src_strides, src_data): + ptrty = src_data.type + + copy_layout = 'C' + copy_data = cgutils.alloca_once_value(builder, src_data) + copy_shapes = src_shapes + copy_strides = None # unneeded for contiguous arrays + + with builder.if_then(use_copy, likely=False): + # Allocate temporary scratchpad + # XXX: should we use a stack-allocated array for very small + # data sizes? 
+ allocsize = builder.mul(src.itemsize, src.nitems) + data = context.nrt.allocate(builder, allocsize) + voidptrty = data.type + data = builder.bitcast(data, ptrty) + builder.store(data, copy_data) + + # Copy source data into scratchpad + intp_t = context.get_value_type(types.intp) + + with cgutils.loop_nest(builder, src_shapes, intp_t) as indices: + src_ptr = cgutils.get_item_pointer2(context, builder, src_data, + src_shapes, src_strides, + srcty.layout, indices) + dest_ptr = cgutils.get_item_pointer2(context, builder, data, + copy_shapes, copy_strides, + copy_layout, indices) + builder.store(builder.load(src_ptr), dest_ptr) + + def src_getitem(source_indices): + src_ptr = cgutils.alloca_once(builder, ptrty) + with builder.if_else(use_copy, likely=False) as (if_copy, otherwise): + with if_copy: + builder.store( + cgutils.get_item_pointer2(context, builder, + builder.load(copy_data), + copy_shapes, copy_strides, + copy_layout, source_indices, + wraparound=False), + src_ptr) + with otherwise: + builder.store( + cgutils.get_item_pointer2(context, builder, src_data, + src_shapes, src_strides, + srcty.layout, source_indices, + wraparound=False), + src_ptr) + return load_item(context, builder, srcty, builder.load(src_ptr)) + + def src_cleanup(): + # Deallocate memory + with builder.if_then(use_copy, likely=False): + data = builder.load(copy_data) + data = builder.bitcast(data, voidptrty) + context.nrt.free(builder, data) + + return src_getitem, src_cleanup + + +def _bc_adjust_dimension(context, builder, shapes, strides, target_shape): + """ + Preprocess dimension for broadcasting. + Returns (shapes, strides) such that the ndim match *target_shape*. + When expanding to higher ndim, the returning shapes and strides are + prepended with ones and zeros, respectively. + When truncating to lower ndim, the shapes are checked (in runtime). + All extra dimension must have size of 1. 
+ """ + zero = context.get_constant(types.uintp, 0) + one = context.get_constant(types.uintp, 1) + + # Adjust for broadcasting to higher dimension + if len(target_shape) > len(shapes): + nd_diff = len(target_shape) - len(shapes) + # Fill missing shapes with one, strides with zeros + shapes = [one] * nd_diff + shapes + strides = [zero] * nd_diff + strides + # Adjust for broadcasting to lower dimension + elif len(target_shape) < len(shapes): + # Accepted if all extra dims has shape 1 + nd_diff = len(shapes) - len(target_shape) + dim_is_one = [builder.icmp_unsigned('==', sh, one) + for sh in shapes[:nd_diff]] + accepted = functools.reduce(builder.and_, dim_is_one, + cgutils.true_bit) + # Check error + with builder.if_then(builder.not_(accepted), likely=False): + msg = "cannot broadcast source array for assignment" + context.call_conv.return_user_exc(builder, ValueError, (msg,)) + # Truncate extra shapes, strides + shapes = shapes[nd_diff:] + strides = strides[nd_diff:] + + return shapes, strides + + +def _bc_adjust_shape_strides(context, builder, shapes, strides, target_shape): + """ + Broadcast shapes and strides to target_shape given that their ndim already + matches. For each location where the shape is 1 and does not match the + dim for target, it is set to the value at the target and the stride is + set to zero. 
+ """ + bc_shapes = [] + bc_strides = [] + zero = context.get_constant(types.uintp, 0) + one = context.get_constant(types.uintp, 1) + # Adjust all mismatching ones in shape + mismatch = [builder.icmp_signed('!=', tar, old) + for tar, old in zip(target_shape, shapes)] + src_is_one = [builder.icmp_signed('==', old, one) for old in shapes] + preds = [builder.and_(x, y) for x, y in zip(mismatch, src_is_one)] + bc_shapes = [builder.select(p, tar, old) + for p, tar, old in zip(preds, target_shape, shapes)] + bc_strides = [builder.select(p, zero, old) + for p, old in zip(preds, strides)] + return bc_shapes, bc_strides + + +def _broadcast_to_shape(context, builder, arrtype, arr, target_shape): + """ + Broadcast the given array to the target_shape. + Returns (array_type, array) + """ + # Compute broadcasted shape and strides + shapes = cgutils.unpack_tuple(builder, arr.shape) + strides = cgutils.unpack_tuple(builder, arr.strides) + + shapes, strides = _bc_adjust_dimension(context, builder, shapes, strides, + target_shape) + shapes, strides = _bc_adjust_shape_strides(context, builder, shapes, + strides, target_shape) + new_arrtype = arrtype.copy(ndim=len(target_shape), layout='A') + # Create new view + new_arr = make_array(new_arrtype)(context, builder) + populate_array(new_arr, + data=arr.data, + shape=cgutils.pack_array(builder, shapes), + strides=cgutils.pack_array(builder, strides), + itemsize=arr.itemsize, + meminfo=arr.meminfo, + parent=arr.parent) + return new_arrtype, new_arr + + +@intrinsic +def _numpy_broadcast_to(typingctx, array, shape): + ret = array.copy(ndim=shape.count, layout='A', readonly=True) + sig = ret(array, shape) + + def codegen(context, builder, sig, args): + src, shape_ = args + srcty = sig.args[0] + + src = make_array(srcty)(context, builder, src) + shape_ = cgutils.unpack_tuple(builder, shape_) + _, dest = _broadcast_to_shape(context, builder, srcty, src, shape_,) + + # Hack to get np.broadcast_to to return a read-only array + setattr(dest, 
'parent', Constant( + context.get_value_type(dest._datamodel.get_type('parent')), + None)) + + res = dest._getvalue() + return impl_ret_borrowed(context, builder, sig.return_type, res) + return sig, codegen + + +@intrinsic +def get_readonly_array(typingctx, arr): + # returns a copy of arr which is readonly + ret = arr.copy(readonly=True) + sig = ret(arr) + + def codegen(context, builder, sig, args): + [src] = args + srcty = sig.args[0] + + dest = make_array(srcty)(context, builder, src) + # Hack to return a read-only array + dest.parent = cgutils.get_null_value(dest.parent.type) + res = dest._getvalue() + return impl_ret_borrowed(context, builder, sig.return_type, res) + return sig, codegen + + +@register_jitable +def _can_broadcast(array, dest_shape): + src_shape = array.shape + src_ndim = len(src_shape) + dest_ndim = len(dest_shape) + if src_ndim > dest_ndim: + raise ValueError('input operand has more dimensions than allowed ' + 'by the axis remapping') + for size in dest_shape: + if size < 0: + raise ValueError('all elements of broadcast shape must be ' + 'non-negative') + + # based on _broadcast_onto function in numba/np/npyimpl.py + src_index = 0 + dest_index = dest_ndim - src_ndim + while src_index < src_ndim: + src_dim = src_shape[src_index] + dest_dim = dest_shape[dest_index] + # possible cases for (src_dim, dest_dim): + # * (1, 1) -> Ok + # * (>1, 1) -> Error! + # * (>1, >1) -> src_dim == dest_dim else error! 
+ # * (1, >1) -> Ok + if src_dim == dest_dim or src_dim == 1: + src_index += 1 + dest_index += 1 + else: + raise ValueError('operands could not be broadcast together ' + 'with remapped shapes') + + +def _default_broadcast_to_impl(array, shape): + array = np.asarray(array) + _can_broadcast(array, shape) + return _numpy_broadcast_to(array, shape) + + +@overload(np.broadcast_to) +def numpy_broadcast_to(array, shape): + if not type_can_asarray(array): + raise errors.TypingError('The first argument "array" must ' + 'be array-like') + + if isinstance(shape, types.Integer): + def impl(array, shape): + return np.broadcast_to(array, (shape,)) + return impl + + elif isinstance(shape, types.UniTuple): + if not isinstance(shape.dtype, types.Integer): + msg = 'The second argument "shape" must be a tuple of integers' + raise errors.TypingError(msg) + return _default_broadcast_to_impl + + elif isinstance(shape, types.Tuple) and shape.count > 0: + # check if all types are integers + if not all([isinstance(typ, types.IntegerLiteral) for typ in shape]): + msg = f'"{shape}" object cannot be interpreted as an integer' + raise errors.TypingError(msg) + return _default_broadcast_to_impl + elif isinstance(shape, types.Tuple) and shape.count == 0: + is_scalar_array = isinstance(array, types.Array) and array.ndim == 0 + if type_is_scalar(array) or is_scalar_array: + + def impl(array, shape): # broadcast_to(array, ()) + # Array type must be supported by "type_can_asarray" + # Quick note that unicode types are not supported! + array = np.asarray(array) + return get_readonly_array(array) + return impl + + else: + msg = 'Cannot broadcast a non-scalar to a scalar array' + raise errors.TypingError(msg) + else: + msg = ('The argument "shape" must be a tuple or an integer. 
' + 'Got %s' % shape) + raise errors.TypingError(msg) + + +@register_jitable +def numpy_broadcast_shapes_list(r, m, shape): + for i in range(len(shape)): + k = m - len(shape) + i + tmp = shape[i] + if tmp < 0: + raise ValueError("negative dimensions are not allowed") + if tmp == 1: + continue + if r[k] == 1: + r[k] = tmp + elif r[k] != tmp: + raise ValueError("shape mismatch: objects" + " cannot be broadcast" + " to a single shape") + + +@overload(np.broadcast_shapes) +def ol_numpy_broadcast_shapes(*args): + # Based on https://github.com/numpy/numpy/blob/f702b26fff3271ba6a6ba29a021fc19051d1f007/numpy/core/src/multiarray/iterators.c#L1129-L1212 # noqa + for idx, arg in enumerate(args): + is_int = isinstance(arg, types.Integer) + is_int_tuple = isinstance(arg, types.UniTuple) and \ + isinstance(arg.dtype, types.Integer) + is_empty_tuple = isinstance(arg, types.Tuple) and len(arg.types) == 0 + if not (is_int or is_int_tuple or is_empty_tuple): + msg = (f'Argument {idx} must be either an int or tuple[int]. 
' + f'Got {arg}') + raise errors.TypingError(msg) + + # discover the number of dimensions + m = 0 + for arg in args: + if isinstance(arg, types.Integer): + m = max(m, 1) + elif isinstance(arg, types.BaseTuple): + m = max(m, len(arg)) + + if m == 0: + return lambda *args: () + else: + tup_init = (1,) * m + + def impl(*args): + # propagate args + r = [1] * m + tup = tup_init + for arg in literal_unroll(args): + if isinstance(arg, tuple) and len(arg) > 0: + numpy_broadcast_shapes_list(r, m, arg) + elif isinstance(arg, int): + numpy_broadcast_shapes_list(r, m, (arg,)) + for idx, elem in enumerate(r): + tup = tuple_setitem(tup, idx, elem) + return tup + return impl + + +@overload(np.broadcast_arrays) +def numpy_broadcast_arrays(*args): + + for idx, arg in enumerate(args): + if not type_can_asarray(arg): + raise errors.TypingError(f'Argument "{idx}" must ' + 'be array-like') + + unified_dtype = None + dt = None + for arg in args: + if isinstance(arg, (types.Array, types.BaseTuple)): + dt = arg.dtype + else: + dt = arg + + if unified_dtype is None: + unified_dtype = dt + elif unified_dtype != dt: + raise errors.TypingError('Mismatch of argument types. Numba cannot ' + 'broadcast arrays with different types. 
' + f'Got {args}') + + # number of dimensions + m = 0 + for idx, arg in enumerate(args): + if isinstance(arg, types.ArrayCompatible): + m = max(m, arg.ndim) + elif isinstance(arg, (types.Number, types.Boolean, types.BaseTuple)): + m = max(m, 1) + else: + raise errors.TypingError(f'Unhandled type {arg}') + + tup_init = (0,) * m + + def impl(*args): + # find out the output shape + # we can't call np.broadcast_shapes here since args may have arrays + # with different shapes and it is not possible to create a list + # with those shapes dynamically + shape = [1] * m + for array in literal_unroll(args): + numpy_broadcast_shapes_list(shape, m, np.asarray(array).shape) + + tup = tup_init + + for i in range(m): + tup = tuple_setitem(tup, i, shape[i]) + + # numpy checks if the input arrays have the same shape as `shape` + outs = [] + for array in literal_unroll(args): + outs.append(np.broadcast_to(np.asarray(array), tup)) + return outs + + return impl + + +def raise_with_shape_context(src_shapes, index_shape): + """Targets should implement this if they wish to specialize the error + handling/messages. The overload implementation takes two tuples as arguments + and should raise a ValueError.""" + raise NotImplementedError + + +@overload(raise_with_shape_context, target="generic") +def ol_raise_with_shape_context_generic(src_shapes, index_shape): + # This overload is for a "generic" target, which makes no assumption about + # the NRT or string support, but does assume exceptions can be raised. 
+ if (isinstance(src_shapes, types.UniTuple) and + isinstance(index_shape, types.UniTuple) and + src_shapes.dtype == index_shape.dtype and + isinstance(src_shapes.dtype, types.Integer)): + + def impl(src_shapes, index_shape): + raise ValueError("cannot assign slice from input of different size") + return impl + + +@overload(raise_with_shape_context, target="CPU") +def ol_raise_with_shape_context_cpu(src_shapes, index_shape): + if (isinstance(src_shapes, types.UniTuple) and + isinstance(index_shape, types.UniTuple) and + src_shapes.dtype == index_shape.dtype and + isinstance(src_shapes.dtype, types.Integer)): + + def impl(src_shapes, index_shape): + if len(src_shapes) == 1: + shape_str = f"({src_shapes[0]},)" + else: + shape_str = f"({', '.join([str(x) for x in src_shapes])})" + if len(index_shape) == 1: + index_str = f"({index_shape[0]},)" + else: + index_str = f"({', '.join([str(x) for x in index_shape])})" + msg = (f"cannot assign slice of shape {shape_str} from input of " + f"shape {index_str}") + raise ValueError(msg) + return impl + + +def fancy_setslice(context, builder, sig, args, index_types, indices): + """ + Implement slice assignment for arrays. This implementation works for + basic as well as fancy indexing, since there's no functional difference + between the two for indexed assignment. + """ + aryty, _, srcty = sig.args + ary, _, src = args + + ary = make_array(aryty)(context, builder, ary) + dest_shapes = cgutils.unpack_tuple(builder, ary.shape) + dest_strides = cgutils.unpack_tuple(builder, ary.strides) + dest_data = ary.data + + indexer = FancyIndexer(context, builder, aryty, ary, + index_types, indices) + indexer.prepare() + + def raise_shape_mismatch_error(context, builder, src_shapes, index_shape): + # This acts as the "trampoline" to raise a ValueError in the case + # of the source and destination shapes mismatch at runtime. 
It resolves + # the public overload stub `raise_with_shape_context` + fnty = context.typing_context.resolve_value_type( + raise_with_shape_context) + argtys = (types.UniTuple(types.int64, len(src_shapes)), + types.UniTuple(types.int64, len(index_shape))) + raise_sig = fnty.get_call_type(context.typing_context, argtys, {}) + func = context.get_function(fnty, raise_sig) + func(builder, (context.make_tuple(builder, raise_sig.args[0], + src_shapes), + context.make_tuple(builder, raise_sig.args[1], + index_shape))) + + if isinstance(srcty, types.Buffer): + # Source is an array + src_dtype = srcty.dtype + index_shape = indexer.get_shape() + src = make_array(srcty)(context, builder, src) + # Broadcast source array to shape + srcty, src = _broadcast_to_shape(context, builder, srcty, src, + index_shape) + src_shapes = cgutils.unpack_tuple(builder, src.shape) + src_strides = cgutils.unpack_tuple(builder, src.strides) + src_data = src.data + + # Check shapes are equal + shape_error = cgutils.false_bit + assert len(index_shape) == len(src_shapes) + + for u, v in zip(src_shapes, index_shape): + shape_error = builder.or_(shape_error, + builder.icmp_signed('!=', u, v)) + + with builder.if_then(shape_error, likely=False): + raise_shape_mismatch_error(context, builder, src_shapes, + index_shape) + + # Check for array overlap + src_start, src_end = get_array_memory_extents(context, builder, srcty, + src, src_shapes, + src_strides, src_data) + + dest_lower, dest_upper = indexer.get_offset_bounds(dest_strides, + ary.itemsize) + dest_start, dest_end = compute_memory_extents(context, builder, + dest_lower, dest_upper, + dest_data) + + use_copy = extents_may_overlap(context, builder, src_start, src_end, + dest_start, dest_end) + + src_getitem, src_cleanup = maybe_copy_source(context, builder, use_copy, + srcty, src, src_shapes, + src_strides, src_data) + + elif isinstance(srcty, types.Sequence): + src_dtype = srcty.dtype + + # Check shape is equal to sequence length + index_shape = 
indexer.get_shape() + assert len(index_shape) == 1 + len_impl = context.get_function(len, signature(types.intp, srcty)) + seq_len = len_impl(builder, (src,)) + + shape_error = builder.icmp_signed('!=', index_shape[0], seq_len) + + with builder.if_then(shape_error, likely=False): + raise_shape_mismatch_error(context, builder, (seq_len,), + (index_shape[0],)) + + def src_getitem(source_indices): + idx, = source_indices + getitem_impl = context.get_function( + operator.getitem, + signature(src_dtype, srcty, types.intp), + ) + return getitem_impl(builder, (src, idx)) + + def src_cleanup(): + pass + + else: + # Source is a scalar (broadcast or not, depending on destination + # shape). + src_dtype = srcty + + def src_getitem(source_indices): + return src + + def src_cleanup(): + pass + + zero = context.get_constant(types.uintp, 0) + # Loop on destination and copy from source to destination + dest_indices, counts = indexer.begin_loops() + + # Source is iterated in natural order + + # Counts represent a counter for the number of times a specified axis + # is being accessed, during setitem they are used as source + # indices + counts = list(counts) + + # We need to artifically introduce the index zero wherever a + # newaxis is present within the indexer. These always remain + # zero. + for i in indexer.newaxes: + counts.insert(i, zero) + + source_indices = [c for c in counts if c is not None] + + val = src_getitem(source_indices) + + # Cast to the destination dtype (cross-dtype slice assignment is allowed) + val = context.cast(builder, val, src_dtype, aryty.dtype) + + # No need to check for wraparound, as the indexers all ensure + # a positive index is returned. 
+ dest_ptr = cgutils.get_item_pointer2(context, builder, dest_data, + dest_shapes, dest_strides, + aryty.layout, dest_indices, + wraparound=False, + boundscheck=context.enable_boundscheck) + store_item(context, builder, aryty, val, dest_ptr) + + indexer.end_loops() + + src_cleanup() + + return context.get_dummy_value() + + +# ------------------------------------------------------------------------------ +# Shape / layout altering + +def vararg_to_tuple(context, builder, sig, args): + aryty = sig.args[0] + dimtys = sig.args[1:] + # values + ary = args[0] + dims = args[1:] + # coerce all types to intp + dims = [context.cast(builder, val, ty, types.intp) + for ty, val in zip(dimtys, dims)] + # make a tuple + shape = cgutils.pack_array(builder, dims, dims[0].type) + + shapety = types.UniTuple(dtype=types.intp, count=len(dims)) + new_sig = typing.signature(sig.return_type, aryty, shapety) + new_args = ary, shape + + return new_sig, new_args + + +@lower_builtin('array.transpose', types.Array) +def array_transpose(context, builder, sig, args): + return array_T(context, builder, sig.args[0], args[0]) + + +def permute_arrays(axis, shape, strides): + if len(axis) != len(set(axis)): + raise ValueError("repeated axis in transpose") + dim = len(shape) + for x in axis: + if x >= dim or abs(x) > dim: + raise ValueError("axis is out of bounds for array of " + "given dimension") + + shape[:] = shape[axis] + strides[:] = strides[axis] + + +# Transposing an array involves permuting the shape and strides of the array +# based on the given axes. +@lower_builtin('array.transpose', types.Array, types.BaseTuple) +def array_transpose_tuple(context, builder, sig, args): + aryty = sig.args[0] + ary = make_array(aryty)(context, builder, args[0]) + + axisty, axis = sig.args[1], args[1] + num_axis, dtype = axisty.count, axisty.dtype + + ll_intp = context.get_value_type(types.intp) + ll_ary_size = ir.ArrayType(ll_intp, num_axis) + + # Allocate memory for axes, shapes, and strides arrays. 
+ arys = [axis, ary.shape, ary.strides] + ll_arys = [cgutils.alloca_once(builder, ll_ary_size) for _ in arys] + + # Store axes, shapes, and strides arrays to the allocated memory. + for src, dst in zip(arys, ll_arys): + builder.store(src, dst) + + np_ary_ty = types.Array(dtype=dtype, ndim=1, layout='C') + np_itemsize = context.get_constant(types.intp, + context.get_abi_sizeof(ll_intp)) + + # Form NumPy arrays for axes, shapes, and strides arrays. + np_arys = [make_array(np_ary_ty)(context, builder) for _ in arys] + + # Roughly, `np_ary = np.array(ll_ary)` for each of axes, shapes, and strides + for np_ary, ll_ary in zip(np_arys, ll_arys): + populate_array(np_ary, + data=builder.bitcast(ll_ary, ll_intp.as_pointer()), + shape=[context.get_constant(types.intp, num_axis)], + strides=[np_itemsize], + itemsize=np_itemsize, + meminfo=None) + + # Pass NumPy arrays formed above to permute_arrays function that permutes + # shapes and strides based on axis contents. + context.compile_internal(builder, permute_arrays, + typing.signature(types.void, + np_ary_ty, np_ary_ty, np_ary_ty), + [a._getvalue() for a in np_arys]) + + # Make a new array based on permuted shape and strides and return it. 
+ ret = make_array(sig.return_type)(context, builder) + populate_array(ret, + data=ary.data, + shape=builder.load(ll_arys[1]), + strides=builder.load(ll_arys[2]), + itemsize=ary.itemsize, + meminfo=ary.meminfo, + parent=ary.parent) + res = ret._getvalue() + return impl_ret_borrowed(context, builder, sig.return_type, res) + + +@lower_builtin('array.transpose', types.Array, types.VarArg(types.Any)) +def array_transpose_vararg(context, builder, sig, args): + new_sig, new_args = vararg_to_tuple(context, builder, sig, args) + return array_transpose_tuple(context, builder, new_sig, new_args) + + +@overload(np.transpose) +def numpy_transpose(a, axes=None): + if isinstance(a, types.BaseTuple): + raise errors.TypingError("np.transpose does not accept tuples") + + if axes is None: + def np_transpose_impl(a, axes=None): + return a.transpose() + else: + def np_transpose_impl(a, axes=None): + return a.transpose(axes) + + return np_transpose_impl + + +@lower_getattr(types.Array, 'T') +def array_T(context, builder, typ, value): + if typ.ndim <= 1: + res = value + else: + ary = make_array(typ)(context, builder, value) + ret = make_array(typ)(context, builder) + shapes = cgutils.unpack_tuple(builder, ary.shape, typ.ndim) + strides = cgutils.unpack_tuple(builder, ary.strides, typ.ndim) + populate_array(ret, + data=ary.data, + shape=cgutils.pack_array(builder, shapes[::-1]), + strides=cgutils.pack_array(builder, strides[::-1]), + itemsize=ary.itemsize, + meminfo=ary.meminfo, + parent=ary.parent) + res = ret._getvalue() + return impl_ret_borrowed(context, builder, typ, res) + + +@overload(np.logspace) +def numpy_logspace(start, stop, num=50): + if not isinstance(start, types.Number): + raise errors.TypingError('The first argument "start" must be a number') + if not isinstance(stop, types.Number): + raise errors.TypingError('The second argument "stop" must be a number') + if not isinstance(num, (int, types.Integer)): + raise errors.TypingError('The third argument "num" must be an 
integer') + + def impl(start, stop, num=50): + y = np.linspace(start, stop, num) + return np.power(10.0, y) + + return impl + + +@overload(np.geomspace) +def numpy_geomspace(start, stop, num=50): + if not isinstance(start, types.Number): + msg = 'The argument "start" must be a number' + raise errors.TypingError(msg) + + if not isinstance(stop, types.Number): + msg = 'The argument "stop" must be a number' + raise errors.TypingError(msg) + + if not isinstance(num, (int, types.Integer)): + msg = 'The argument "num" must be an integer' + raise errors.TypingError(msg) + + if any(isinstance(arg, types.Complex) for arg in [start, stop]): + result_dtype = from_dtype(np.result_type(as_dtype(start), + as_dtype(stop), None)) + + def impl(start, stop, num=50): + if start == 0 or stop == 0: + raise ValueError('Geometric sequence cannot include zero') + start = result_dtype(start) + stop = result_dtype(stop) + if numpy_version < (2, 0): + both_imaginary = (start.real == 0) & (stop.real == 0) + both_negative = (np.sign(start) == -1) & (np.sign(stop) == -1) + out_sign = 1 + if both_imaginary: + start = start.imag + stop = stop.imag + out_sign = 1j + if both_negative: + start = -start + stop = -stop + out_sign = -out_sign + else: + out_sign = np.sign(start) + start /= out_sign + stop /= out_sign + + logstart = np.log10(start) + logstop = np.log10(stop) + result = np.logspace(logstart, logstop, num) + # Make sure the endpoints match the start and stop arguments. + # This is necessary because np.exp(np.log(x)) is not necessarily + # equal to x. 
+ if num > 0: + result[0] = start + if num > 1: + result[-1] = stop + return out_sign * result + + else: + def impl(start, stop, num=50): + if start == 0 or stop == 0: + raise ValueError('Geometric sequence cannot include zero') + both_negative = (np.sign(start) == -1) & (np.sign(stop) == -1) + out_sign = 1 + if both_negative: + start = -start + stop = -stop + out_sign = -out_sign + logstart = np.log10(start) + logstop = np.log10(stop) + result = np.logspace(logstart, logstop, num) + # Make sure the endpoints match the start and stop arguments. + # This is necessary because np.exp(np.log(x)) is not necessarily + # equal to x. + if num > 0: + result[0] = start + if num > 1: + result[-1] = stop + return out_sign * result + + return impl + + +@overload(np.rot90) +def numpy_rot90(m, k=1): + # supporting axes argument it needs to be included in np.flip + if not isinstance(k, (int, types.Integer)): + raise errors.TypingError('The second argument "k" must be an integer') + if not isinstance(m, types.Array): + raise errors.TypingError('The first argument "m" must be an array') + + if m.ndim < 2: + raise errors.NumbaValueError('Input must be >= 2-d.') + + def impl(m, k=1): + k = k % 4 + if k == 0: + return m[:] + elif k == 1: + return np.swapaxes(np.fliplr(m), 0, 1) + elif k == 2: + return np.flipud(np.fliplr(m)) + elif k == 3: + return np.fliplr(np.swapaxes(m, 0, 1)) + else: + raise AssertionError # unreachable + + return impl + + +def _attempt_nocopy_reshape(context, builder, aryty, ary, + newnd, newshape, newstrides): + """ + Call into Numba_attempt_nocopy_reshape() for the given array type + and instance, and the specified new shape. + + Return value is non-zero if successful, and the array pointed to + by *newstrides* will be filled up with the computed results. 
+ """ + ll_intp = context.get_value_type(types.intp) + ll_intp_star = ll_intp.as_pointer() + ll_intc = context.get_value_type(types.intc) + fnty = ir.FunctionType(ll_intc, [ + # nd, *dims, *strides + ll_intp, ll_intp_star, ll_intp_star, + # newnd, *newdims, *newstrides + ll_intp, ll_intp_star, ll_intp_star, + # itemsize, is_f_order + ll_intp, ll_intc]) + fn = cgutils.get_or_insert_function(builder.module, fnty, + "numba_attempt_nocopy_reshape") + + nd = ll_intp(aryty.ndim) + shape = cgutils.gep_inbounds(builder, ary._get_ptr_by_name('shape'), 0, 0) + strides = cgutils.gep_inbounds(builder, ary._get_ptr_by_name('strides'), + 0, 0) + newnd = ll_intp(newnd) + newshape = cgutils.gep_inbounds(builder, newshape, 0, 0) + newstrides = cgutils.gep_inbounds(builder, newstrides, 0, 0) + is_f_order = ll_intc(0) + res = builder.call(fn, [nd, shape, strides, + newnd, newshape, newstrides, + ary.itemsize, is_f_order]) + return res + + +def normalize_reshape_value(origsize, shape): + num_neg_value = 0 + known_size = 1 + for ax, s in enumerate(shape): + if s < 0: + num_neg_value += 1 + neg_ax = ax + else: + known_size *= s + + if num_neg_value == 0: + if origsize != known_size: + raise ValueError("total size of new array must be unchanged") + + elif num_neg_value == 1: + # Infer negative dimension + if known_size == 0: + inferred = 0 + ok = origsize == 0 + else: + inferred = origsize // known_size + ok = origsize % known_size == 0 + if not ok: + raise ValueError("total size of new array must be unchanged") + shape[neg_ax] = inferred + + else: + raise ValueError("multiple negative shape values") + + +@lower_builtin('array.reshape', types.Array, types.BaseTuple) +def array_reshape(context, builder, sig, args): + aryty = sig.args[0] + retty = sig.return_type + + shapety = sig.args[1] + shape = args[1] + + ll_intp = context.get_value_type(types.intp) + ll_shape = ir.ArrayType(ll_intp, shapety.count) + + ary = make_array(aryty)(context, builder, args[0]) + + # We will change the target 
shape in this slot + # (see normalize_reshape_value() below) + newshape = cgutils.alloca_once(builder, ll_shape) + builder.store(shape, newshape) + + # Create a shape array pointing to the value of newshape. + # (roughly, `shape_ary = np.array(ary.shape)`) + shape_ary_ty = types.Array(dtype=shapety.dtype, ndim=1, layout='C') + shape_ary = make_array(shape_ary_ty)(context, builder) + shape_itemsize = context.get_constant(types.intp, + context.get_abi_sizeof(ll_intp)) + populate_array(shape_ary, + data=builder.bitcast(newshape, ll_intp.as_pointer()), + shape=[context.get_constant(types.intp, shapety.count)], + strides=[shape_itemsize], + itemsize=shape_itemsize, + meminfo=None) + + # Compute the original array size + size = ary.nitems + + # Call our normalizer which will fix the shape array in case of negative + # shape value + context.compile_internal(builder, normalize_reshape_value, + typing.signature(types.void, + types.uintp, shape_ary_ty), + [size, shape_ary._getvalue()]) + + # Perform reshape (nocopy) + newnd = shapety.count + newstrides = cgutils.alloca_once(builder, ll_shape) + + ok = _attempt_nocopy_reshape(context, builder, aryty, ary, newnd, + newshape, newstrides) + fail = builder.icmp_unsigned('==', ok, ok.type(0)) + + with builder.if_then(fail): + msg = "incompatible shape for array" + context.call_conv.return_user_exc(builder, NotImplementedError, (msg,)) + + ret = make_array(retty)(context, builder) + populate_array(ret, + data=ary.data, + shape=builder.load(newshape), + strides=builder.load(newstrides), + itemsize=ary.itemsize, + meminfo=ary.meminfo, + parent=ary.parent) + res = ret._getvalue() + return impl_ret_borrowed(context, builder, sig.return_type, res) + + +@lower_builtin('array.reshape', types.Array, types.VarArg(types.Any)) +def array_reshape_vararg(context, builder, sig, args): + new_sig, new_args = vararg_to_tuple(context, builder, sig, args) + return array_reshape(context, builder, new_sig, new_args) + + +if numpy_version < (2, 1): + 
@overload(np.reshape) + def np_reshape(a, newshape): + def np_reshape_impl(a, newshape): + return a.reshape(newshape) + return np_reshape_impl +else: + @overload(np.reshape) + def np_reshape(a, shape): + def np_reshape_impl(a, shape): + return a.reshape(shape) + return np_reshape_impl + + +@overload(np.resize) +def numpy_resize(a, new_shape): + + if not type_can_asarray(a): + msg = 'The argument "a" must be array-like' + raise errors.TypingError(msg) + + if not ((isinstance(new_shape, types.UniTuple) + and + isinstance(new_shape.dtype, types.Integer)) + or + isinstance(new_shape, types.Integer)): + msg = ('The argument "new_shape" must be an integer or ' + 'a tuple of integers') + raise errors.TypingError(msg) + + def impl(a, new_shape): + a = np.asarray(a) + a = np.ravel(a) + + if isinstance(new_shape, tuple): + new_size = 1 + for dim_length in np.asarray(new_shape): + new_size *= dim_length + if dim_length < 0: + msg = 'All elements of `new_shape` must be non-negative' + raise ValueError(msg) + else: + if new_shape < 0: + msg2 = 'All elements of `new_shape` must be non-negative' + raise ValueError(msg2) + new_size = new_shape + + if a.size == 0: + return np.zeros(new_shape).astype(a.dtype) + + repeats = -(-new_size // a.size) # ceil division + res = a + for i in range(repeats - 1): + res = np.concatenate((res, a)) + res = res[:new_size] + + return np.reshape(res, new_shape) + + return impl + + +@overload(np.append) +def np_append(arr, values, axis=None): + + if not type_can_asarray(arr): + raise errors.TypingError('The first argument "arr" must be array-like') + + if not type_can_asarray(values): + raise errors.TypingError('The second argument "values" must be ' + 'array-like') + + if is_nonelike(axis): + def impl(arr, values, axis=None): + arr = np.ravel(np.asarray(arr)) + values = np.ravel(np.asarray(values)) + return np.concatenate((arr, values)) + else: + + if not isinstance(axis, types.Integer): + raise errors.TypingError('The third argument "axis" must be 
an ' + 'integer') + + def impl(arr, values, axis=None): + return np.concatenate((arr, values), axis=axis) + return impl + + +@lower_builtin('array.ravel', types.Array) +def array_ravel(context, builder, sig, args): + # Only support no argument version (default order='C') + def imp_nocopy(ary): + """No copy version""" + return ary.reshape(ary.size) + + def imp_copy(ary): + """Copy version""" + return ary.flatten() + + # If the input array is C layout already, use the nocopy version + if sig.args[0].layout == 'C': + imp = imp_nocopy + # otherwise, use flatten under-the-hood + else: + imp = imp_copy + + res = context.compile_internal(builder, imp, sig, args) + res = impl_ret_new_ref(context, builder, sig.return_type, res) + return res + + +@lower_builtin(np.ravel, types.Array) +def np_ravel(context, builder, sig, args): + def np_ravel_impl(a): + return a.ravel() + + return context.compile_internal(builder, np_ravel_impl, sig, args) + + +@lower_builtin('array.flatten', types.Array) +def array_flatten(context, builder, sig, args): + # Only support flattening to C layout currently. 
+ def imp(ary): + return ary.copy().reshape(ary.size) + + res = context.compile_internal(builder, imp, sig, args) + res = impl_ret_new_ref(context, builder, sig.return_type, res) + return res + + +@register_jitable +def _np_clip_impl(a, a_min, a_max, out): + # Both a_min and a_max are numpy arrays + ret = np.empty_like(a) if out is None else out + a_b, a_min_b, a_max_b = np.broadcast_arrays(a, a_min, a_max) + for index in np.ndindex(a_b.shape): + val_a = a_b[index] + val_a_min = a_min_b[index] + val_a_max = a_max_b[index] + ret[index] = min(max(val_a, val_a_min), val_a_max) + + return ret + + +@register_jitable +def _np_clip_impl_none(a, b, use_min, out): + for index in np.ndindex(a.shape): + val_a = a[index] + val_b = b[index] + if use_min: + out[index] = min(val_a, val_b) + else: + out[index] = max(val_a, val_b) + return out + + +@overload(np.clip) +def np_clip(a, a_min, a_max, out=None): + if not type_can_asarray(a): + raise errors.TypingError('The argument "a" must be array-like') + + if (not isinstance(a_min, types.NoneType) and + not type_can_asarray(a_min)): + raise errors.TypingError(('The argument "a_min" must be a number ' + 'or an array-like')) + + if (not isinstance(a_max, types.NoneType) and + not type_can_asarray(a_max)): + raise errors.TypingError('The argument "a_max" must be a number ' + 'or an array-like') + + if not (isinstance(out, types.Array) or is_nonelike(out)): + msg = 'The argument "out" must be an array if it is provided' + raise errors.TypingError(msg) + + # TODO: support scalar a (issue #3469) + a_min_is_none = a_min is None or isinstance(a_min, types.NoneType) + a_max_is_none = a_max is None or isinstance(a_max, types.NoneType) + + if a_min_is_none and a_max_is_none: + # Raises value error when both a_min and a_max are None + def np_clip_nn(a, a_min, a_max, out=None): + raise ValueError("array_clip: must set either max or min") + + return np_clip_nn + + a_min_is_scalar = isinstance(a_min, types.Number) + a_max_is_scalar = 
isinstance(a_max, types.Number) + + if a_min_is_scalar and a_max_is_scalar: + def np_clip_ss(a, a_min, a_max, out=None): + # a_min and a_max are scalars + # since their shape will be empty + # so broadcasting is not needed at all + ret = np.empty_like(a) if out is None else out + for index in np.ndindex(a.shape): + val_a = a[index] + ret[index] = min(max(val_a, a_min), a_max) + + return ret + + return np_clip_ss + elif a_min_is_scalar and not a_max_is_scalar: + if a_max_is_none: + def np_clip_sn(a, a_min, a_max, out=None): + # a_min is a scalar + # since its shape will be empty + # so broadcasting is not needed at all + ret = np.empty_like(a) if out is None else out + for index in np.ndindex(a.shape): + val_a = a[index] + ret[index] = max(val_a, a_min) + + return ret + + return np_clip_sn + else: + def np_clip_sa(a, a_min, a_max, out=None): + # a_min is a scalar + # since its shape will be empty + # broadcast it to shape of a + # by using np.full_like + a_min_full = np.full_like(a, a_min) + return _np_clip_impl(a, a_min_full, a_max, out) + + return np_clip_sa + elif not a_min_is_scalar and a_max_is_scalar: + if a_min_is_none: + def np_clip_ns(a, a_min, a_max, out=None): + # a_max is a scalar + # since its shape will be empty + # so broadcasting is not needed at all + ret = np.empty_like(a) if out is None else out + for index in np.ndindex(a.shape): + val_a = a[index] + ret[index] = min(val_a, a_max) + + return ret + + return np_clip_ns + else: + def np_clip_as(a, a_min, a_max, out=None): + # a_max is a scalar + # since its shape will be empty + # broadcast it to shape of a + # by using np.full_like + a_max_full = np.full_like(a, a_max) + return _np_clip_impl(a, a_min, a_max_full, out) + + return np_clip_as + else: + # Case where exactly one of a_min or a_max is None + if a_min_is_none: + def np_clip_na(a, a_min, a_max, out=None): + # a_max is a numpy array but a_min is None + ret = np.empty_like(a) if out is None else out + a_b, a_max_b = np.broadcast_arrays(a, 
a_max) + return _np_clip_impl_none(a_b, a_max_b, True, ret) + + return np_clip_na + elif a_max_is_none: + def np_clip_an(a, a_min, a_max, out=None): + # a_min is a numpy array but a_max is None + ret = np.empty_like(a) if out is None else out + a_b, a_min_b = np.broadcast_arrays(a, a_min) + return _np_clip_impl_none(a_b, a_min_b, False, ret) + + return np_clip_an + else: + def np_clip_aa(a, a_min, a_max, out=None): + # Both a_min and a_max are clearly arrays + # because none of the above branches + # returned + return _np_clip_impl(a, a_min, a_max, out) + + return np_clip_aa + + +@overload_method(types.Array, 'clip') +def array_clip(a, a_min=None, a_max=None, out=None): + def impl(a, a_min=None, a_max=None, out=None): + return np.clip(a, a_min, a_max, out) + return impl + + +def _change_dtype(context, builder, oldty, newty, ary): + """ + Attempt to fix up *ary* for switching from *oldty* to *newty*. + + See Numpy's array_descr_set() + (np/core/src/multiarray/getset.c). + Attempt to fix the array's shape and strides for a new dtype. + False is returned on failure, True on success. + """ + assert oldty.ndim == newty.ndim + assert oldty.layout == newty.layout + + new_layout = ord(newty.layout) + any_layout = ord('A') + c_layout = ord('C') + f_layout = ord('F') + + int8 = types.int8 + + def imp(nd, dims, strides, old_itemsize, new_itemsize, layout): + # Attempt to update the layout due to limitation of the numba + # type system. 
+ if layout == any_layout: + # Test rightmost stride to be contiguous + if strides[-1] == old_itemsize: + # Process this as if it is C contiguous + layout = int8(c_layout) + # Test leftmost stride to be F contiguous + elif strides[0] == old_itemsize: + # Process this as if it is F contiguous + layout = int8(f_layout) + + if old_itemsize != new_itemsize and (layout == any_layout or nd == 0): + return False + + if layout == c_layout: + i = nd - 1 + else: + i = 0 + + if new_itemsize < old_itemsize: + # If it is compatible, increase the size of the dimension + # at the end (or at the front if F-contiguous) + if (old_itemsize % new_itemsize) != 0: + return False + + newdim = old_itemsize // new_itemsize + dims[i] *= newdim + strides[i] = new_itemsize + + elif new_itemsize > old_itemsize: + # Determine if last (or first if F-contiguous) dimension + # is compatible + bytelength = dims[i] * old_itemsize + if (bytelength % new_itemsize) != 0: + return False + + dims[i] = bytelength // new_itemsize + strides[i] = new_itemsize + + else: + # Same item size: nothing to do (this also works for + # non-contiguous arrays). 
+ pass + + return True + + old_itemsize = context.get_constant(types.intp, + get_itemsize(context, oldty)) + new_itemsize = context.get_constant(types.intp, + get_itemsize(context, newty)) + + nd = context.get_constant(types.intp, newty.ndim) + shape_data = cgutils.gep_inbounds(builder, ary._get_ptr_by_name('shape'), + 0, 0) + strides_data = cgutils.gep_inbounds(builder, + ary._get_ptr_by_name('strides'), 0, 0) + + shape_strides_array_type = types.Array(dtype=types.intp, ndim=1, layout='C') + arycls = context.make_array(shape_strides_array_type) + + shape_constant = cgutils.pack_array(builder, + [context.get_constant(types.intp, + newty.ndim)]) + + sizeof_intp = context.get_abi_sizeof(context.get_data_type(types.intp)) + sizeof_intp = context.get_constant(types.intp, sizeof_intp) + strides_constant = cgutils.pack_array(builder, [sizeof_intp]) + + shape_ary = arycls(context, builder) + + populate_array(shape_ary, + data=shape_data, + shape=shape_constant, + strides=strides_constant, + itemsize=sizeof_intp, + meminfo=None) + + strides_ary = arycls(context, builder) + populate_array(strides_ary, + data=strides_data, + shape=shape_constant, + strides=strides_constant, + itemsize=sizeof_intp, + meminfo=None) + + shape = shape_ary._getvalue() + strides = strides_ary._getvalue() + args = [nd, shape, strides, old_itemsize, new_itemsize, + context.get_constant(types.int8, new_layout)] + + sig = signature(types.boolean, + types.intp, # nd + shape_strides_array_type, # dims + shape_strides_array_type, # strides + types.intp, # old_itemsize + types.intp, # new_itemsize + types.int8, # layout + ) + + res = context.compile_internal(builder, imp, sig, args) + update_array_info(newty, ary) + res = impl_ret_borrowed(context, builder, sig.return_type, res) + return res + + +@overload(np.shape) +def np_shape(a): + if not type_can_asarray(a): + raise errors.TypingError("The argument to np.shape must be array-like") + + def impl(a): + return np.asarray(a).shape + return impl + + 
+@overload(np.size) +def np_size(a): + if not type_can_asarray(a): + raise errors.TypingError("The argument to np.size must be array-like") + + def impl(a): + return np.asarray(a).size + return impl + +# ------------------------------------------------------------------------------ + + +@overload(np.unique) +def np_unique(ar): + def np_unique_impl(ar): + b = np.sort(ar.ravel()) + head = list(b[:1]) + tail = [x for i, x in enumerate(b[1:]) if b[i] != x] + return np.array(head + tail) + return np_unique_impl + + +@overload(np.repeat) +def np_repeat(a, repeats): + # Implementation for repeats being a scalar is a module global function + # (see below) because it might be called from the implementation below. + + def np_repeat_impl_repeats_array_like(a, repeats): + # implementation if repeats is an array like + repeats_array = np.asarray(repeats, dtype=np.int64) + # if it is a singleton array, invoke the scalar implementation + if repeats_array.shape[0] == 1: + return np_repeat_impl_repeats_scaler(a, repeats_array[0]) + if np.any(repeats_array < 0): + raise ValueError("negative dimensions are not allowed") + asa = np.asarray(a) + aravel = asa.ravel() + n = aravel.shape[0] + if aravel.shape != repeats_array.shape: + raise ValueError( + "operands could not be broadcast together") + to_return = np.empty(np.sum(repeats_array), dtype=asa.dtype) + pos = 0 + for i in range(n): + to_return[pos : pos + repeats_array[i]] = aravel[i] + pos += repeats_array[i] + return to_return + + # type checking + if isinstance(a, (types.Array, + types.List, + types.BaseTuple, + types.Number, + types.Boolean, + ) + ): + if isinstance(repeats, types.Integer): + return np_repeat_impl_repeats_scaler + elif isinstance(repeats, (types.Array, types.List)): + if isinstance(repeats.dtype, types.Integer): + return np_repeat_impl_repeats_array_like + + raise errors.TypingError( + "The repeats argument must be an integer " + "or an array-like of integer dtype") + + +@register_jitable +def 
np_repeat_impl_repeats_scaler(a, repeats): + if repeats < 0: + raise ValueError("negative dimensions are not allowed") + asa = np.asarray(a) + aravel = asa.ravel() + n = aravel.shape[0] + if repeats == 0: + return np.empty(0, dtype=asa.dtype) + elif repeats == 1: + return np.copy(aravel) + else: + to_return = np.empty(n * repeats, dtype=asa.dtype) + for i in range(n): + to_return[i * repeats : (i + 1) * repeats] = aravel[i] + return to_return + + +@extending.overload_method(types.Array, 'repeat') +def array_repeat(a, repeats): + def array_repeat_impl(a, repeats): + return np.repeat(a, repeats) + + return array_repeat_impl + + +@intrinsic +def _intrin_get_itemsize(tyctx, dtype): + """Computes the itemsize of the dtype""" + sig = types.intp(dtype) + + def codegen(cgctx, builder, sig, llargs): + llty = cgctx.get_data_type(sig.args[0].dtype) + llintp = cgctx.get_data_type(sig.return_type) + return llintp(cgctx.get_abi_sizeof(llty)) + return sig, codegen + + +def _compatible_view(a, dtype): + pass + + +@overload(_compatible_view, target='generic') +def ol_compatible_view(a, dtype): + """Determines if the array and dtype are compatible for forming a view.""" + # NOTE: NumPy 1.23+ uses this check. 
+ # Code based on: + # https://github.com/numpy/numpy/blob/750ad21258cfc00663586d5a466e24f91b48edc7/numpy/core/src/multiarray/getset.c#L500-L555 # noqa: E501 + def impl(a, dtype): + dtype_size = _intrin_get_itemsize(dtype) + if dtype_size != a.itemsize: + # catch forbidden cases + if a.ndim == 0: + msg1 = ("Changing the dtype of a 0d array is only supported " + "if the itemsize is unchanged") + raise ValueError(msg1) + else: + # NumPy has a check here for subarray type conversion which + # Numba doesn't support + pass + + # Resize on last axis only + axis = a.ndim - 1 + p1 = a.shape[axis] != 1 + p2 = a.size != 0 + p3 = a.strides[axis] != a.itemsize + if (p1 and p2 and p3): + msg2 = ("To change to a dtype of a different size, the last " + "axis must be contiguous") + raise ValueError(msg2) + + if dtype_size < a.itemsize: + if dtype_size == 0 or a.itemsize % dtype_size != 0: + msg3 = ("When changing to a smaller dtype, its size must " + "be a divisor of the size of original dtype") + raise ValueError(msg3) + else: + newdim = a.shape[axis] * a.itemsize + if newdim % dtype_size != 0: + msg4 = ("When changing to a larger dtype, its size must be " + "a divisor of the total size in bytes of the last " + "axis of the array.") + raise ValueError(msg4) + return impl + + +@lower_builtin('array.view', types.Array, types.DTypeSpec) +def array_view(context, builder, sig, args): + aryty = sig.args[0] + retty = sig.return_type + + ary = make_array(aryty)(context, builder, args[0]) + ret = make_array(retty)(context, builder) + # Copy all fields, casting the "data" pointer appropriately + fields = set(ret._datamodel._fields) + for k in sorted(fields): + val = getattr(ary, k) + if k == 'data': + ptrty = ret.data.type + ret.data = builder.bitcast(val, ptrty) + else: + setattr(ret, k, val) + + tyctx = context.typing_context + fnty = tyctx.resolve_value_type(_compatible_view) + _compatible_view_sig = fnty.get_call_type(tyctx, (*sig.args,), {}) + impl = context.get_function(fnty, 
_compatible_view_sig) + impl(builder, args) + + ok = _change_dtype(context, builder, aryty, retty, ret) + fail = builder.icmp_unsigned('==', ok, Constant(ok.type, 0)) + + with builder.if_then(fail): + msg = "new type not compatible with array" + context.call_conv.return_user_exc(builder, ValueError, (msg,)) + + res = ret._getvalue() + return impl_ret_borrowed(context, builder, sig.return_type, res) + + +# ------------------------------------------------------------------------------ +# Array attributes + +@lower_getattr(types.Array, "dtype") +def array_dtype(context, builder, typ, value): + res = context.get_dummy_value() + return impl_ret_untracked(context, builder, typ, res) + + +@lower_getattr(types.Array, "shape") +@lower_getattr(types.MemoryView, "shape") +def array_shape(context, builder, typ, value): + arrayty = make_array(typ) + array = arrayty(context, builder, value) + res = array.shape + return impl_ret_untracked(context, builder, typ, res) + + +@lower_getattr(types.Array, "strides") +@lower_getattr(types.MemoryView, "strides") +def array_strides(context, builder, typ, value): + arrayty = make_array(typ) + array = arrayty(context, builder, value) + res = array.strides + return impl_ret_untracked(context, builder, typ, res) + + +@lower_getattr(types.Array, "ndim") +@lower_getattr(types.MemoryView, "ndim") +def array_ndim(context, builder, typ, value): + res = context.get_constant(types.intp, typ.ndim) + return impl_ret_untracked(context, builder, typ, res) + + +@lower_getattr(types.Array, "size") +def array_size(context, builder, typ, value): + arrayty = make_array(typ) + array = arrayty(context, builder, value) + res = array.nitems + return impl_ret_untracked(context, builder, typ, res) + + +@lower_getattr(types.Array, "itemsize") +@lower_getattr(types.MemoryView, "itemsize") +def array_itemsize(context, builder, typ, value): + arrayty = make_array(typ) + array = arrayty(context, builder, value) + res = array.itemsize + return impl_ret_untracked(context, 
builder, typ, res) + + +@lower_getattr(types.Array, "nbytes") +@lower_getattr(types.MemoryView, "nbytes") +def array_nbytes(context, builder, typ, value): + """ + nbytes = size * itemsize + """ + arrayty = make_array(typ) + array = arrayty(context, builder, value) + res = builder.mul(array.nitems, array.itemsize) + return impl_ret_untracked(context, builder, typ, res) + + +@lower_getattr(types.MemoryView, "contiguous") +def array_contiguous(context, builder, typ, value): + res = context.get_constant(types.boolean, typ.is_contig) + return impl_ret_untracked(context, builder, typ, res) + + +@lower_getattr(types.MemoryView, "c_contiguous") +def array_c_contiguous(context, builder, typ, value): + res = context.get_constant(types.boolean, typ.is_c_contig) + return impl_ret_untracked(context, builder, typ, res) + + +@lower_getattr(types.MemoryView, "f_contiguous") +def array_f_contiguous(context, builder, typ, value): + res = context.get_constant(types.boolean, typ.is_f_contig) + return impl_ret_untracked(context, builder, typ, res) + + +@lower_getattr(types.MemoryView, "readonly") +def array_readonly(context, builder, typ, value): + res = context.get_constant(types.boolean, not typ.mutable) + return impl_ret_untracked(context, builder, typ, res) + + +# array.ctypes + +@lower_getattr(types.Array, "ctypes") +def array_ctypes(context, builder, typ, value): + arrayty = make_array(typ) + array = arrayty(context, builder, value) + # Create new ArrayCType structure + act = types.ArrayCTypes(typ) + ctinfo = context.make_helper(builder, act) + ctinfo.data = array.data + ctinfo.meminfo = array.meminfo + res = ctinfo._getvalue() + return impl_ret_borrowed(context, builder, act, res) + + +@lower_getattr(types.ArrayCTypes, "data") +def array_ctypes_data(context, builder, typ, value): + ctinfo = context.make_helper(builder, typ, value=value) + res = ctinfo.data + # Convert it to an integer + res = builder.ptrtoint(res, context.get_value_type(types.intp)) + return 
impl_ret_untracked(context, builder, typ, res) + + +@lower_cast(types.ArrayCTypes, types.CPointer) +@lower_cast(types.ArrayCTypes, types.voidptr) +def array_ctypes_to_pointer(context, builder, fromty, toty, val): + ctinfo = context.make_helper(builder, fromty, value=val) + res = ctinfo.data + res = builder.bitcast(res, context.get_value_type(toty)) + return impl_ret_untracked(context, builder, toty, res) + + +def _call_contiguous_check(checker, context, builder, aryty, ary): + """Helper to invoke the contiguous checker function on an array + + Args + ---- + checker : + ``numba.numpy_supports.is_contiguous``, or + ``numba.numpy_supports.is_fortran``. + context : target context + builder : llvm ir builder + aryty : numba type + ary : llvm value + """ + ary = make_array(aryty)(context, builder, value=ary) + tup_intp = types.UniTuple(types.intp, aryty.ndim) + itemsize = context.get_abi_sizeof(context.get_value_type(aryty.dtype)) + check_sig = signature(types.bool_, tup_intp, tup_intp, types.intp) + check_args = [ary.shape, ary.strides, + context.get_constant(types.intp, itemsize)] + is_contig = context.compile_internal(builder, checker, check_sig, + check_args) + return is_contig + + +# array.flags + +@lower_getattr(types.Array, "flags") +def array_flags(context, builder, typ, value): + flagsobj = context.make_helper(builder, types.ArrayFlags(typ)) + flagsobj.parent = value + res = flagsobj._getvalue() + context.nrt.incref(builder, typ, value) + return impl_ret_new_ref(context, builder, typ, res) + + +@lower_getattr(types.ArrayFlags, "contiguous") +@lower_getattr(types.ArrayFlags, "c_contiguous") +def array_flags_c_contiguous(context, builder, typ, value): + if typ.array_type.layout != 'C': + # any layout can still be contiguous + flagsobj = context.make_helper(builder, typ, value=value) + res = _call_contiguous_check(is_contiguous, context, builder, + typ.array_type, flagsobj.parent) + else: + val = typ.array_type.layout == 'C' + res = 
context.get_constant(types.boolean, val) + return impl_ret_untracked(context, builder, typ, res) + + +@lower_getattr(types.ArrayFlags, "f_contiguous") +def array_flags_f_contiguous(context, builder, typ, value): + if typ.array_type.layout != 'F': + # any layout can still be contiguous + flagsobj = context.make_helper(builder, typ, value=value) + res = _call_contiguous_check(is_fortran, context, builder, + typ.array_type, flagsobj.parent) + else: + layout = typ.array_type.layout + val = layout == 'F' if typ.array_type.ndim > 1 else layout in 'CF' + res = context.get_constant(types.boolean, val) + return impl_ret_untracked(context, builder, typ, res) + + +# ------------------------------------------------------------------------------ +# .real / .imag + +@lower_getattr(types.Array, "real") +def array_real_part(context, builder, typ, value): + if typ.dtype in types.complex_domain: + return array_complex_attr(context, builder, typ, value, attr='real') + elif typ.dtype in types.number_domain: + # as an identity function + return impl_ret_borrowed(context, builder, typ, value) + else: + raise NotImplementedError('unsupported .real for {}'.format(type.dtype)) + + +@lower_getattr(types.Array, "imag") +def array_imag_part(context, builder, typ, value): + if typ.dtype in types.complex_domain: + return array_complex_attr(context, builder, typ, value, attr='imag') + elif typ.dtype in types.number_domain: + # return a readonly zero array + sig = signature(typ.copy(readonly=True), typ) + arrtype, shapes = _parse_empty_like_args(context, builder, sig, [value]) + ary = _empty_nd_impl(context, builder, arrtype, shapes) + cgutils.memset(builder, ary.data, builder.mul(ary.itemsize, + ary.nitems), 0) + return impl_ret_new_ref(context, builder, sig.return_type, + ary._getvalue()) + else: + raise NotImplementedError('unsupported .imag for {}'.format(type.dtype)) + + +def array_complex_attr(context, builder, typ, value, attr): + """ + Given a complex array, it's memory layout is: + + R C 
R C R C + ^ ^ ^ + + (`R` indicates a float for the real part; + `C` indicates a float for the imaginary part; + the `^` indicates the start of each element) + + To get the real part, we can simply change the dtype and itemsize to that + of the underlying float type. The new layout is: + + R x R x R x + ^ ^ ^ + + (`x` indicates unused) + + A load operation will use the dtype to determine the number of bytes to + load. + + To get the imaginary part, we shift the pointer by 1 float offset and + change the dtype and itemsize. The new layout is: + + x C x C x C + ^ ^ ^ + """ + if attr not in ['real', 'imag'] or typ.dtype not in types.complex_domain: + raise NotImplementedError("cannot get attribute `{}`".format(attr)) + + arrayty = make_array(typ) + array = arrayty(context, builder, value) + + # sizeof underlying float type + flty = typ.dtype.underlying_float + sizeof_flty = context.get_abi_sizeof(context.get_data_type(flty)) + itemsize = array.itemsize.type(sizeof_flty) + + # cast data pointer to float type + llfltptrty = context.get_value_type(flty).as_pointer() + dataptr = builder.bitcast(array.data, llfltptrty) + + # add offset + if attr == 'imag': + dataptr = builder.gep(dataptr, [ir.IntType(32)(1)]) + + # make result + resultty = typ.copy(dtype=flty, layout='A') + result = make_array(resultty)(context, builder) + repl = dict(data=dataptr, itemsize=itemsize) + cgutils.copy_struct(result, array, repl) + return impl_ret_borrowed(context, builder, resultty, result._getvalue()) + + +@overload_method(types.Array, 'conj') +@overload_method(types.Array, 'conjugate') +def array_conj(arr): + def impl(arr): + return np.conj(arr) + return impl + +# ------------------------------------------------------------------------------ +# DType attribute + + +def dtype_type(context, builder, dtypety, dtypeval): + # Just return a dummy opaque value + return context.get_dummy_value() + + +lower_getattr(types.DType, 'type')(dtype_type) +lower_getattr(types.DType, 'kind')(dtype_type) + + 
+# ------------------------------------------------------------------------------ +# static_getitem on Numba numerical types to create "array" types + + +@lower_builtin('static_getitem', types.NumberClass, types.Any) +def static_getitem_number_clazz(context, builder, sig, args): + """This handles the "static_getitem" when a Numba type is subscripted e.g: + var = typed.List.empty_list(float64[::1, :]) + It only allows this on simple numerical types. Compound types, like + records, are not supported. + """ + retty = sig.return_type + if isinstance(retty, types.Array): + # This isn't used or practically accessible, but has to exist, so just + # put in a NULL of the right type. + res = context.get_value_type(retty)(None) + return impl_ret_untracked(context, builder, retty, res) + else: + # This should be unreachable unless the implementation on the Type + # metaclass is changed. + msg = ("Unreachable; the definition of __getitem__ on the " + "numba.types.abstract.Type metaclass should prevent access.") + raise errors.LoweringError(msg) + + +# ------------------------------------------------------------------------------ +# Structured / record lookup + +@lower_getattr_generic(types.Array) +def array_record_getattr(context, builder, typ, value, attr): + """ + Generic getattr() implementation for record arrays: fetch the given + record member, i.e. a subarray. 
+ """ + arrayty = make_array(typ) + array = arrayty(context, builder, value) + + rectype = typ.dtype + if not isinstance(rectype, types.Record): + raise NotImplementedError("attribute %r of %s not defined" + % (attr, typ)) + dtype = rectype.typeof(attr) + offset = rectype.offset(attr) + + if isinstance(dtype, types.NestedArray): + resty = typ.copy( + dtype=dtype.dtype, ndim=typ.ndim + dtype.ndim, layout='A') + else: + resty = typ.copy(dtype=dtype, layout='A') + + raryty = make_array(resty) + + rary = raryty(context, builder) + + constoffset = context.get_constant(types.intp, offset) + + newdataptr = cgutils.pointer_add( + builder, array.data, constoffset, return_type=rary.data.type, + ) + if isinstance(dtype, types.NestedArray): + # new shape = recarray shape + inner dimension from nestedarray + shape = cgutils.unpack_tuple(builder, array.shape, typ.ndim) + shape += [context.get_constant(types.intp, i) for i in dtype.shape] + # new strides = recarray strides + strides of the inner nestedarray + strides = cgutils.unpack_tuple(builder, array.strides, typ.ndim) + strides += [context.get_constant(types.intp, i) for i in dtype.strides] + # New datasize = size of elements of the nestedarray + datasize = context.get_abi_sizeof(context.get_data_type(dtype.dtype)) + else: + # New shape, strides, and datasize match the underlying array + shape = array.shape + strides = array.strides + datasize = context.get_abi_sizeof(context.get_data_type(dtype)) + populate_array(rary, + data=newdataptr, + shape=shape, + strides=strides, + itemsize=context.get_constant(types.intp, datasize), + meminfo=array.meminfo, + parent=array.parent) + res = rary._getvalue() + return impl_ret_borrowed(context, builder, resty, res) + + +@lower_builtin('static_getitem', types.Array, types.StringLiteral) +def array_record_getitem(context, builder, sig, args): + index = args[1] + if not isinstance(index, str): + # This will fallback to normal getitem + raise NotImplementedError + return 
array_record_getattr(context, builder, sig.args[0], args[0], index) + + +@lower_getattr_generic(types.Record) +def record_getattr(context, builder, typ, value, attr): + """ + Generic getattr() implementation for records: get the given record member. + """ + context.sentry_record_alignment(typ, attr) + offset = typ.offset(attr) + elemty = typ.typeof(attr) + + if isinstance(elemty, types.NestedArray): + # Only a nested array's *data* is stored in a structured array, + # so we create an array structure to point to that data. + aryty = make_array(elemty) + ary = aryty(context, builder) + dtype = elemty.dtype + newshape = [context.get_constant(types.intp, s) for s in + elemty.shape] + newstrides = [context.get_constant(types.intp, s) for s in + elemty.strides] + newdata = cgutils.get_record_member(builder, value, offset, + context.get_data_type(dtype)) + populate_array( + ary, + data=newdata, + shape=cgutils.pack_array(builder, newshape), + strides=cgutils.pack_array(builder, newstrides), + itemsize=context.get_constant(types.intp, elemty.size), + meminfo=None, + parent=None, + ) + res = ary._getvalue() + return impl_ret_borrowed(context, builder, typ, res) + else: + dptr = cgutils.get_record_member(builder, value, offset, + context.get_data_type(elemty)) + align = None if typ.aligned else 1 + res = context.unpack_value(builder, elemty, dptr, align) + return impl_ret_borrowed(context, builder, typ, res) + + +@lower_setattr_generic(types.Record) +def record_setattr(context, builder, sig, args, attr): + """ + Generic setattr() implementation for records: set the given record member. 
+ """ + typ, valty = sig.args + target, val = args + + context.sentry_record_alignment(typ, attr) + offset = typ.offset(attr) + elemty = typ.typeof(attr) + + if isinstance(elemty, types.NestedArray): + # Copy the data from the RHS into the nested array + val_struct = cgutils.create_struct_proxy(valty)(context, builder, + value=args[1]) + src = val_struct.data + dest = cgutils.get_record_member(builder, target, offset, + src.type.pointee) + cgutils.memcpy(builder, dest, src, + context.get_constant(types.intp, elemty.nitems)) + else: + # Set the given scalar record member + dptr = cgutils.get_record_member(builder, target, offset, + context.get_data_type(elemty)) + val = context.cast(builder, val, valty, elemty) + align = None if typ.aligned else 1 + context.pack_value(builder, elemty, val, dptr, align=align) + + +@lower_builtin('static_getitem', types.Record, types.StringLiteral) +def record_static_getitem_str(context, builder, sig, args): + """ + Record.__getitem__ redirects to getattr() + """ + impl = context.get_getattr(sig.args[0], args[1]) + return impl(context, builder, sig.args[0], args[0], args[1]) + + +@lower_builtin('static_getitem', types.Record, types.IntegerLiteral) +def record_static_getitem_int(context, builder, sig, args): + """ + Record.__getitem__ redirects to getattr() + """ + idx = sig.args[1].literal_value + fields = list(sig.args[0].fields) + ll_field = context.insert_const_string(builder.module, fields[idx]) + impl = context.get_getattr(sig.args[0], ll_field) + return impl(context, builder, sig.args[0], args[0], fields[idx]) + + +@lower_builtin('static_setitem', types.Record, types.StringLiteral, types.Any) +def record_static_setitem_str(context, builder, sig, args): + """ + Record.__setitem__ redirects to setattr() + """ + recty, _, valty = sig.args + rec, idx, val = args + getattr_sig = signature(sig.return_type, recty, valty) + impl = context.get_setattr(idx, getattr_sig) + assert impl is not None + return impl(builder, (rec, val)) + + 
+@lower_builtin('static_setitem', types.Record, types.IntegerLiteral, types.Any) +def record_static_setitem_int(context, builder, sig, args): + """ + Record.__setitem__ redirects to setattr() + """ + recty, _, valty = sig.args + rec, idx, val = args + getattr_sig = signature(sig.return_type, recty, valty) + fields = list(sig.args[0].fields) + impl = context.get_setattr(fields[idx], getattr_sig) + assert impl is not None + return impl(builder, (rec, val)) + + +# ------------------------------------------------------------------------------ +# Constant arrays and records + + +@lower_constant(types.Array) +def constant_array(context, builder, ty, pyval): + """ + Create a constant array (mechanism is target-dependent). + """ + return context.make_constant_array(builder, ty, pyval) + + +@lower_constant(types.Record) +def constant_record(context, builder, ty, pyval): + """ + Create a record constant as a stack-allocated array of bytes. + """ + lty = ir.ArrayType(ir.IntType(8), pyval.nbytes) + val = lty(bytearray(pyval.tostring())) + return cgutils.alloca_once_value(builder, val) + + +@lower_constant(types.Bytes) +def constant_bytes(context, builder, ty, pyval): + """ + Create a constant array from bytes (mechanism is target-dependent). 
+ """ + buf = np.array(bytearray(pyval), dtype=np.uint8) + return context.make_constant_array(builder, ty, buf) + +# ------------------------------------------------------------------------------ +# Comparisons + + +@lower_builtin(operator.is_, types.Array, types.Array) +def array_is(context, builder, sig, args): + aty, bty = sig.args + if aty != bty: + return cgutils.false_bit + + def array_is_impl(a, b): + return (a.shape == b.shape and + a.strides == b.strides and + a.ctypes.data == b.ctypes.data) + + return context.compile_internal(builder, array_is_impl, sig, args) + +# ------------------------------------------------------------------------------ +# Hash + + +@overload_attribute(types.Array, "__hash__") +def ol_array_hash(arr): + return lambda arr: None + + +# ------------------------------------------------------------------------------ +# builtin `np.flat` implementation + +def make_array_flat_cls(flatiterty): + """ + Return the Structure representation of the given *flatiterty* (an + instance of types.NumpyFlatType). + """ + return _make_flattening_iter_cls(flatiterty, 'flat') + + +def make_array_ndenumerate_cls(nditerty): + """ + Return the Structure representation of the given *nditerty* (an + instance of types.NumpyNdEnumerateType). 
+ """ + return _make_flattening_iter_cls(nditerty, 'ndenumerate') + + +def _increment_indices(context, builder, ndim, shape, indices, end_flag=None, + loop_continue=None, loop_break=None): + zero = context.get_constant(types.intp, 0) + + bbend = builder.append_basic_block('end_increment') + + if end_flag is not None: + builder.store(cgutils.false_byte, end_flag) + + for dim in reversed(range(ndim)): + idxptr = cgutils.gep_inbounds(builder, indices, dim) + idx = cgutils.increment_index(builder, builder.load(idxptr)) + + count = shape[dim] + in_bounds = builder.icmp_signed('<', idx, count) + with cgutils.if_likely(builder, in_bounds): + # New index is still in bounds + builder.store(idx, idxptr) + if loop_continue is not None: + loop_continue(dim) + builder.branch(bbend) + # Index out of bounds => reset it and proceed it to outer index + builder.store(zero, idxptr) + if loop_break is not None: + loop_break(dim) + + if end_flag is not None: + builder.store(cgutils.true_byte, end_flag) + builder.branch(bbend) + + builder.position_at_end(bbend) + + +def _increment_indices_array(context, builder, arrty, arr, indices, + end_flag=None): + shape = cgutils.unpack_tuple(builder, arr.shape, arrty.ndim) + _increment_indices(context, builder, arrty.ndim, shape, indices, end_flag) + + +def make_nditer_cls(nditerty): + """ + Return the Structure representation of the given *nditerty* (an + instance of types.NumpyNdIterType). + """ + ndim = nditerty.ndim + layout = nditerty.layout + narrays = len(nditerty.arrays) + nshapes = ndim if nditerty.need_shaped_indexing else 1 + + class BaseSubIter(object): + """ + Base class for sub-iterators of a nditer() instance. 
+ """ + + def __init__(self, nditer, member_name, start_dim, end_dim): + self.nditer = nditer + self.member_name = member_name + self.start_dim = start_dim + self.end_dim = end_dim + self.ndim = end_dim - start_dim + + def set_member_ptr(self, ptr): + setattr(self.nditer, self.member_name, ptr) + + @functools.cached_property + def member_ptr(self): + return getattr(self.nditer, self.member_name) + + def init_specific(self, context, builder): + pass + + def loop_continue(self, context, builder, logical_dim): + pass + + def loop_break(self, context, builder, logical_dim): + pass + + class FlatSubIter(BaseSubIter): + """ + Sub-iterator walking a contiguous array in physical order, with + support for broadcasting (the index is reset on the outer dimension). + """ + + def init_specific(self, context, builder): + zero = context.get_constant(types.intp, 0) + self.set_member_ptr(cgutils.alloca_once_value(builder, zero)) + + def compute_pointer(self, context, builder, indices, arrty, arr): + index = builder.load(self.member_ptr) + return builder.gep(arr.data, [index]) + + def loop_continue(self, context, builder, logical_dim): + if logical_dim == self.ndim - 1: + # Only increment index inside innermost logical dimension + index = builder.load(self.member_ptr) + index = cgutils.increment_index(builder, index) + builder.store(index, self.member_ptr) + + def loop_break(self, context, builder, logical_dim): + if logical_dim == 0: + # At the exit of outermost logical dimension, reset index + zero = context.get_constant(types.intp, 0) + builder.store(zero, self.member_ptr) + elif logical_dim == self.ndim - 1: + # Inside innermost logical dimension, increment index + index = builder.load(self.member_ptr) + index = cgutils.increment_index(builder, index) + builder.store(index, self.member_ptr) + + class TrivialFlatSubIter(BaseSubIter): + """ + Sub-iterator walking a contiguous array in physical order, + *without* support for broadcasting. 
+ """ + + def init_specific(self, context, builder): + assert not nditerty.need_shaped_indexing + + def compute_pointer(self, context, builder, indices, arrty, arr): + assert len(indices) <= 1, len(indices) + return builder.gep(arr.data, indices) + + class IndexedSubIter(BaseSubIter): + """ + Sub-iterator walking an array in logical order. + """ + + def compute_pointer(self, context, builder, indices, arrty, arr): + assert len(indices) == self.ndim + return cgutils.get_item_pointer(context, builder, arrty, arr, + indices, wraparound=False) + + class ZeroDimSubIter(BaseSubIter): + """ + Sub-iterator "walking" a 0-d array. + """ + + def compute_pointer(self, context, builder, indices, arrty, arr): + return arr.data + + class ScalarSubIter(BaseSubIter): + """ + Sub-iterator "walking" a scalar value. + """ + + def compute_pointer(self, context, builder, indices, arrty, arr): + return arr + + class NdIter(cgutils.create_struct_proxy(nditerty)): + """ + .nditer() implementation. + + Note: 'F' layout means the shape is iterated in reverse logical order, + so indices and shapes arrays have to be reversed as well. + """ + + @functools.cached_property + def subiters(self): + l = [] + factories = {'flat': FlatSubIter if nditerty.need_shaped_indexing + else TrivialFlatSubIter, + 'indexed': IndexedSubIter, + '0d': ZeroDimSubIter, + 'scalar': ScalarSubIter, + } + for i, sub in enumerate(nditerty.indexers): + kind, start_dim, end_dim, _ = sub + member_name = 'index%d' % i + factory = factories[kind] + l.append(factory(self, member_name, start_dim, end_dim)) + return l + + def init_specific(self, context, builder, arrtys, arrays): + """ + Initialize the nditer() instance for the specific array inputs. 
+ """ + zero = context.get_constant(types.intp, 0) + + # Store inputs + self.arrays = context.make_tuple(builder, types.Tuple(arrtys), + arrays) + # Create slots for scalars + for i, ty in enumerate(arrtys): + if not isinstance(ty, types.Array): + member_name = 'scalar%d' % i + # XXX as_data()? + slot = cgutils.alloca_once_value(builder, arrays[i]) + setattr(self, member_name, slot) + + arrays = self._arrays_or_scalars(context, builder, arrtys, arrays) + + # Extract iterator shape (the shape of the most-dimensional input) + main_shape_ty = types.UniTuple(types.intp, ndim) + main_shape = None + main_nitems = None + for i, arrty in enumerate(arrtys): + if isinstance(arrty, types.Array) and arrty.ndim == ndim: + main_shape = arrays[i].shape + main_nitems = arrays[i].nitems + break + else: + # Only scalar inputs => synthesize a dummy shape + assert ndim == 0 + main_shape = context.make_tuple(builder, main_shape_ty, ()) + main_nitems = context.get_constant(types.intp, 1) + + # Validate shapes of array inputs + def check_shape(shape, main_shape): + n = len(shape) + for i in range(n): + if shape[i] != main_shape[len(main_shape) - n + i]: + raise ValueError("nditer(): operands could not be " + "broadcast together") + + for arrty, arr in zip(arrtys, arrays): + if isinstance(arrty, types.Array) and arrty.ndim > 0: + sig = signature(types.none, + types.UniTuple(types.intp, arrty.ndim), + main_shape_ty) + context.compile_internal(builder, check_shape, + sig, (arr.shape, main_shape)) + + # Compute shape and size + shapes = cgutils.unpack_tuple(builder, main_shape) + if layout == 'F': + shapes = shapes[::-1] + + # If shape is empty, mark iterator exhausted + shape_is_empty = builder.icmp_signed('==', main_nitems, zero) + exhausted = builder.select(shape_is_empty, cgutils.true_byte, + cgutils.false_byte) + + if not nditerty.need_shaped_indexing: + # Flatten shape to make iteration faster on small innermost + # dimensions (e.g. 
a (100000, 3) shape) + shapes = (main_nitems,) + assert len(shapes) == nshapes + + indices = cgutils.alloca_once(builder, zero.type, size=nshapes) + for dim in range(nshapes): + idxptr = cgutils.gep_inbounds(builder, indices, dim) + builder.store(zero, idxptr) + + self.indices = indices + self.shape = cgutils.pack_array(builder, shapes, zero.type) + self.exhausted = cgutils.alloca_once_value(builder, exhausted) + + # Initialize subiterators + for subiter in self.subiters: + subiter.init_specific(context, builder) + + def iternext_specific(self, context, builder, result): + """ + Compute next iteration of the nditer() instance. + """ + bbend = builder.append_basic_block('end') + + # Branch early if exhausted + exhausted = cgutils.as_bool_bit(builder, + builder.load(self.exhausted)) + with cgutils.if_unlikely(builder, exhausted): + result.set_valid(False) + builder.branch(bbend) + + arrtys = nditerty.arrays + arrays = cgutils.unpack_tuple(builder, self.arrays) + arrays = self._arrays_or_scalars(context, builder, arrtys, arrays) + indices = self.indices + + # Compute iterated results + result.set_valid(True) + views = self._make_views(context, builder, indices, arrtys, arrays) + views = [v._getvalue() for v in views] + if len(views) == 1: + result.yield_(views[0]) + else: + result.yield_(context.make_tuple(builder, nditerty.yield_type, + views)) + + shape = cgutils.unpack_tuple(builder, self.shape) + _increment_indices(context, builder, len(shape), shape, + indices, self.exhausted, + functools.partial(self._loop_continue, + context, + builder), + functools.partial(self._loop_break, + context, + builder), + ) + + builder.branch(bbend) + builder.position_at_end(bbend) + + def _loop_continue(self, context, builder, dim): + for sub in self.subiters: + if sub.start_dim <= dim < sub.end_dim: + sub.loop_continue(context, builder, dim - sub.start_dim) + + def _loop_break(self, context, builder, dim): + for sub in self.subiters: + if sub.start_dim <= dim < sub.end_dim: + 
sub.loop_break(context, builder, dim - sub.start_dim) + + def _make_views(self, context, builder, indices, arrtys, arrays): + """ + Compute the views to be yielded. + """ + views = [None] * narrays + indexers = nditerty.indexers + subiters = self.subiters + rettys = nditerty.yield_type + if isinstance(rettys, types.BaseTuple): + rettys = list(rettys) + else: + rettys = [rettys] + indices = [builder.load(cgutils.gep_inbounds(builder, indices, i)) + for i in range(nshapes)] + + for sub, subiter in zip(indexers, subiters): + _, _, _, array_indices = sub + sub_indices = indices[subiter.start_dim:subiter.end_dim] + if layout == 'F': + sub_indices = sub_indices[::-1] + for i in array_indices: + assert views[i] is None + views[i] = self._make_view(context, builder, sub_indices, + rettys[i], + arrtys[i], arrays[i], subiter) + assert all(v for v in views) + return views + + def _make_view(self, context, builder, indices, retty, arrty, arr, + subiter): + """ + Compute a 0d view for a given input array. 
+ """ + assert isinstance(retty, types.Array) and retty.ndim == 0 + + ptr = subiter.compute_pointer(context, builder, indices, arrty, arr) + view = context.make_array(retty)(context, builder) + + itemsize = get_itemsize(context, retty) + shape = context.make_tuple(builder, types.UniTuple(types.intp, 0), + ()) + strides = context.make_tuple(builder, types.UniTuple(types.intp, 0), + ()) + # HACK: meminfo=None avoids expensive refcounting operations + # on ephemeral views + populate_array(view, ptr, shape, strides, itemsize, meminfo=None) + return view + + def _arrays_or_scalars(self, context, builder, arrtys, arrays): + # Return a list of either array structures or pointers to + # scalar slots + l = [] + for i, (arrty, arr) in enumerate(zip(arrtys, arrays)): + if isinstance(arrty, types.Array): + l.append(context.make_array(arrty)(context, + builder, + value=arr)) + else: + l.append(getattr(self, "scalar%d" % i)) + return l + + return NdIter + + +def make_ndindex_cls(nditerty): + """ + Return the Structure representation of the given *nditerty* (an + instance of types.NumpyNdIndexType). + """ + ndim = nditerty.ndim + + class NdIndexIter(cgutils.create_struct_proxy(nditerty)): + """ + .ndindex() implementation. + """ + + def init_specific(self, context, builder, shapes): + zero = context.get_constant(types.intp, 0) + indices = cgutils.alloca_once(builder, zero.type, + size=context.get_constant(types.intp, + ndim)) + exhausted = cgutils.alloca_once_value(builder, cgutils.false_byte) + + for dim in range(ndim): + idxptr = cgutils.gep_inbounds(builder, indices, dim) + builder.store(zero, idxptr) + # 0-sized dimensions really indicate an empty array, + # but we have to catch that condition early to avoid + # a bug inside the iteration logic. 
+ dim_size = shapes[dim] + dim_is_empty = builder.icmp_unsigned('==', dim_size, zero) + with cgutils.if_unlikely(builder, dim_is_empty): + builder.store(cgutils.true_byte, exhausted) + + self.indices = indices + self.exhausted = exhausted + self.shape = cgutils.pack_array(builder, shapes, zero.type) + + def iternext_specific(self, context, builder, result): + zero = context.get_constant(types.intp, 0) + + bbend = builder.append_basic_block('end') + + exhausted = cgutils.as_bool_bit(builder, + builder.load(self.exhausted)) + with cgutils.if_unlikely(builder, exhausted): + result.set_valid(False) + builder.branch(bbend) + + indices = [builder.load(cgutils.gep_inbounds(builder, + self.indices, + dim)) + for dim in range(ndim)] + for load in indices: + mark_positive(builder, load) + + result.yield_(cgutils.pack_array(builder, indices, zero.type)) + result.set_valid(True) + + shape = cgutils.unpack_tuple(builder, self.shape, ndim) + _increment_indices(context, builder, ndim, shape, + self.indices, self.exhausted) + + builder.branch(bbend) + builder.position_at_end(bbend) + + return NdIndexIter + + +def _make_flattening_iter_cls(flatiterty, kind): + assert kind in ('flat', 'ndenumerate') + + array_type = flatiterty.array_type + + if array_type.layout == 'C': + class CContiguousFlatIter(cgutils.create_struct_proxy(flatiterty)): + """ + .flat() / .ndenumerate() implementation for C-contiguous arrays. + """ + + def init_specific(self, context, builder, arrty, arr): + zero = context.get_constant(types.intp, 0) + self.index = cgutils.alloca_once_value(builder, zero) + # We can't trust strides[-1] to always contain the right + # step value, see + # http://docs.scipy.org/doc/numpy-dev/release.html#npy-relaxed-strides-checking # noqa: E501 + self.stride = arr.itemsize + + if kind == 'ndenumerate': + # Zero-initialize the indices array. 
+ indices = cgutils.alloca_once( + builder, zero.type, + size=context.get_constant(types.intp, arrty.ndim)) + + for dim in range(arrty.ndim): + idxptr = cgutils.gep_inbounds(builder, indices, dim) + builder.store(zero, idxptr) + + self.indices = indices + + # NOTE: Using gep() instead of explicit pointer addition helps + # LLVM vectorize the loop (since the stride is known and + # constant). This is not possible in the non-contiguous case, + # where the strides are unknown at compile-time. + + def iternext_specific(self, context, builder, arrty, arr, result): + ndim = arrty.ndim + nitems = arr.nitems + + index = builder.load(self.index) + is_valid = builder.icmp_signed('<', index, nitems) + result.set_valid(is_valid) + + with cgutils.if_likely(builder, is_valid): + ptr = builder.gep(arr.data, [index]) + value = load_item(context, builder, arrty, ptr) + if kind == 'flat': + result.yield_(value) + else: + # ndenumerate(): fetch and increment indices + indices = self.indices + idxvals = [builder.load(cgutils.gep_inbounds(builder, + indices, + dim)) + for dim in range(ndim)] + idxtuple = cgutils.pack_array(builder, idxvals) + result.yield_( + cgutils.make_anonymous_struct(builder, + [idxtuple, value])) + _increment_indices_array(context, builder, arrty, + arr, indices) + + index = cgutils.increment_index(builder, index) + builder.store(index, self.index) + + def getitem(self, context, builder, arrty, arr, index): + ptr = builder.gep(arr.data, [index]) + return load_item(context, builder, arrty, ptr) + + def setitem(self, context, builder, arrty, arr, index, value): + ptr = builder.gep(arr.data, [index]) + store_item(context, builder, arrty, value, ptr) + + return CContiguousFlatIter + + else: + class FlatIter(cgutils.create_struct_proxy(flatiterty)): + """ + Generic .flat() / .ndenumerate() implementation for + non-contiguous arrays. + It keeps track of pointers along each dimension in order to + minimize computations. 
+ """ + + def init_specific(self, context, builder, arrty, arr): + zero = context.get_constant(types.intp, 0) + data = arr.data + ndim = arrty.ndim + shapes = cgutils.unpack_tuple(builder, arr.shape, ndim) + + indices = cgutils.alloca_once( + builder, zero.type, size=context.get_constant(types.intp, + arrty.ndim)) + pointers = cgutils.alloca_once( + builder, data.type, size=context.get_constant(types.intp, + arrty.ndim)) + exhausted = cgutils.alloca_once_value(builder, + cgutils.false_byte) + + # Initialize indices and pointers with their start values. + for dim in range(ndim): + idxptr = cgutils.gep_inbounds(builder, indices, dim) + ptrptr = cgutils.gep_inbounds(builder, pointers, dim) + builder.store(data, ptrptr) + builder.store(zero, idxptr) + # 0-sized dimensions really indicate an empty array, + # but we have to catch that condition early to avoid + # a bug inside the iteration logic (see issue #846). + dim_size = shapes[dim] + dim_is_empty = builder.icmp_unsigned('==', dim_size, zero) + with cgutils.if_unlikely(builder, dim_is_empty): + builder.store(cgutils.true_byte, exhausted) + + self.indices = indices + self.pointers = pointers + self.exhausted = exhausted + + def iternext_specific(self, context, builder, arrty, arr, result): + ndim = arrty.ndim + shapes = cgutils.unpack_tuple(builder, arr.shape, ndim) + strides = cgutils.unpack_tuple(builder, arr.strides, ndim) + indices = self.indices + pointers = self.pointers + + zero = context.get_constant(types.intp, 0) + + bbend = builder.append_basic_block('end') + + # Catch already computed iterator exhaustion + is_exhausted = cgutils.as_bool_bit( + builder, builder.load(self.exhausted)) + with cgutils.if_unlikely(builder, is_exhausted): + result.set_valid(False) + builder.branch(bbend) + result.set_valid(True) + + # Current pointer inside last dimension + last_ptr = cgutils.gep_inbounds(builder, pointers, ndim - 1) + ptr = builder.load(last_ptr) + value = load_item(context, builder, arrty, ptr) + if kind == 
'flat': + result.yield_(value) + else: + # ndenumerate() => yield (indices, value) + idxvals = [builder.load(cgutils.gep_inbounds(builder, + indices, + dim)) + for dim in range(ndim)] + idxtuple = cgutils.pack_array(builder, idxvals) + result.yield_( + cgutils.make_anonymous_struct(builder, + [idxtuple, value])) + + # Update indices and pointers by walking from inner + # dimension to outer. + for dim in reversed(range(ndim)): + idxptr = cgutils.gep_inbounds(builder, indices, dim) + idx = cgutils.increment_index(builder, + builder.load(idxptr)) + + count = shapes[dim] + stride = strides[dim] + in_bounds = builder.icmp_signed('<', idx, count) + with cgutils.if_likely(builder, in_bounds): + # Index is valid => pointer can simply be incremented. + builder.store(idx, idxptr) + ptrptr = cgutils.gep_inbounds(builder, pointers, dim) + ptr = builder.load(ptrptr) + ptr = cgutils.pointer_add(builder, ptr, stride) + builder.store(ptr, ptrptr) + # Reset pointers in inner dimensions + for inner_dim in range(dim + 1, ndim): + ptrptr = cgutils.gep_inbounds(builder, + pointers, + inner_dim) + builder.store(ptr, ptrptr) + builder.branch(bbend) + # Reset index and continue with next dimension + builder.store(zero, idxptr) + + # End of array + builder.store(cgutils.true_byte, self.exhausted) + builder.branch(bbend) + + builder.position_at_end(bbend) + + def _ptr_for_index(self, context, builder, arrty, arr, index): + ndim = arrty.ndim + shapes = cgutils.unpack_tuple(builder, arr.shape, count=ndim) + strides = cgutils.unpack_tuple(builder, arr.strides, count=ndim) + + # First convert the flattened index into a regular n-dim index + indices = [] + for dim in reversed(range(ndim)): + indices.append(builder.urem(index, shapes[dim])) + index = builder.udiv(index, shapes[dim]) + indices.reverse() + + ptr = cgutils.get_item_pointer2(context, builder, arr.data, + shapes, strides, arrty.layout, + indices) + return ptr + + def getitem(self, context, builder, arrty, arr, index): + ptr = 
self._ptr_for_index(context, builder, arrty, arr, index) + return load_item(context, builder, arrty, ptr) + + def setitem(self, context, builder, arrty, arr, index, value): + ptr = self._ptr_for_index(context, builder, arrty, arr, index) + store_item(context, builder, arrty, value, ptr) + + return FlatIter + + +@lower_getattr(types.Array, "flat") +def make_array_flatiter(context, builder, arrty, arr): + flatitercls = make_array_flat_cls(types.NumpyFlatType(arrty)) + flatiter = flatitercls(context, builder) + + flatiter.array = arr + + arrcls = context.make_array(arrty) + arr = arrcls(context, builder, ref=flatiter._get_ptr_by_name('array')) + + flatiter.init_specific(context, builder, arrty, arr) + + res = flatiter._getvalue() + return impl_ret_borrowed(context, builder, types.NumpyFlatType(arrty), res) + + +@lower_builtin('iternext', types.NumpyFlatType) +@iternext_impl(RefType.BORROWED) +def iternext_numpy_flatiter(context, builder, sig, args, result): + [flatiterty] = sig.args + [flatiter] = args + + flatitercls = make_array_flat_cls(flatiterty) + flatiter = flatitercls(context, builder, value=flatiter) + + arrty = flatiterty.array_type + arrcls = context.make_array(arrty) + arr = arrcls(context, builder, value=flatiter.array) + + flatiter.iternext_specific(context, builder, arrty, arr, result) + + +@lower_builtin(operator.getitem, types.NumpyFlatType, types.Integer) +def iternext_numpy_getitem(context, builder, sig, args): + flatiterty = sig.args[0] + flatiter, index = args + + flatitercls = make_array_flat_cls(flatiterty) + flatiter = flatitercls(context, builder, value=flatiter) + + arrty = flatiterty.array_type + arrcls = context.make_array(arrty) + arr = arrcls(context, builder, value=flatiter.array) + + res = flatiter.getitem(context, builder, arrty, arr, index) + return impl_ret_borrowed(context, builder, sig.return_type, res) + + +@lower_builtin(operator.setitem, types.NumpyFlatType, types.Integer, + types.Any) +def iternext_numpy_getitem_any(context, 
builder, sig, args): + flatiterty = sig.args[0] + flatiter, index, value = args + + flatitercls = make_array_flat_cls(flatiterty) + flatiter = flatitercls(context, builder, value=flatiter) + + arrty = flatiterty.array_type + arrcls = context.make_array(arrty) + arr = arrcls(context, builder, value=flatiter.array) + + flatiter.setitem(context, builder, arrty, arr, index, value) + return context.get_dummy_value() + + +@lower_builtin(len, types.NumpyFlatType) +def iternext_numpy_getitem_flat(context, builder, sig, args): + flatiterty = sig.args[0] + flatitercls = make_array_flat_cls(flatiterty) + flatiter = flatitercls(context, builder, value=args[0]) + + arrcls = context.make_array(flatiterty.array_type) + arr = arrcls(context, builder, value=flatiter.array) + return arr.nitems + + +@lower_builtin(np.ndenumerate, types.Array) +def make_array_ndenumerate(context, builder, sig, args): + arrty, = sig.args + arr, = args + nditercls = make_array_ndenumerate_cls(types.NumpyNdEnumerateType(arrty)) + nditer = nditercls(context, builder) + + nditer.array = arr + + arrcls = context.make_array(arrty) + arr = arrcls(context, builder, ref=nditer._get_ptr_by_name('array')) + + nditer.init_specific(context, builder, arrty, arr) + + res = nditer._getvalue() + return impl_ret_borrowed(context, builder, sig.return_type, res) + + +@lower_builtin('iternext', types.NumpyNdEnumerateType) +@iternext_impl(RefType.BORROWED) +def iternext_numpy_nditer(context, builder, sig, args, result): + [nditerty] = sig.args + [nditer] = args + + nditercls = make_array_ndenumerate_cls(nditerty) + nditer = nditercls(context, builder, value=nditer) + + arrty = nditerty.array_type + arrcls = context.make_array(arrty) + arr = arrcls(context, builder, value=nditer.array) + + nditer.iternext_specific(context, builder, arrty, arr, result) + + +@lower_builtin(pndindex, types.VarArg(types.Integer)) +@lower_builtin(np.ndindex, types.VarArg(types.Integer)) +def make_array_ndindex(context, builder, sig, args): + 
"""ndindex(*shape)""" + shape = [context.cast(builder, arg, argty, types.intp) + for argty, arg in zip(sig.args, args)] + + nditercls = make_ndindex_cls(types.NumpyNdIndexType(len(shape))) + nditer = nditercls(context, builder) + nditer.init_specific(context, builder, shape) + + res = nditer._getvalue() + return impl_ret_borrowed(context, builder, sig.return_type, res) + + +@lower_builtin(pndindex, types.BaseTuple) +@lower_builtin(np.ndindex, types.BaseTuple) +def make_array_ndindex_tuple(context, builder, sig, args): + """ndindex(shape)""" + ndim = sig.return_type.ndim + if ndim > 0: + idxty = sig.args[0].dtype + tup = args[0] + + shape = cgutils.unpack_tuple(builder, tup, ndim) + shape = [context.cast(builder, idx, idxty, types.intp) + for idx in shape] + else: + shape = [] + + nditercls = make_ndindex_cls(types.NumpyNdIndexType(len(shape))) + nditer = nditercls(context, builder) + nditer.init_specific(context, builder, shape) + + res = nditer._getvalue() + return impl_ret_borrowed(context, builder, sig.return_type, res) + + +@lower_builtin('iternext', types.NumpyNdIndexType) +@iternext_impl(RefType.BORROWED) +def iternext_numpy_ndindex(context, builder, sig, args, result): + [nditerty] = sig.args + [nditer] = args + + nditercls = make_ndindex_cls(nditerty) + nditer = nditercls(context, builder, value=nditer) + + nditer.iternext_specific(context, builder, result) + + +@lower_builtin(np.nditer, types.Any) +def make_array_nditer(context, builder, sig, args): + """ + nditer(...) 
+ """ + nditerty = sig.return_type + arrtys = nditerty.arrays + + if isinstance(sig.args[0], types.BaseTuple): + arrays = cgutils.unpack_tuple(builder, args[0]) + else: + arrays = [args[0]] + + nditer = make_nditer_cls(nditerty)(context, builder) + nditer.init_specific(context, builder, arrtys, arrays) + + res = nditer._getvalue() + return impl_ret_borrowed(context, builder, nditerty, res) + + +@lower_builtin('iternext', types.NumpyNdIterType) +@iternext_impl(RefType.BORROWED) +def iternext_numpy_nditer2(context, builder, sig, args, result): + [nditerty] = sig.args + [nditer] = args + + nditer = make_nditer_cls(nditerty)(context, builder, value=nditer) + nditer.iternext_specific(context, builder, result) + + +@lower_builtin(operator.eq, types.DType, types.DType) +def dtype_eq_impl(context, builder, sig, args): + arg1, arg2 = sig.args + res = ir.Constant(ir.IntType(1), int(arg1 == arg2)) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +# ------------------------------------------------------------------------------ +# Numpy array constructors + +def _empty_nd_impl(context, builder, arrtype, shapes): + """Utility function used for allocating a new array during LLVM code + generation (lowering). Given a target context, builder, array + type, and a tuple or list of lowered dimension sizes, returns a + LLVM value pointing at a Numba runtime allocated array. 
+ """ + arycls = make_array(arrtype) + ary = arycls(context, builder) + + datatype = context.get_data_type(arrtype.dtype) + itemsize = context.get_constant(types.intp, get_itemsize(context, arrtype)) + + # compute array length + arrlen = context.get_constant(types.intp, 1) + overflow = Constant(ir.IntType(1), 0) + for s in shapes: + arrlen_mult = builder.smul_with_overflow(arrlen, s) + arrlen = builder.extract_value(arrlen_mult, 0) + overflow = builder.or_( + overflow, builder.extract_value(arrlen_mult, 1) + ) + + if arrtype.ndim == 0: + strides = () + elif arrtype.layout == 'C': + strides = [itemsize] + for dimension_size in reversed(shapes[1:]): + strides.append(builder.mul(strides[-1], dimension_size)) + strides = tuple(reversed(strides)) + elif arrtype.layout == 'F': + strides = [itemsize] + for dimension_size in shapes[:-1]: + strides.append(builder.mul(strides[-1], dimension_size)) + strides = tuple(strides) + else: + raise NotImplementedError( + "Don't know how to allocate array with layout '{0}'.".format( + arrtype.layout)) + + # Check overflow, numpy also does this after checking order + allocsize_mult = builder.smul_with_overflow(arrlen, itemsize) + allocsize = builder.extract_value(allocsize_mult, 0) + overflow = builder.or_(overflow, builder.extract_value(allocsize_mult, 1)) + + with builder.if_then(overflow, likely=False): + # Raise same error as numpy, see: + # https://github.com/numpy/numpy/blob/2a488fe76a0f732dc418d03b452caace161673da/numpy/core/src/multiarray/ctors.c#L1095-L1101 # noqa: E501 + context.call_conv.return_user_exc( + builder, ValueError, + ("array is too big; `arr.size * arr.dtype.itemsize` is larger than" + " the maximum possible size.",) + ) + + dtype = arrtype.dtype + align_val = context.get_preferred_array_alignment(dtype) + align = context.get_constant(types.uint32, align_val) + args = (context.get_dummy_value(), allocsize, align) + + mip = types.MemInfoPointer(types.voidptr) + arytypeclass = types.TypeRef(type(arrtype)) + 
argtypes = signature(mip, arytypeclass, types.intp, types.uint32) + + meminfo = context.compile_internal(builder, _call_allocator, argtypes, args) + data = context.nrt.meminfo_data(builder, meminfo) + + intp_t = context.get_value_type(types.intp) + shape_array = cgutils.pack_array(builder, shapes, ty=intp_t) + strides_array = cgutils.pack_array(builder, strides, ty=intp_t) + + populate_array(ary, + data=builder.bitcast(data, datatype.as_pointer()), + shape=shape_array, + strides=strides_array, + itemsize=itemsize, + meminfo=meminfo) + + return ary + + +@overload_classmethod(types.Array, "_allocate") +def _ol_array_allocate(cls, allocsize, align): + """Implements a Numba-only default target (cpu) classmethod on the array + type. + """ + def impl(cls, allocsize, align): + return intrin_alloc(allocsize, align) + return impl + + +def _call_allocator(arrtype, size, align): + """Trampoline to call the intrinsic used for allocation + """ + return arrtype._allocate(size, align) + + +@intrinsic +def intrin_alloc(typingctx, allocsize, align): + """Intrinsic to call into the allocator for Array + """ + def codegen(context, builder, signature, args): + [allocsize, align] = args + meminfo = context.nrt.meminfo_alloc_aligned(builder, allocsize, align) + return meminfo + + mip = types.MemInfoPointer(types.voidptr) # return untyped pointer + sig = signature(mip, allocsize, align) + return sig, codegen + + +def _parse_shape(context, builder, ty, val): + """ + Parse the shape argument to an array constructor. 
+ """ + def safecast_intp(context, builder, src_t, src): + """Cast src to intp only if value can be maintained""" + intp_t = context.get_value_type(types.intp) + intp_width = intp_t.width + intp_ir = ir.IntType(intp_width) + maxval = Constant(intp_ir, ((1 << intp_width - 1) - 1)) + if src_t.width < intp_width: + res = builder.sext(src, intp_ir) + elif src_t.width >= intp_width: + is_larger = builder.icmp_signed(">", src, maxval) + with builder.if_then(is_larger, likely=False): + context.call_conv.return_user_exc( + builder, ValueError, + ("Cannot safely convert value to intp",) + ) + if src_t.width > intp_width: + res = builder.trunc(src, intp_ir) + else: + res = src + return res + + if isinstance(ty, types.Integer): + ndim = 1 + passed_shapes = [context.cast(builder, val, ty, types.intp)] + else: + assert isinstance(ty, types.BaseTuple) + ndim = ty.count + passed_shapes = cgutils.unpack_tuple(builder, val, count=ndim) + + shapes = [] + for s in passed_shapes: + shapes.append(safecast_intp(context, builder, s.type, s)) + + zero = context.get_constant_generic(builder, types.intp, 0) + for dim in range(ndim): + is_neg = builder.icmp_signed('<', shapes[dim], zero) + with cgutils.if_unlikely(builder, is_neg): + context.call_conv.return_user_exc( + builder, ValueError, ("negative dimensions not allowed",) + ) + + return shapes + + +def _parse_empty_args(context, builder, sig, args): + """ + Parse the arguments of a np.empty(), np.zeros() or np.ones() call. + """ + arrshapetype = sig.args[0] + arrshape = args[0] + arrtype = sig.return_type + return arrtype, _parse_shape(context, builder, arrshapetype, arrshape) + + +def _parse_empty_like_args(context, builder, sig, args): + """ + Parse the arguments of a np.empty_like(), np.zeros_like() or + np.ones_like() call. 
+ """ + arytype = sig.args[0] + if isinstance(arytype, types.Array): + ary = make_array(arytype)(context, builder, value=args[0]) + shapes = cgutils.unpack_tuple(builder, ary.shape, count=arytype.ndim) + return sig.return_type, shapes + else: + return sig.return_type, () + + +def _check_const_str_dtype(fname, dtype): + if isinstance(dtype, types.UnicodeType): + msg = f"If np.{fname} dtype is a string it must be a string constant." + raise errors.TypingError(msg) + + +@intrinsic +def numpy_empty_nd(tyctx, ty_shape, ty_dtype, ty_retty_ref): + ty_retty = ty_retty_ref.instance_type + sig = ty_retty(ty_shape, ty_dtype, ty_retty_ref) + + def codegen(cgctx, builder, sig, llargs): + arrtype, shapes = _parse_empty_args(cgctx, builder, sig, llargs) + ary = _empty_nd_impl(cgctx, builder, arrtype, shapes) + return ary._getvalue() + return sig, codegen + + +@overload(np.empty) +def ol_np_empty(shape, dtype=float): + _check_const_str_dtype("empty", dtype) + if (dtype is float or + (isinstance(dtype, types.Function) and dtype.typing_key is float) or + is_nonelike(dtype)): #default + nb_dtype = types.double + else: + nb_dtype = ty_parse_dtype(dtype) + + ndim = ty_parse_shape(shape) + if nb_dtype is not None and ndim is not None: + retty = types.Array(dtype=nb_dtype, ndim=ndim, layout='C') + + def impl(shape, dtype=float): + return numpy_empty_nd(shape, dtype, retty) + return impl + else: + msg = f"Cannot parse input types to function np.empty({shape}, {dtype})" + raise errors.TypingError(msg) + + +@intrinsic +def numpy_empty_like_nd(tyctx, ty_prototype, ty_dtype, ty_retty_ref): + ty_retty = ty_retty_ref.instance_type + sig = ty_retty(ty_prototype, ty_dtype, ty_retty_ref) + + def codegen(cgctx, builder, sig, llargs): + arrtype, shapes = _parse_empty_like_args(cgctx, builder, sig, llargs) + ary = _empty_nd_impl(cgctx, builder, arrtype, shapes) + return ary._getvalue() + return sig, codegen + + +@overload(np.empty_like) +def ol_np_empty_like(arr, dtype=None): + 
_check_const_str_dtype("empty_like", dtype) + if not is_nonelike(dtype): + nb_dtype = ty_parse_dtype(dtype) + elif isinstance(arr, types.Array): + nb_dtype = arr.dtype + else: + nb_dtype = arr + if nb_dtype is not None: + if isinstance(arr, types.Array): + layout = arr.layout if arr.layout != 'A' else 'C' + retty = arr.copy(dtype=nb_dtype, layout=layout, readonly=False) + else: + retty = types.Array(nb_dtype, 0, 'C') + else: + msg = ("Cannot parse input types to function " + f"np.empty_like({arr}, {dtype})") + raise errors.TypingError(msg) + + def impl(arr, dtype=None): + return numpy_empty_like_nd(arr, dtype, retty) + return impl + + +@intrinsic +def _zero_fill_array_method(tyctx, self): + sig = types.none(self) + + def codegen(cgctx, builder, sig, llargs): + ary = make_array(sig.args[0])(cgctx, builder, llargs[0]) + cgutils.memset(builder, ary.data, builder.mul(ary.itemsize, ary.nitems), + 0) + return sig, codegen + + +@overload_method(types.Array, '_zero_fill') +def ol_array_zero_fill(self): + """Adds a `._zero_fill` method to zero fill an array using memset.""" + def impl(self): + _zero_fill_array_method(self) + return impl + + +@overload(np.zeros) +def ol_np_zeros(shape, dtype=float): + _check_const_str_dtype("zeros", dtype) + + def impl(shape, dtype=float): + arr = np.empty(shape, dtype=dtype) + arr._zero_fill() + return arr + return impl + + +@overload(np.zeros_like) +def ol_np_zeros_like(a, dtype=None): + _check_const_str_dtype("zeros_like", dtype) + + # NumPy uses 'a' as the arg name for the array-like + def impl(a, dtype=None): + arr = np.empty_like(a, dtype=dtype) + arr._zero_fill() + return arr + return impl + + +@overload(np.ones_like) +def ol_np_ones_like(a, dtype=None): + _check_const_str_dtype("ones_like", dtype) + + # NumPy uses 'a' as the arg name for the array-like + def impl(a, dtype=None): + arr = np.empty_like(a, dtype=dtype) + arr_flat = arr.flat + for idx in range(len(arr_flat)): + arr_flat[idx] = 1 + return arr + return impl + + 
+@overload(np.full) +def impl_np_full(shape, fill_value, dtype=None): + _check_const_str_dtype("full", dtype) + if not is_nonelike(dtype): + nb_dtype = ty_parse_dtype(dtype) + else: + nb_dtype = fill_value + + def full(shape, fill_value, dtype=None): + arr = np.empty(shape, nb_dtype) + arr_flat = arr.flat + for idx in range(len(arr_flat)): + arr_flat[idx] = fill_value + return arr + return full + + +@overload(np.full_like) +def impl_np_full_like(a, fill_value, dtype=None): + _check_const_str_dtype("full_like", dtype) + + def full_like(a, fill_value, dtype=None): + arr = np.empty_like(a, dtype) + arr_flat = arr.flat + for idx in range(len(arr_flat)): + arr_flat[idx] = fill_value + return arr + + return full_like + + +@overload(np.ones) +def ol_np_ones(shape, dtype=None): + # for some reason the NumPy default for dtype is None in the source but + # ends up as np.float64 by definition. + _check_const_str_dtype("ones", dtype) + + def impl(shape, dtype=None): + arr = np.empty(shape, dtype=dtype) + arr_flat = arr.flat + for idx in range(len(arr_flat)): + arr_flat[idx] = 1 + return arr + return impl + + +@overload(np.identity) +def impl_np_identity(n, dtype=None): + _check_const_str_dtype("identity", dtype) + if not is_nonelike(dtype): + nb_dtype = ty_parse_dtype(dtype) + else: + nb_dtype = types.double + + def identity(n, dtype=None): + arr = np.zeros((n, n), nb_dtype) + for i in range(n): + arr[i, i] = 1 + return arr + return identity + + +def _eye_none_handler(N, M): + pass + + +@extending.overload(_eye_none_handler) +def _eye_none_handler_impl(N, M): + if isinstance(M, types.NoneType): + def impl(N, M): + return N + else: + def impl(N, M): + return M + return impl + + +@extending.overload(np.eye) +def numpy_eye(N, M=None, k=0, dtype=float): + + if dtype is None or isinstance(dtype, types.NoneType): + dt = np.dtype(float) + elif isinstance(dtype, (types.DTypeSpec, types.Number)): + # dtype or instance of dtype + dt = as_dtype(getattr(dtype, 'dtype', dtype)) + else: + 
dt = np.dtype(dtype) + + def impl(N, M=None, k=0, dtype=float): + _M = _eye_none_handler(N, M) + arr = np.zeros((N, _M), dt) + if k >= 0: + d = min(N, _M - k) + for i in range(d): + arr[i, i + k] = 1 + else: + d = min(N + k, _M) + for i in range(d): + arr[i - k, i] = 1 + return arr + return impl + + +@overload(np.diag) +def impl_np_diag(v, k=0): + if not type_can_asarray(v): + raise errors.TypingError('The argument "v" must be array-like') + + if isinstance(v, types.Array): + if v.ndim not in (1, 2): + raise errors.NumbaTypeError("Input must be 1- or 2-d.") + + def diag_impl(v, k=0): + if v.ndim == 1: + s = v.shape + n = s[0] + abs(k) + ret = np.zeros((n, n), v.dtype) + if k >= 0: + for i in range(n - k): + ret[i, k + i] = v[i] + else: + for i in range(n + k): + ret[i - k, i] = v[i] + return ret + else: # 2-d + rows, cols = v.shape + if k < 0: + rows = rows + k + if k > 0: + cols = cols - k + n = max(min(rows, cols), 0) + ret = np.empty(n, v.dtype) + if k >= 0: + for i in range(n): + ret[i] = v[i, k + i] + else: + for i in range(n): + ret[i] = v[i - k, i] + return ret + return diag_impl + + +@overload(np.indices) +def numpy_indices(dimensions): + if not isinstance(dimensions, types.UniTuple): + msg = 'The argument "dimensions" must be a tuple of integers' + raise errors.TypingError(msg) + + if not isinstance(dimensions.dtype, types.Integer): + msg = 'The argument "dimensions" must be a tuple of integers' + raise errors.TypingError(msg) + + N = len(dimensions) + shape = (1,) * N + + def impl(dimensions): + res = np.empty((N,) + dimensions, dtype=np.int64) + i = 0 + for dim in dimensions: + idx = np.arange(dim, dtype=np.int64).reshape( + tuple_setitem(shape, i, dim) + ) + res[i] = idx + i += 1 + + return res + + return impl + + +@overload(np.diagflat) +def numpy_diagflat(v, k=0): + if not type_can_asarray(v): + msg = 'The argument "v" must be array-like' + raise errors.TypingError(msg) + + if not isinstance(k, (int, types.Integer)): + msg = 'The argument "k" must be 
# NOTE(review): this chunk is diff-mangled; the fragment below is the tail of
# an np.diag-style overload whose head lies outside the visible range.
an integer'
        raise errors.TypingError(msg)

    def impl(v, k=0):
        # Build an (s+|k|) x (s+|k|) zero matrix and write the flattened
        # input onto diagonal k (k>0 above the main diagonal, k<0 below).
        v = np.asarray(v)
        v = v.ravel()
        s = len(v)
        abs_k = abs(k)
        n = s + abs_k
        res = np.zeros((n, n), v.dtype)
        i = np.maximum(0, -k)
        j = np.maximum(0, k)
        for t in range(s):
            res[i + t, j + t] = v[t]

        return res

    return impl


def generate_getitem_setitem_with_axis(ndim, kind):
    """Generate a jit-able helper that indexes an ``ndim``-d array along a
    runtime ``axis`` value (0 .. ndim-1), for either reads ('getitem') or
    writes ('setitem').

    Numba cannot index with a runtime axis directly, so an if/elif chain
    covering every axis is generated as source text and exec'd.
    """
    assert kind in ('getitem', 'setitem')

    # NOTE(review): the indentation inside these string literals is runtime
    # significant (textwrap.dedent + exec) and was reconstructed from a
    # mangled diff — verify against upstream before relying on it.
    if kind == 'getitem':
        fn = '''
            def _getitem(a, idx, axis):
                if axis == 0:
                    return a[idx, ...]
        '''
        for i in range(1, ndim):
            lst = (':',) * i
            fn += f'''
                elif axis == {i}:
                    return a[{", ".join(lst)}, idx, ...]
            '''
    else:
        fn = '''
            def _setitem(a, idx, axis, vals):
                if axis == 0:
                    a[idx, ...] = vals
        '''

        for i in range(1, ndim):
            lst = (':',) * i
            fn += f'''
                elif axis == {i}:
                    a[{", ".join(lst)}, idx, ...] = vals
            '''

    fn = textwrap.dedent(fn)
    exec(fn, globals())
    fn = globals()[f'_{kind}']
    return register_jitable(fn)


@overload(np.take)
@overload_method(types.Array, 'take')
def numpy_take(a, indices, axis=None):
    """np.take / ndarray.take: pick elements by (flat or per-axis) index."""

    if cgutils.is_nonelike(axis):
        # axis=None: index into the flattened array.
        if isinstance(a, types.Array) and isinstance(indices, types.Integer):
            def take_impl(a, indices, axis=None):
                if indices > (a.size - 1) or indices < -a.size:
                    raise IndexError("Index out of bounds")
                return a.ravel()[indices]
            return take_impl

        if isinstance(a, types.Array) and isinstance(indices, types.Array):
            F_order = indices.layout == 'F'

            def take_impl(a, indices, axis=None):
                ret = np.empty(indices.size, dtype=a.dtype)
                if F_order:
                    walker = indices.copy()  # get C order
                else:
                    walker = indices
                it = np.nditer(walker)
                i = 0
                flat = a.ravel()
                for x in it:
                    if x > (a.size - 1) or x < -a.size:
                        raise IndexError("Index out of bounds")
                    ret[i] = flat[x]
                    i = i + 1
                return ret.reshape(indices.shape)
            return take_impl

        if isinstance(a, types.Array) and \
                isinstance(indices, (types.List, types.BaseTuple)):
            # Convert the sequence to an array and recurse.
            def take_impl(a, indices, axis=None):
                convert = np.array(indices)
                return np.take(a, convert)
            return take_impl
    else:
        # Integer axis: take along a specific dimension.
        if isinstance(a, types.Array) and isinstance(indices, types.Integer):
            t = (0,) * (a.ndim - 1)

            # np.squeeze is too hard to implement in Numba as the tuple "t"
            # needs to be allocated beforehand we don't know it's size until
            # code gets executed.
            @register_jitable
            def _squeeze(r, axis):
                tup = tuple(t)
                j = 0
                assert axis < len(r.shape) and r.shape[axis] == 1, r.shape
                for idx in range(len(r.shape)):
                    s = r.shape[idx]
                    if idx != axis:
                        tup = tuple_setitem(tup, j, s)
                        j += 1
                return r.reshape(tup)

            def take_impl(a, indices, axis=None):
                # Take a 1-sized slice then squeeze the axis away.
                r = np.take(a, (indices,), axis=axis)
                if a.ndim == 1:
                    return r[0]
                if axis < 0:
                    axis += a.ndim
                return _squeeze(r, axis)
            return take_impl

        if isinstance(a, types.Array) and \
                isinstance(indices, (types.Array, types.List, types.BaseTuple)):

            ndim = a.ndim

            # Runtime-axis slicing helpers generated per-ndim (see above).
            _getitem = generate_getitem_setitem_with_axis(ndim, 'getitem')
            _setitem = generate_getitem_setitem_with_axis(ndim, 'setitem')

            def take_impl(a, indices, axis=None):
                if axis < 0:
                    axis += a.ndim

                if axis < 0 or axis >= a.ndim:
                    msg = (f"axis {axis} is out of bounds for array "
                           f"of dimension {a.ndim}")
                    raise ValueError(msg)

                shape = tuple_setitem(a.shape, axis, len(indices))
                out = np.empty(shape, dtype=a.dtype)
                for i in range(len(indices)):
                    y = _getitem(a, indices[i], axis)
                    _setitem(out, i, axis, y)
                return out
            return take_impl


def _arange_dtype(*args):
    """Infer the result dtype of np.arange() from its non-None bound types."""
    bounds = [a for a in args if not isinstance(a, types.NoneType)]

    if any(isinstance(a, types.Complex) for a in bounds):
        dtype = types.complex128
    elif any(isinstance(a, types.Float) for a in bounds):
        dtype = types.float64
    else:
        # `np.arange(10).dtype` is always `np.dtype(int)`, aka `np.int_`, which
        # in all released versions of numpy corresponds to the C `long` type.
        # Windows 64 is broken by default here because Numba (as of 0.47) does
        # not differentiate between Python and NumPy integers, so a `typeof(1)`
        # on w64 is `int64`, i.e. `intp`. This means an arange() will
        # be typed as arange(int64) and the following will yield int64 opposed
        # to int32. Example: without a load of analysis to work out of the args
        # were wrapped in NumPy int*() calls it's not possible to detect the
        # difference between `np.arange(10)` and `np.arange(np.int64(10)`.
        NPY_TY = getattr(types, "int%s" % (8 * np.dtype(int).itemsize))

        # unliteral these types such that `max` works.
        unliteral_bounds = [types.unliteral(x) for x in bounds]
        dtype = max(unliteral_bounds + [NPY_TY,])

    return dtype


@overload(np.arange)
def np_arange(start, /, stop=None, step=None, dtype=None):
    """np.arange(): evenly spaced values within a half-open interval."""
    # Unwrap Optional types so the isinstance checks below see the payload.
    if isinstance(stop, types.Optional):
        stop = stop.type
    if isinstance(step, types.Optional):
        step = step.type
    if isinstance(dtype, types.Optional):
        dtype = dtype.type

    # Missing arguments are normalised to the none type.
    if stop is None:
        stop = types.none
    if step is None:
        step = types.none
    if dtype is None:
        dtype = types.none

    if (not isinstance(start, types.Number) or
            not isinstance(stop, (types.NoneType, types.Number)) or
            not isinstance(step, (types.NoneType, types.Number)) or
            not isinstance(dtype, (types.NoneType, types.DTypeSpec))):
        # Unsupported argument types: decline the overload.
        return

    if isinstance(dtype, types.NoneType):
        true_dtype = _arange_dtype(start, stop, step)
    else:
        true_dtype = dtype.dtype

    use_complex = any([isinstance(x, types.Complex)
                       for x in (start, stop, step)])

    # Literal values (if present) let the compiler constant-fold below.
    start_value = getattr(start, "literal_value", None)
    stop_value = getattr(stop, "literal_value", None)
    step_value = getattr(step, "literal_value", None)

    def impl(start, /, stop=None, step=None, dtype=None):
        # Allow for improved performance if given literal arguments.
        lit_start = start_value if start_value is not None else start
        lit_stop = stop_value if stop_value is not None else stop
        lit_step = step_value if step_value is not None else step

        _step = lit_step if lit_step is not None else 1
        if lit_stop is None:
            # One-argument form: arange(stop).
            _start, _stop = 0, lit_start
        else:
            _start, _stop = lit_start, lit_stop

        if _step == 0:
            raise ValueError("Maximum allowed size exceeded")

        nitems_c = (_stop - _start) / _step
        nitems_r = int(math.ceil(nitems_c.real))

        # Binary operator needed for compiler branch pruning.
        if use_complex is True:
            nitems_i = int(math.ceil(nitems_c.imag))
            nitems = max(min(nitems_i, nitems_r), 0)
        else:
            nitems = max(nitems_r, 0)
        arr = np.empty(nitems, true_dtype)
        val = _start
        for i in range(nitems):
            arr[i] = val + (i * _step)
        return arr

    return impl


@overload(np.linspace)
def numpy_linspace(start, stop, num=50):
    """np.linspace(): `num` evenly spaced samples over [start, stop]."""
    if not all(isinstance(arg, types.Number) for arg in [start, stop]):
        return

    if not isinstance(num, (int, types.Integer)):
        msg = 'The argument "num" must be an integer'
        raise errors.TypingError(msg)

    if any(isinstance(arg, types.Complex) for arg in [start, stop]):
        if config.USE_LEGACY_TYPE_SYSTEM:
            dtype = types.complex128
        else:
            dtype = types.np_complex128
    else:
        dtype = types.float64

    # Implementation based on https://github.com/numpy/numpy/blob/v1.20.0/numpy/core/function_base.py#L24 # noqa: E501
    def linspace(start, stop, num=50):
        arr = np.empty(num, dtype)
        # The multiply by 1.0 mirrors
        # https://github.com/numpy/numpy/blob/v1.20.0/numpy/core/function_base.py#L125-L128 # noqa: E501
        # the side effect of this is important... start and stop become the same
        # type as `dtype` i.e. 64/128 bits wide (float/complex). This is
        # important later when used in the `np.divide`.
        # (continuation of numpy_linspace.linspace — head split by diff
        # mangling at the previous line boundary)
        start = start * 1.0
        stop = stop * 1.0
        if num == 0:
            return arr
        div = num - 1
        if div > 0:
            delta = stop - start
            step = np.divide(delta, div)
            for i in range(0, num):
                arr[i] = start + (i * step)
        else:
            arr[0] = start
        # Pin the endpoint exactly to `stop` to avoid FP accumulation error.
        if num > 1:
            arr[-1] = stop
        return arr
    return linspace


def _array_copy(context, builder, sig, args):
    """
    Array copy.
    """
    arytype = sig.args[0]
    ary = make_array(arytype)(context, builder, value=args[0])
    shapes = cgutils.unpack_tuple(builder, ary.shape)

    rettype = sig.return_type
    ret = _empty_nd_impl(context, builder, rettype, shapes)

    src_data = ary.data
    dest_data = ret.data

    assert rettype.layout in "CF"
    if arytype.layout == rettype.layout:
        # Fast path: memcpy
        cgutils.raw_memcpy(builder, dest_data, src_data, ary.nitems,
                           ary.itemsize, align=1)
    else:
        # Layouts differ: element-wise strided copy via a loop nest.
        src_strides = cgutils.unpack_tuple(builder, ary.strides)
        dest_strides = cgutils.unpack_tuple(builder, ret.strides)
        intp_t = context.get_value_type(types.intp)

        with cgutils.loop_nest(builder, shapes, intp_t) as indices:
            src_ptr = cgutils.get_item_pointer2(context, builder, src_data,
                                                shapes, src_strides,
                                                arytype.layout, indices)
            dest_ptr = cgutils.get_item_pointer2(context, builder, dest_data,
                                                 shapes, dest_strides,
                                                 rettype.layout, indices)
            builder.store(builder.load(src_ptr), dest_ptr)

    return impl_ret_new_ref(context, builder, sig.return_type, ret._getvalue())


@intrinsic
def _array_copy_intrinsic(typingctx, a):
    """Intrinsic wrapper around _array_copy; 'A' layout copies become 'C'."""
    assert isinstance(a, types.Array)
    layout = 'F' if a.layout == 'F' else 'C'
    ret = a.copy(layout=layout, readonly=False)
    sig = ret(a)
    return sig, _array_copy


@lower_builtin("array.copy", types.Array)
def array_copy(context, builder, sig, args):
    # ndarray.copy() lowers straight to the shared copy helper.
    return _array_copy(context, builder, sig, args)


@overload(np.copy)
def impl_numpy_copy(a):
    """np.copy(): only defined for array arguments here."""
    if isinstance(a, types.Array):
        def numpy_copy(a):
            return _array_copy_intrinsic(a)
        return numpy_copy


def _as_layout_array(context, builder, sig, args, output_layout):
    """
    Common logic for layout conversion function;
    e.g. ascontiguousarray and asfortranarray
    """
    retty = sig.return_type
    aryty = sig.args[0]
    assert retty.layout == output_layout, 'return-type has incorrect layout'

    if aryty.ndim == 0:
        # 0-dim input => asfortranarray() returns a 1-dim array
        assert retty.ndim == 1
        ary = make_array(aryty)(context, builder, value=args[0])
        ret = make_array(retty)(context, builder)

        shape = context.get_constant_generic(
            builder, types.UniTuple(types.intp, 1), (1,),
        )
        strides = context.make_tuple(builder,
                                     types.UniTuple(types.intp, 1),
                                     (ary.itemsize,))
        populate_array(ret, ary.data, shape, strides, ary.itemsize,
                       ary.meminfo, ary.parent)
        return impl_ret_borrowed(context, builder, retty, ret._getvalue())

    elif (retty.layout == aryty.layout
            or (aryty.ndim == 1 and aryty.layout in 'CF')):
        # 1-dim contiguous input => return the same array
        return impl_ret_borrowed(context, builder, retty, args[0])

    else:
        if aryty.layout == 'A':
            # There's still chance the array is in contiguous layout,
            # just that we don't know at compile time.
            # We can do a runtime check.

            # Prepare and call is_contiguous or is_fortran
            assert output_layout in 'CF'
            check_func = is_contiguous if output_layout == 'C' else is_fortran
            is_contig = _call_contiguous_check(check_func,
                                               context,
                                               builder,
                                               aryty,
                                               args[0])
            with builder.if_else(is_contig) as (then, orelse):
                # If the array is already contiguous, just return it
                with then:
                    out_then = impl_ret_borrowed(context, builder, retty,
                                                 args[0])
                    then_blk = builder.block
                # Otherwise, copy to a new contiguous region
                with orelse:
                    out_orelse = _array_copy(context, builder, sig, args)
                    orelse_blk = builder.block
            # Phi node for the return value
            ret_phi = builder.phi(out_then.type)
            ret_phi.add_incoming(out_then, then_blk)
            ret_phi.add_incoming(out_orelse, orelse_blk)
            return ret_phi

        else:
            # Return a copy with the right layout
            return _array_copy(context, builder, sig, args)


@intrinsic
def _as_layout_array_intrinsic(typingctx, a, output_layout):
    """Intrinsic for layout conversion; `output_layout` must be a literal."""
    if not isinstance(output_layout, types.StringLiteral):
        raise errors.RequireLiteralValue(output_layout)

    # 0-dim inputs are promoted to 1-dim (ndim=max(a.ndim, 1)).
    ret = a.copy(layout=output_layout.literal_value, ndim=max(a.ndim, 1))
    sig = ret(a, output_layout)

    return sig, lambda c, b, s, a: _as_layout_array(
        c, b, s, a, output_layout=output_layout.literal_value)


@overload(np.ascontiguousarray)
def array_ascontiguousarray(a):
    """np.ascontiguousarray(): return a C-contiguous view or copy."""
    if not type_can_asarray(a):
        raise errors.TypingError('The argument "a" must be array-like')

    # NOTE(review): if `a` is array-like but neither Number/Boolean nor
    # Array (e.g. a list type), `impl` is never bound and the trailing
    # `return impl` would raise UnboundLocalError — verify intended behavior
    # against the sibling array_asfortranarray, which returns per-branch.
    if isinstance(a, (types.Number, types.Boolean,)):
        def impl(a):
            return np.ascontiguousarray(np.array(a))
    elif isinstance(a, types.Array):
        def impl(a):
            return _as_layout_array_intrinsic(a, 'C')
    return impl


@overload(np.asfortranarray)
def array_asfortranarray(a):
    """np.asfortranarray(): return an F-contiguous view or copy."""
    if not type_can_asarray(a):
        raise errors.TypingError('The argument "a" must be array-like')

    if isinstance(a, (types.Number, types.Boolean,)):
        def impl(a):
            return np.asfortranarray(np.array(a))
        return impl
    elif isinstance(a, types.Array):
        def
impl(a):
            # (continuation of array_asfortranarray — the `def ` token sits on
            # the previous diff-mangled line)
            return _as_layout_array_intrinsic(a, 'F')
        return impl


@lower_builtin("array.astype", types.Array, types.DTypeSpec)
@lower_builtin("array.astype", types.Array, types.StringLiteral)
def array_astype(context, builder, sig, args):
    """ndarray.astype(): element-wise cast into a freshly allocated array."""
    arytype = sig.args[0]
    ary = make_array(arytype)(context, builder, value=args[0])
    shapes = cgutils.unpack_tuple(builder, ary.shape)

    rettype = sig.return_type
    ret = _empty_nd_impl(context, builder, rettype, shapes)

    src_data = ary.data
    dest_data = ret.data

    src_strides = cgutils.unpack_tuple(builder, ary.strides)
    dest_strides = cgutils.unpack_tuple(builder, ret.strides)
    intp_t = context.get_value_type(types.intp)

    # Loop over every element, loading, casting and storing.
    with cgutils.loop_nest(builder, shapes, intp_t) as indices:
        src_ptr = cgutils.get_item_pointer2(context, builder, src_data,
                                            shapes, src_strides,
                                            arytype.layout, indices)
        dest_ptr = cgutils.get_item_pointer2(context, builder, dest_data,
                                             shapes, dest_strides,
                                             rettype.layout, indices)
        item = load_item(context, builder, arytype, src_ptr)
        item = context.cast(builder, item, arytype.dtype, rettype.dtype)
        store_item(context, builder, rettype, item, dest_ptr)

    return impl_ret_new_ref(context, builder, sig.return_type, ret._getvalue())


@intrinsic
def np_frombuffer(typingctx, buffer, dtype, retty):
    """Intrinsic behind np.frombuffer(): reinterpret a buffer as a 1-d array.

    The returned array borrows the buffer's meminfo/parent — no copy is made.
    """
    ty = retty.instance_type
    sig = ty(buffer, dtype, retty)

    def codegen(context, builder, sig, args):
        bufty = sig.args[0]
        aryty = sig.return_type

        buf = make_array(bufty)(context, builder, value=args[0])
        out_ary_ty = make_array(aryty)
        out_ary = out_ary_ty(context, builder)
        out_datamodel = out_ary._datamodel

        itemsize = get_itemsize(context, aryty)
        ll_itemsize = Constant(buf.itemsize.type, itemsize)
        nbytes = builder.mul(buf.nitems, buf.itemsize)

        # Check that the buffer size is compatible
        rem = builder.srem(nbytes, ll_itemsize)
        is_incompatible = cgutils.is_not_null(builder, rem)
        with builder.if_then(is_incompatible, likely=False):
            msg = "buffer size must be a multiple of element size"
            context.call_conv.return_user_exc(builder, ValueError, (msg,))

        shape = cgutils.pack_array(builder, [builder.sdiv(nbytes, ll_itemsize)])
        strides = cgutils.pack_array(builder, [ll_itemsize])
        data = builder.bitcast(
            buf.data, context.get_value_type(out_datamodel.get_type('data'))
        )

        populate_array(out_ary,
                       data=data,
                       shape=shape,
                       strides=strides,
                       itemsize=ll_itemsize,
                       meminfo=buf.meminfo,
                       parent=buf.parent,)

        res = out_ary._getvalue()
        return impl_ret_borrowed(context, builder, sig.return_type, res)
    return sig, codegen


@overload(np.frombuffer)
def impl_np_frombuffer(buffer, dtype=float):
    """np.frombuffer(): typing wrapper that resolves dtype then dispatches."""
    _check_const_str_dtype("frombuffer", dtype)

    if not isinstance(buffer, types.Buffer) or buffer.layout != 'C':
        msg = f'Argument "buffer" must be buffer-like. Got {buffer}'
        raise errors.TypingError(msg)

    if (dtype is float or
            (isinstance(dtype, types.Function) and dtype.typing_key is float) or
            is_nonelike(dtype)):  # default
        nb_dtype = types.double
    else:
        nb_dtype = ty_parse_dtype(dtype)

    if nb_dtype is not None:
        # Result is read-only when the source buffer is immutable.
        retty = types.Array(dtype=nb_dtype, ndim=1, layout='C',
                            readonly=not buffer.mutable)
    else:
        msg = ("Cannot parse input types to function "
               f"np.frombuffer({buffer}, {dtype})")
        raise errors.TypingError(msg)

    def impl(buffer, dtype=float):
        return np_frombuffer(buffer, dtype, retty)
    return impl


@overload(carray)
def impl_carray(ptr, shape, dtype=None):
    """carray(): view a raw pointer as a C-ordered array."""
    if is_nonelike(dtype):
        intrinsic_cfarray = get_cfarray_intrinsic('C', None)

        def impl(ptr, shape, dtype=None):
            return intrinsic_cfarray(ptr, shape)
        return impl
    elif isinstance(dtype, types.DTypeSpec):
        intrinsic_cfarray = get_cfarray_intrinsic('C', dtype)

        def impl(ptr, shape, dtype=None):
            return intrinsic_cfarray(ptr, shape)
        return impl


@overload(farray)
def impl_farray(ptr, shape, dtype=None):
    """farray(): view a raw pointer as a Fortran-ordered array."""
    if is_nonelike(dtype):
        intrinsic_cfarray = get_cfarray_intrinsic('F', None)

        def impl(ptr, shape, dtype=None):
            return intrinsic_cfarray(ptr, shape)
        return impl
    elif isinstance(dtype, types.DTypeSpec):
        intrinsic_cfarray = get_cfarray_intrinsic('F', dtype)

        def impl(ptr, shape, dtype=None):
            return intrinsic_cfarray(ptr, shape)
        return impl


def get_cfarray_intrinsic(layout, dtype_):
    """Build the @intrinsic backing carray/farray for a fixed layout/dtype."""
    @intrinsic
    def intrinsic_cfarray(typingctx, ptr, shape):
        # Resolve the element type from the pointer, the explicit dtype,
        # or both (which must then agree).
        if ptr is types.voidptr:
            ptr_dtype = None
        elif isinstance(ptr, types.CPointer):
            ptr_dtype = ptr.dtype
        else:
            msg = f"pointer argument expected, got '{ptr}'"
            raise errors.NumbaTypeError(msg)

        if dtype_ is None:
            if ptr_dtype is None:
                msg = "explicit dtype required for void* argument"
                raise errors.NumbaTypeError(msg)
            dtype = ptr_dtype
        elif isinstance(dtype_, types.DTypeSpec):
            dtype = dtype_.dtype
            if ptr_dtype is not None and dtype != ptr_dtype:
                msg = f"mismatching dtype '{dtype}' for pointer type '{ptr}'"
                raise errors.NumbaTypeError(msg)
        else:
            msg = f"invalid dtype spec '{dtype_}'"
            raise errors.NumbaTypeError(msg)

        ndim = ty_parse_shape(shape)
        if ndim is None:
            msg = f"invalid shape '{shape}'"
            raise errors.NumbaTypeError(msg)

        retty = types.Array(dtype, ndim, layout)
        sig = signature(retty, ptr, shape)
        return sig, np_cfarray
    return intrinsic_cfarray


def np_cfarray(context, builder, sig, args):
    """
    numba.numpy_support.carray(...) and
    numba.numpy_support.farray(...).
    """
    ptrty, shapety = sig.args[:2]
    ptr, shape = args[:2]

    aryty = sig.return_type
    assert aryty.layout in 'CF'

    out_ary = make_array(aryty)(context, builder)

    itemsize = get_itemsize(context, aryty)
    ll_itemsize = cgutils.intp_t(itemsize)

    if isinstance(shapety, types.BaseTuple):
        shapes = cgutils.unpack_tuple(builder, shape)
    else:
        # Scalar shape: treat as a 1-tuple.
        shapety = (shapety,)
        shapes = (shape,)
    shapes = [context.cast(builder, value, fromty, types.intp)
              for fromty, value in zip(shapety, shapes)]

    # Compute contiguous strides: ascending for F order, descending for C.
    off = ll_itemsize
    strides = []
    if aryty.layout == 'F':
        for s in shapes:
            strides.append(off)
            off = builder.mul(off, s)
    else:
        for s in reversed(shapes):
            strides.append(off)
            off = builder.mul(off, s)
        strides.reverse()

    data = builder.bitcast(ptr,
                           context.get_data_type(aryty.dtype).as_pointer())

    populate_array(out_ary,
                   data=data,
                   shape=shapes,
                   strides=strides,
                   itemsize=ll_itemsize,
                   # Array is not memory-managed
                   meminfo=None,
                   )

    res = out_ary._getvalue()
    return impl_ret_new_ref(context, builder, sig.return_type, res)


def _get_seq_size(context, builder, seqty, seq):
    """Emit code returning the length of a tuple or sequence value."""
    if isinstance(seqty, types.BaseTuple):
        return context.get_constant(types.intp, len(seqty))
    elif isinstance(seqty, types.Sequence):
        len_impl = context.get_function(len, signature(types.intp, seqty,))
        return len_impl(builder, (seq,))
    else:
        assert 0


def _get_borrowing_getitem(context, seqty):
    """
    Return a getitem() implementation that doesn't incref its result.
    """
    retty = seqty.dtype
    getitem_impl = context.get_function(operator.getitem,
                                        signature(retty, seqty, types.intp))

    def wrap(builder, args):
        ret = getitem_impl(builder, args)
        if context.enable_nrt:
            # Undo the incref performed by the regular getitem.
            context.nrt.decref(builder, retty, ret)
        return ret

    return wrap


def compute_sequence_shape(context, builder, ndim, seqty, seq):
    """
    Compute the likely shape of a nested sequence (possibly 0d).
+ """ + intp_t = context.get_value_type(types.intp) + zero = Constant(intp_t, 0) + + def get_first_item(seqty, seq): + if isinstance(seqty, types.BaseTuple): + if len(seqty) == 0: + return None, None + else: + return seqty[0], builder.extract_value(seq, 0) + else: + getitem_impl = _get_borrowing_getitem(context, seqty) + return seqty.dtype, getitem_impl(builder, (seq, zero)) + + # Compute shape by traversing the first element of each nested + # sequence + shapes = [] + innerty, inner = seqty, seq + + for i in range(ndim): + if i > 0: + innerty, inner = get_first_item(innerty, inner) + shapes.append(_get_seq_size(context, builder, innerty, inner)) + + return tuple(shapes) + + +def check_sequence_shape(context, builder, seqty, seq, shapes): + """ + Check the nested sequence matches the given *shapes*. + """ + + def _fail(): + context.call_conv.return_user_exc(builder, ValueError, + ("incompatible sequence shape",)) + + def check_seq_size(seqty, seq, shapes): + if len(shapes) == 0: + return + + size = _get_seq_size(context, builder, seqty, seq) + expected = shapes[0] + mismatch = builder.icmp_signed('!=', size, expected) + with builder.if_then(mismatch, likely=False): + _fail() + + if len(shapes) == 1: + return + + if isinstance(seqty, types.Sequence): + getitem_impl = _get_borrowing_getitem(context, seqty) + with cgutils.for_range(builder, size) as loop: + innerty = seqty.dtype + inner = getitem_impl(builder, (seq, loop.index)) + check_seq_size(innerty, inner, shapes[1:]) + + elif isinstance(seqty, types.BaseTuple): + for i in range(len(seqty)): + innerty = seqty[i] + inner = builder.extract_value(seq, i) + check_seq_size(innerty, inner, shapes[1:]) + + else: + assert 0, seqty + + check_seq_size(seqty, seq, shapes) + + +def assign_sequence_to_array(context, builder, data, shapes, strides, + arrty, seqty, seq): + """ + Assign a nested sequence contents to an array. The shape must match + the sequence's structure. 
+ """ + + def assign_item(indices, valty, val): + ptr = cgutils.get_item_pointer2(context, builder, data, shapes, strides, + arrty.layout, indices, wraparound=False) + val = context.cast(builder, val, valty, arrty.dtype) + store_item(context, builder, arrty, val, ptr) + + def assign(seqty, seq, shapes, indices): + if len(shapes) == 0: + assert not isinstance(seqty, (types.Sequence, types.BaseTuple)) + assign_item(indices, seqty, seq) + return + + size = shapes[0] + + if isinstance(seqty, types.Sequence): + getitem_impl = _get_borrowing_getitem(context, seqty) + with cgutils.for_range(builder, size) as loop: + innerty = seqty.dtype + inner = getitem_impl(builder, (seq, loop.index)) + assign(innerty, inner, shapes[1:], indices + (loop.index,)) + + elif isinstance(seqty, types.BaseTuple): + for i in range(len(seqty)): + innerty = seqty[i] + inner = builder.extract_value(seq, i) + index = context.get_constant(types.intp, i) + assign(innerty, inner, shapes[1:], indices + (index,)) + + else: + assert 0, seqty + + assign(seqty, seq, shapes, ()) + + +def np_array_typer(typingctx, object, dtype): + ndim, seq_dtype = _parse_nested_sequence(typingctx, object) + if is_nonelike(dtype): + dtype = seq_dtype + else: + dtype = ty_parse_dtype(dtype) + if dtype is None: + return + return types.Array(dtype, ndim, 'C') + + +@intrinsic +def np_array(typingctx, obj, dtype): + _check_const_str_dtype("array", dtype) + ret = np_array_typer(typingctx, obj, dtype) + sig = ret(obj, dtype) + + def codegen(context, builder, sig, args): + arrty = sig.return_type + ndim = arrty.ndim + seqty = sig.args[0] + seq = args[0] + + shapes = compute_sequence_shape(context, builder, ndim, seqty, seq) + assert len(shapes) == ndim + + check_sequence_shape(context, builder, seqty, seq, shapes) + arr = _empty_nd_impl(context, builder, arrty, shapes) + assign_sequence_to_array(context, builder, arr.data, shapes, + arr.strides, arrty, seqty, seq) + + return impl_ret_new_ref(context, builder, sig.return_type, + 
arr._getvalue()) + + return sig, codegen + + +@overload(np.array) +def impl_np_array(object, dtype=None): + _check_const_str_dtype("array", dtype) + if not type_can_asarray(object): + raise errors.TypingError('The argument "object" must ' + 'be array-like') + if not is_nonelike(dtype) and ty_parse_dtype(dtype) is None: + msg = 'The argument "dtype" must be a data-type if it is provided' + raise errors.TypingError(msg) + + def impl(object, dtype=None): + return np_array(object, dtype) + return impl + + +def _normalize_axis(context, builder, func_name, ndim, axis): + zero = axis.type(0) + ll_ndim = axis.type(ndim) + + # Normalize negative axis + is_neg_axis = builder.icmp_signed('<', axis, zero) + axis = builder.select(is_neg_axis, builder.add(axis, ll_ndim), axis) + + # Check axis for bounds + axis_out_of_bounds = builder.or_( + builder.icmp_signed('<', axis, zero), + builder.icmp_signed('>=', axis, ll_ndim)) + with builder.if_then(axis_out_of_bounds, likely=False): + msg = "%s(): axis out of bounds" % func_name + context.call_conv.return_user_exc(builder, IndexError, (msg,)) + + return axis + + +def _insert_axis_in_shape(context, builder, orig_shape, ndim, axis): + """ + Compute shape with the new axis inserted + e.g. given original shape (2, 3, 4) and axis=2, + the returned new shape is (2, 3, 1, 4). + """ + assert len(orig_shape) == ndim - 1 + + ll_shty = ir.ArrayType(cgutils.intp_t, ndim) + shapes = cgutils.alloca_once(builder, ll_shty) + + one = cgutils.intp_t(1) + + # 1. copy original sizes at appropriate places + for dim in range(ndim - 1): + ll_dim = cgutils.intp_t(dim) + after_axis = builder.icmp_signed('>=', ll_dim, axis) + sh = orig_shape[dim] + idx = builder.select(after_axis, + builder.add(ll_dim, one), + ll_dim) + builder.store(sh, cgutils.gep_inbounds(builder, shapes, 0, idx)) + + # 2. 
insert new size (1) at axis dimension + builder.store(one, cgutils.gep_inbounds(builder, shapes, 0, axis)) + + return cgutils.unpack_tuple(builder, builder.load(shapes)) + + +def _insert_axis_in_strides(context, builder, orig_strides, ndim, axis): + """ + Same as _insert_axis_in_shape(), but with a strides array. + """ + assert len(orig_strides) == ndim - 1 + + ll_shty = ir.ArrayType(cgutils.intp_t, ndim) + strides = cgutils.alloca_once(builder, ll_shty) + + one = cgutils.intp_t(1) + zero = cgutils.intp_t(0) + + # 1. copy original strides at appropriate places + for dim in range(ndim - 1): + ll_dim = cgutils.intp_t(dim) + after_axis = builder.icmp_signed('>=', ll_dim, axis) + idx = builder.select(after_axis, + builder.add(ll_dim, one), + ll_dim) + builder.store(orig_strides[dim], + cgutils.gep_inbounds(builder, strides, 0, idx)) + + # 2. insert new stride at axis dimension + # (the value is indifferent for a 1-sized dimension, we use 0) + builder.store(zero, cgutils.gep_inbounds(builder, strides, 0, axis)) + + return cgutils.unpack_tuple(builder, builder.load(strides)) + + +def expand_dims(context, builder, sig, args, axis): + """ + np.expand_dims() with the given axis. 
+ """ + retty = sig.return_type + ndim = retty.ndim + arrty = sig.args[0] + + arr = make_array(arrty)(context, builder, value=args[0]) + ret = make_array(retty)(context, builder) + + shapes = cgutils.unpack_tuple(builder, arr.shape) + strides = cgutils.unpack_tuple(builder, arr.strides) + + new_shapes = _insert_axis_in_shape(context, builder, shapes, ndim, axis) + new_strides = _insert_axis_in_strides(context, builder, strides, ndim, axis) + + populate_array(ret, + data=arr.data, + shape=new_shapes, + strides=new_strides, + itemsize=arr.itemsize, + meminfo=arr.meminfo, + parent=arr.parent) + + return ret._getvalue() + + +@intrinsic +def np_expand_dims(typingctx, a, axis): + layout = a.layout if a.ndim <= 1 else 'A' + ret = a.copy(ndim=a.ndim + 1, layout=layout) + sig = ret(a, axis) + + def codegen(context, builder, sig, args): + axis = context.cast(builder, args[1], sig.args[1], types.intp) + axis = _normalize_axis(context, builder, "np.expand_dims", + sig.return_type.ndim, axis) + + ret = expand_dims(context, builder, sig, args, axis) + return impl_ret_borrowed(context, builder, sig.return_type, ret) + + return sig, codegen + + +@overload(np.expand_dims) +def impl_np_expand_dims(a, axis): + if not isinstance(a, types.Array): + msg = f'First argument "a" must be an array. Got {a}' + raise errors.TypingError(msg) + + if not isinstance(axis, types.Integer): + msg = f'Argument "axis" must be an integer. 
Got {axis}' + raise errors.TypingError(msg) + + def impl(a, axis): + return np_expand_dims(a, axis) + return impl + + +def _atleast_nd(minimum, axes): + @intrinsic + def impl(typingcontext, *args): + arrtys = args + rettys = [arg.copy(ndim=max(arg.ndim, minimum)) for arg in args] + + def codegen(context, builder, sig, args): + transform = _atleast_nd_transform(minimum, axes) + arrs = cgutils.unpack_tuple(builder, args[0]) + + rets = [transform(context, builder, arr, arrty, retty) + for arr, arrty, retty in zip(arrs, arrtys, rettys)] + + if len(rets) > 1: + ret = context.make_tuple(builder, sig.return_type, rets) + else: + ret = rets[0] + return impl_ret_borrowed(context, builder, sig.return_type, ret) + + return signature(types.Tuple(rettys) if len(rettys) > 1 else rettys[0], + types.StarArgTuple.from_types(args)), codegen + + return lambda *args: impl(*args) + + +def _atleast_nd_transform(min_ndim, axes): + """ + Return a callback successively inserting 1-sized dimensions at the + following axes. + """ + assert min_ndim == len(axes) + + def transform(context, builder, arr, arrty, retty): + for i in range(min_ndim): + ndim = i + 1 + if arrty.ndim < ndim: + axis = cgutils.intp_t(axes[i]) + newarrty = arrty.copy(ndim=arrty.ndim + 1) + arr = expand_dims(context, builder, + typing.signature(newarrty, arrty), (arr,), + axis) + arrty = newarrty + + return arr + + return transform + + +@overload(np.atleast_1d) +def np_atleast_1d(*args): + if all(isinstance(arg, types.Array) for arg in args): + return _atleast_nd(1, [0]) + + +@overload(np.atleast_2d) +def np_atleast_2d(*args): + if all(isinstance(arg, types.Array) for arg in args): + return _atleast_nd(2, [0, 0]) + + +@overload(np.atleast_3d) +def np_atleast_3d(*args): + if all(isinstance(arg, types.Array) for arg in args): + return _atleast_nd(3, [0, 0, 2]) + + +def _do_concatenate(context, builder, axis, + arrtys, arrs, arr_shapes, arr_strides, + retty, ret_shapes): + """ + Concatenate arrays along the given axis. 
+ """ + assert len(arrtys) == len(arrs) == len(arr_shapes) == len(arr_strides) + + zero = cgutils.intp_t(0) + + # Allocate return array + ret = _empty_nd_impl(context, builder, retty, ret_shapes) + ret_strides = cgutils.unpack_tuple(builder, ret.strides) + + # Compute the offset by which to bump the destination pointer + # after copying each input array. + # Morally, we need to copy each input array at different start indices + # into the destination array; bumping the destination pointer + # is simply easier than offsetting all destination indices. + copy_offsets = [] + + for arr_sh in arr_shapes: + # offset = ret_strides[axis] * input_shape[axis] + offset = zero + for dim, (size, stride) in enumerate(zip(arr_sh, ret_strides)): + is_axis = builder.icmp_signed('==', axis.type(dim), axis) + addend = builder.mul(size, stride) + offset = builder.select(is_axis, + builder.add(offset, addend), + offset) + copy_offsets.append(offset) + + # Copy input arrays into the return array + ret_data = ret.data + + for arrty, arr, arr_sh, arr_st, offset in zip(arrtys, arrs, arr_shapes, + arr_strides, copy_offsets): + arr_data = arr.data + + # Do the copy loop + # Note the loop nesting is optimized for the destination layout + loop_nest = cgutils.loop_nest(builder, arr_sh, cgutils.intp_t, + order=retty.layout) + + with loop_nest as indices: + src_ptr = cgutils.get_item_pointer2(context, builder, arr_data, + arr_sh, arr_st, + arrty.layout, indices) + val = load_item(context, builder, arrty, src_ptr) + val = context.cast(builder, val, arrty.dtype, retty.dtype) + dest_ptr = cgutils.get_item_pointer2(context, builder, ret_data, + ret_shapes, ret_strides, + retty.layout, indices) + store_item(context, builder, retty, val, dest_ptr) + + # Bump destination pointer + ret_data = cgutils.pointer_add(builder, ret_data, offset) + + return ret + + +def _np_concatenate(context, builder, arrtys, arrs, retty, axis): + ndim = retty.ndim + + arrs = [make_array(aty)(context, builder, value=a) + for 
aty, a in zip(arrtys, arrs)] + + axis = _normalize_axis(context, builder, "np.concatenate", ndim, axis) + + # Get input shapes + arr_shapes = [cgutils.unpack_tuple(builder, arr.shape) for arr in arrs] + arr_strides = [cgutils.unpack_tuple(builder, arr.strides) for arr in arrs] + + # Compute return shape: + # - the dimension for the concatenation axis is summed over all inputs + # - other dimensions must match exactly for each input + ret_shapes = [cgutils.alloca_once_value(builder, sh) + for sh in arr_shapes[0]] + + for dim in range(ndim): + is_axis = builder.icmp_signed('==', axis.type(dim), axis) + ret_shape_ptr = ret_shapes[dim] + ret_sh = builder.load(ret_shape_ptr) + other_shapes = [sh[dim] for sh in arr_shapes[1:]] + + with builder.if_else(is_axis) as (on_axis, on_other_dim): + with on_axis: + sh = functools.reduce( + builder.add, + other_shapes + [ret_sh]) + builder.store(sh, ret_shape_ptr) + + with on_other_dim: + is_ok = cgutils.true_bit + for sh in other_shapes: + is_ok = builder.and_(is_ok, + builder.icmp_signed('==', sh, ret_sh)) + with builder.if_then(builder.not_(is_ok), likely=False): + context.call_conv.return_user_exc( + builder, ValueError, + ("np.concatenate(): input sizes over " + "dimension %d do not match" % dim,)) + + ret_shapes = [builder.load(sh) for sh in ret_shapes] + + ret = _do_concatenate(context, builder, axis, + arrtys, arrs, arr_shapes, arr_strides, + retty, ret_shapes) + return impl_ret_new_ref(context, builder, retty, ret._getvalue()) + + +def _np_stack(context, builder, arrtys, arrs, retty, axis): + ndim = retty.ndim + + zero = cgutils.intp_t(0) + one = cgutils.intp_t(1) + ll_narrays = cgutils.intp_t(len(arrs)) + + arrs = [make_array(aty)(context, builder, value=a) + for aty, a in zip(arrtys, arrs)] + + axis = _normalize_axis(context, builder, "np.stack", ndim, axis) + + # Check input arrays have the same shape + orig_shape = cgutils.unpack_tuple(builder, arrs[0].shape) + + for arr in arrs[1:]: + is_ok = cgutils.true_bit + for 
sh, orig_sh in zip(cgutils.unpack_tuple(builder, arr.shape),
                           orig_shape):
            # (continuation of _np_stack — the `for` keyword sits on the
            # previous diff-mangled line)
            is_ok = builder.and_(is_ok, builder.icmp_signed('==', sh, orig_sh))
        with builder.if_then(builder.not_(is_ok), likely=False):
            context.call_conv.return_user_exc(
                builder, ValueError,
                ("np.stack(): all input arrays must have the same shape",))

    orig_strides = [cgutils.unpack_tuple(builder, arr.strides) for arr in arrs]

    # Compute input shapes and return shape with the new axis inserted
    # e.g. given 5 input arrays of shape (2, 3, 4) and axis=1,
    # corrected input shape is (2, 1, 3, 4) and return shape is (2, 5, 3, 4).
    ll_shty = ir.ArrayType(cgutils.intp_t, ndim)

    input_shapes = cgutils.alloca_once(builder, ll_shty)
    ret_shapes = cgutils.alloca_once(builder, ll_shty)

    # 1. copy original sizes at appropriate places
    for dim in range(ndim - 1):
        ll_dim = cgutils.intp_t(dim)
        after_axis = builder.icmp_signed('>=', ll_dim, axis)
        sh = orig_shape[dim]
        idx = builder.select(after_axis,
                             builder.add(ll_dim, one),
                             ll_dim)
        builder.store(sh, cgutils.gep_inbounds(builder, input_shapes, 0, idx))
        builder.store(sh, cgutils.gep_inbounds(builder, ret_shapes, 0, idx))

    # 2. insert new size at axis dimension
    builder.store(one, cgutils.gep_inbounds(builder, input_shapes, 0, axis))
    builder.store(ll_narrays, cgutils.gep_inbounds(builder,
                                                   ret_shapes,
                                                   0,
                                                   axis))

    input_shapes = cgutils.unpack_tuple(builder, builder.load(input_shapes))
    input_shapes = [input_shapes] * len(arrs)
    ret_shapes = cgutils.unpack_tuple(builder, builder.load(ret_shapes))

    # Compute input strides for each array with the new axis inserted
    input_strides = [cgutils.alloca_once(builder, ll_shty)
                     for i in range(len(arrs))]

    # 1. copy original strides at appropriate places
    for dim in range(ndim - 1):
        ll_dim = cgutils.intp_t(dim)
        after_axis = builder.icmp_signed('>=', ll_dim, axis)
        idx = builder.select(after_axis,
                             builder.add(ll_dim, one),
                             ll_dim)
        for i in range(len(arrs)):
            builder.store(orig_strides[i][dim],
                          cgutils.gep_inbounds(builder, input_strides[i], 0,
                                               idx))

    # 2. insert new stride at axis dimension
    # (the value is indifferent for a 1-sized dimension, we put 0)
    for i in range(len(arrs)):
        builder.store(zero, cgutils.gep_inbounds(builder, input_strides[i], 0,
                                                 axis))

    input_strides = [cgutils.unpack_tuple(builder, builder.load(st))
                     for st in input_strides]

    # Create concatenated array
    ret = _do_concatenate(context, builder, axis,
                          arrtys, arrs, input_shapes, input_strides,
                          retty, ret_shapes)
    return impl_ret_new_ref(context, builder, retty, ret._getvalue())


def np_concatenate_typer(typingctx, arrays, axis):
    """Resolve the result type of np.concatenate(arrays, axis)."""
    if axis is not None and not isinstance(axis, types.Integer):
        # Note Numpy allows axis=None, but it isn't documented:
        # https://github.com/numpy/numpy/issues/7968
        return

    # does type checking
    dtype, ndim = _sequence_of_arrays(typingctx,
                                      "np.concatenate", arrays)
    if ndim == 0:
        msg = "zero-dimensional arrays cannot be concatenated"
        raise errors.NumbaTypeError(msg)

    layout = _choose_concatenation_layout(arrays)

    return types.Array(dtype, ndim, layout)


@intrinsic
def np_concatenate(typingctx, arrays, axis):
    """Intrinsic behind np.concatenate()."""
    ret = np_concatenate_typer(typingctx, arrays, axis)
    assert isinstance(ret, types.Array)
    sig = ret(arrays, axis)

    def codegen(context, builder, sig, args):
        axis = context.cast(builder, args[1], sig.args[1], types.intp)
        return _np_concatenate(context, builder,
                               list(sig.args[0]),
                               cgutils.unpack_tuple(builder, args[0]),
                               sig.return_type,
                               axis)

    return sig, codegen


@overload(np.concatenate)
def impl_np_concatenate(arrays, axis=0):
    """np.concatenate(): only tuple-of-arrays inputs are supported."""
    if isinstance(arrays, types.BaseTuple):
        def impl(arrays, axis=0):
            return np_concatenate(arrays, axis)
        return impl


def _column_stack_dims(context, func_name, arrays):
    """Dim chooser for column_stack: validates inputs, result is always 2-d."""
    # NOTE(review): `context` and `func_name` are unused here — presumably
    # kept to satisfy the dim_chooser callback signature; confirm.
    # column_stack() allows stacking 1-d and 2-d arrays together
    for a in arrays:
        if a.ndim < 1 or a.ndim > 2:
            msg = "np.column_stack() is only defined on 1-d and 2-d arrays"
            raise errors.NumbaTypeError(msg)
    return 2


@intrinsic
def np_column_stack(typingctx, tup):
    """Intrinsic behind np.column_stack(): 1-d inputs become columns."""
    dtype, ndim = _sequence_of_arrays(typingctx,
                                      "np.column_stack", tup,
                                      dim_chooser=_column_stack_dims)
    layout = _choose_concatenation_layout(tup)
    ret = types.Array(dtype, ndim, layout)
    sig = ret(tup)

    def codegen(context, builder, sig, args):
        orig_arrtys = list(sig.args[0])
        orig_arrs = cgutils.unpack_tuple(builder, args[0])

        arrtys = []
        arrs = []

        axis = context.get_constant(types.intp, 1)

        for arrty, arr in zip(orig_arrtys, orig_arrs):
            if arrty.ndim == 2:
                arrtys.append(arrty)
                arrs.append(arr)
            else:
                # Convert 1d array to 2d column array: np.expand_dims(a, 1)
                assert arrty.ndim == 1
                newty = arrty.copy(ndim=2)
                expand_sig = typing.signature(newty, arrty)
                newarr = expand_dims(context, builder, expand_sig, (arr,), axis)

                arrtys.append(newty)
                arrs.append(newarr)

        return _np_concatenate(context, builder, arrtys, arrs,
                               sig.return_type, axis)

    return sig, codegen


@overload(np.column_stack)
def impl_column_stack(tup):
    """np.column_stack(): only tuple-of-arrays inputs are supported."""
    if isinstance(tup, types.BaseTuple):
        def impl(tup):
            return np_column_stack(tup)
        return impl


def _np_stack_common(context, builder, sig, args, axis):
    """
    np.stack() with the given axis value.
+ """ + return _np_stack(context, builder, + list(sig.args[0]), + cgutils.unpack_tuple(builder, args[0]), + sig.return_type, + axis) + + +@intrinsic +def np_stack_common(typingctx, arrays, axis): + # does type checking + dtype, ndim = _sequence_of_arrays(typingctx, + "np.stack", arrays) + layout = 'F' if all(a.layout == 'F' for a in arrays) else 'C' + ret = types.Array(dtype, ndim + 1, layout) + sig = ret(arrays, axis) + + def codegen(context, builder, sig, args): + axis = context.cast(builder, args[1], sig.args[1], types.intp) + return _np_stack_common(context, builder, sig, args, axis) + + return sig, codegen + + +@overload(np.stack) +def impl_np_stack(arrays, axis=0): + if isinstance(arrays, types.BaseTuple): + def impl(arrays, axis=0): + return np_stack_common(arrays, axis) + return impl + + +def NdStack_typer(typingctx, func_name, arrays, ndim_min): + # does type checking + dtype, ndim = _sequence_of_arrays(typingctx, func_name, arrays) + ndim = max(ndim, ndim_min) + layout = _choose_concatenation_layout(arrays) + ret = types.Array(dtype, ndim, layout) + return ret + + +@intrinsic +def _np_hstack(typingctx, tup): + ret = NdStack_typer(typingctx, "np.hstack", tup, 1) + sig = ret(tup) + + def codegen(context, builder, sig, args): + tupty = sig.args[0] + ndim = tupty[0].ndim + + if ndim == 0: + # hstack() on 0-d arrays returns a 1-d array + axis = context.get_constant(types.intp, 0) + return _np_stack_common(context, builder, sig, args, axis) + + else: + # As a special case, dimension 0 of 1-dimensional arrays + # is "horizontal" + axis = 0 if ndim == 1 else 1 + + def np_hstack_impl(arrays): + return np.concatenate(arrays, axis=axis) + + return context.compile_internal(builder, np_hstack_impl, sig, args) + + return sig, codegen + + +@overload(np.hstack) +def impl_np_hstack(tup): + if isinstance(tup, types.BaseTuple): + def impl(tup): + return _np_hstack(tup) + return impl + + +@intrinsic +def _np_vstack(typingctx, tup): + ret = NdStack_typer(typingctx, 
"np.vstack", tup, 2) + sig = ret(tup) + + def codegen(context, builder, sig, args): + tupty = sig.args[0] + ndim = tupty[0].ndim + + if ndim == 0: + def np_vstack_impl(arrays): + return np.expand_dims(np.hstack(arrays), 1) + + elif ndim == 1: + # np.stack(arrays, axis=0) + axis = context.get_constant(types.intp, 0) + return _np_stack_common(context, builder, sig, args, axis) + + else: + def np_vstack_impl(arrays): + return np.concatenate(arrays, axis=0) + + return context.compile_internal(builder, np_vstack_impl, sig, args) + + return sig, codegen + + +@overload(np.vstack) +def impl_np_vstack(tup): + if isinstance(tup, types.BaseTuple): + def impl(tup): + return _np_vstack(tup) + return impl + + +if numpy_version >= (2, 0): + overload(np.row_stack)(impl_np_vstack) + + +@intrinsic +def _np_dstack(typingctx, tup): + ret = NdStack_typer(typingctx, "np.dstack", tup, 3) + sig = ret(tup) + + def codegen(context, builder, sig, args): + tupty = sig.args[0] + retty = sig.return_type + ndim = tupty[0].ndim + + if ndim == 0: + def np_vstack_impl(arrays): + return np.hstack(arrays).reshape(1, 1, -1) + + return context.compile_internal(builder, np_vstack_impl, sig, args) + + elif ndim == 1: + # np.expand_dims(np.stack(arrays, axis=1), axis=0) + axis = context.get_constant(types.intp, 1) + stack_retty = retty.copy(ndim=retty.ndim - 1) + stack_sig = typing.signature(stack_retty, *sig.args) + stack_ret = _np_stack_common(context, builder, stack_sig, args, + axis) + + axis = context.get_constant(types.intp, 0) + expand_sig = typing.signature(retty, stack_retty) + return expand_dims(context, builder, expand_sig, (stack_ret,), axis) + + elif ndim == 2: + # np.stack(arrays, axis=2) + axis = context.get_constant(types.intp, 2) + return _np_stack_common(context, builder, sig, args, axis) + + else: + def np_vstack_impl(arrays): + return np.concatenate(arrays, axis=2) + + return context.compile_internal(builder, np_vstack_impl, sig, args) + + return sig, codegen + + +@overload(np.dstack) 
+def impl_np_dstack(tup): + if isinstance(tup, types.BaseTuple): + def impl(tup): + return _np_dstack(tup) + return impl + + +@extending.overload_method(types.Array, 'fill') +def arr_fill(arr, val): + + def fill_impl(arr, val): + arr[:] = val + return None + + return fill_impl + + +@extending.overload_method(types.Array, 'dot') +def array_dot(arr, other): + def dot_impl(arr, other): + return np.dot(arr, other) + + return dot_impl + + +@overload(np.fliplr) +def np_flip_lr(m): + + if not type_can_asarray(m): + raise errors.TypingError("Cannot np.fliplr on %s type" % m) + + def impl(m): + A = np.asarray(m) + # this handling is superfluous/dead as < 2d array cannot be indexed as + # present below and so typing fails. If the typing doesn't fail due to + # some future change, this will catch it. + if A.ndim < 2: + raise ValueError('Input must be >= 2-d.') + return A[::, ::-1, ...] + return impl + + +@overload(np.flipud) +def np_flip_ud(m): + + if not type_can_asarray(m): + raise errors.TypingError("Cannot np.flipud on %s type" % m) + + def impl(m): + A = np.asarray(m) + # this handling is superfluous/dead as a 0d array cannot be indexed as + # present below and so typing fails. If the typing doesn't fail due to + # some future change, this will catch it. + if A.ndim < 1: + raise ValueError('Input must be >= 1-d.') + return A[::-1, ...] 
+ return impl + + +@intrinsic +def _build_flip_slice_tuple(tyctx, sz): + """ Creates a tuple of slices for np.flip indexing like + `(slice(None, None, -1),) * sz` """ + if not isinstance(sz, types.IntegerLiteral): + raise errors.RequireLiteralValue(sz) + size = int(sz.literal_value) + tuple_type = types.UniTuple(dtype=types.slice3_type, count=size) + sig = tuple_type(sz) + + def codegen(context, builder, signature, args): + def impl(length, empty_tuple): + out = empty_tuple + for i in range(length): + out = tuple_setitem(out, i, slice(None, None, -1)) + return out + + inner_argtypes = [types.intp, tuple_type] + inner_sig = typing.signature(tuple_type, *inner_argtypes) + ll_idx_type = context.get_value_type(types.intp) + # Allocate an empty tuple + empty_tuple = context.get_constant_undef(tuple_type) + inner_args = [ll_idx_type(size), empty_tuple] + + res = context.compile_internal(builder, impl, inner_sig, inner_args) + return res + + return sig, codegen + + +@overload(np.flip) +def np_flip(m): + # a constant value is needed for the tuple slice, types.Array.ndim can + # provide this and so at presnet only type.Array is support + if not isinstance(m, types.Array): + raise errors.TypingError("Cannot np.flip on %s type" % m) + + def impl(m): + sl = _build_flip_slice_tuple(m.ndim) + return m[sl] + + return impl + + +@overload(np.array_split) +def np_array_split(ary, indices_or_sections, axis=0): + if isinstance(ary, (types.UniTuple, types.ListType, types.List)): + def impl(ary, indices_or_sections, axis=0): + return np.array_split( + np.asarray(ary), + indices_or_sections, + axis=axis + ) + + return impl + + if isinstance(indices_or_sections, types.Integer): + def impl(ary, indices_or_sections, axis=0): + l, rem = divmod(ary.shape[axis], indices_or_sections) + indices = np.cumsum(np.array( + [l + 1] * rem + + [l] * (indices_or_sections - rem - 1) + )) + return np.array_split(ary, indices, axis=axis) + + return impl + + elif ( + isinstance(indices_or_sections, 
types.IterableType) + and isinstance( + indices_or_sections.iterator_type.yield_type, + types.Integer + ) + ): + def impl(ary, indices_or_sections, axis=0): + slice_tup = build_full_slice_tuple(ary.ndim) + axis = normalize_axis("np.split", "axis", ary.ndim, axis) + out = [] + prev = 0 + for cur in indices_or_sections: + idx = tuple_setitem(slice_tup, axis, slice(prev, cur)) + out.append(ary[idx]) + prev = cur + out.append(ary[tuple_setitem(slice_tup, axis, slice(cur, None))]) + return out + + return impl + + elif ( + isinstance(indices_or_sections, types.Tuple) + and all(isinstance(t, types.Integer) for t in indices_or_sections.types) + ): + def impl(ary, indices_or_sections, axis=0): + slice_tup = build_full_slice_tuple(ary.ndim) + axis = normalize_axis("np.split", "axis", ary.ndim, axis) + out = [] + prev = 0 + for cur in literal_unroll(indices_or_sections): + idx = tuple_setitem(slice_tup, axis, slice(prev, cur)) + out.append(ary[idx]) + prev = cur + out.append(ary[tuple_setitem(slice_tup, axis, slice(cur, None))]) + return out + + return impl + + +@overload(np.split) +def np_split(ary, indices_or_sections, axis=0): + # This is just a wrapper of array_split, but with an extra error if + # indices is an int. 
+ if isinstance(ary, (types.UniTuple, types.ListType, types.List)): + def impl(ary, indices_or_sections, axis=0): + return np.split(np.asarray(ary), indices_or_sections, axis=axis) + + return impl + + if isinstance(indices_or_sections, types.Integer): + def impl(ary, indices_or_sections, axis=0): + _, rem = divmod(ary.shape[axis], indices_or_sections) + if rem != 0: + raise ValueError( + "array split does not result in an equal division" + ) + return np.array_split( + ary, indices_or_sections, axis=axis + ) + + return impl + + else: + return np_array_split(ary, indices_or_sections, axis=axis) + + +@overload(np.vsplit) +def numpy_vsplit(ary, indices_or_sections): + if not isinstance(ary, types.Array): + msg = 'The argument "ary" must be an array' + raise errors.TypingError(msg) + + if not isinstance(indices_or_sections, (types.Integer, types.Array, + types.List, types.UniTuple)): + msg = ('The argument "indices_or_sections" must be int or 1d-array') + raise errors.TypingError(msg) + + def impl(ary, indices_or_sections): + if ary.ndim < 2: + raise ValueError(('vsplit only works on ' + 'arrays of 2 or more dimensions')) + return np.split(ary, indices_or_sections, axis=0) + + return impl + + +@overload(np.hsplit) +def numpy_hsplit(ary, indices_or_sections): + if not isinstance(ary, types.Array): + msg = 'The argument "ary" must be an array' + raise errors.TypingError(msg) + + if not isinstance(indices_or_sections, (types.Integer, types.Array, + types.List, types.UniTuple)): + msg = ('The argument "indices_or_sections" must be int or 1d-array') + raise errors.TypingError(msg) + + def impl(ary, indices_or_sections): + if ary.ndim == 0: + raise ValueError(('hsplit only works on ' + 'arrays of 1 or more dimensions')) + if ary.ndim > 1: + return np.split(ary, indices_or_sections, axis=1) + return np.split(ary, indices_or_sections, axis=0) + + return impl + + +@overload(np.dsplit) +def numpy_dsplit(ary, indices_or_sections): + if not isinstance(ary, types.Array): + msg = 
'The argument "ary" must be an array' + raise errors.TypingError(msg) + + if not isinstance(indices_or_sections, (types.Integer, types.Array, + types.List, types.UniTuple)): + msg = ('The argument "indices_or_sections" must be int or 1d-array') + raise errors.TypingError(msg) + + def impl(ary, indices_or_sections): + if ary.ndim < 3: + raise ValueError('dsplit only works on arrays of 3 or more ' + 'dimensions') + return np.split(ary, indices_or_sections, axis=2) + + return impl + + +# ----------------------------------------------------------------------------- +# Sorting + +_sorts = {} + + +def default_lt(a, b): + """ + Trivial comparison function between two keys. + """ + return a < b + + +def get_sort_func(kind, lt_impl, is_argsort=False): + """ + Get a sort implementation of the given kind. + """ + key = kind, lt_impl.__name__, is_argsort + + try: + return _sorts[key] + except KeyError: + if kind == 'quicksort': + sort = quicksort.make_jit_quicksort( + lt=lt_impl, + is_argsort=is_argsort, + is_np_array=True) + func = sort.run_quicksort + elif kind == 'mergesort': + sort = mergesort.make_jit_mergesort( + lt=lt_impl, + is_argsort=is_argsort) + func = sort.run_mergesort + _sorts[key] = func + return func + + +def lt_implementation(dtype): + if isinstance(dtype, types.Float): + return lt_floats + elif isinstance(dtype, types.Complex): + return lt_complex + else: + return default_lt + + +@lower_builtin("array.sort", types.Array) +def array_sort(context, builder, sig, args): + arytype = sig.args[0] + + sort_func = get_sort_func(kind='quicksort', + lt_impl=lt_implementation(arytype.dtype)) + + def array_sort_impl(arr): + # Note we clobber the return value + sort_func(arr) + + return context.compile_internal(builder, array_sort_impl, sig, args) + + +@overload(np.sort) +def impl_np_sort(a): + if not type_can_asarray(a): + raise errors.TypingError('Argument "a" must ' + 'be array-like') + + def np_sort_impl(a): + res = a.copy() + res.sort() + return res + return 
np_sort_impl + + +@lower_builtin("array.argsort", types.Array, types.StringLiteral) +@lower_builtin(np.argsort, types.Array, types.StringLiteral) +def array_argsort(context, builder, sig, args): + arytype, kind = sig.args + + sort_func = get_sort_func(kind=kind.literal_value, + lt_impl=lt_implementation(arytype.dtype), + is_argsort=True) + + def array_argsort_impl(arr): + return sort_func(arr) + + innersig = sig.replace(args=sig.args[:1]) + innerargs = args[:1] + return context.compile_internal(builder, array_argsort_impl, + innersig, innerargs) + + +# ------------------------------------------------------------------------------ +# Implicit cast + +@lower_cast(types.Array, types.Array) +def array_to_array(context, builder, fromty, toty, val): + # Type inference should have prevented illegal array casting. + assert fromty.mutable != toty.mutable or toty.layout == 'A' + return val + + +@lower_cast(types.Array, types.UnicodeCharSeq) +@lower_cast(types.Array, types.Float) +@lower_cast(types.Array, types.Integer) +@lower_cast(types.Array, types.Complex) +@lower_cast(types.Array, types.Boolean) +@lower_cast(types.Array, types.NPTimedelta) +@lower_cast(types.Array, types.NPDatetime) +def array0d_to_scalar(context, builder, fromty, toty, val): + def impl(a): + # a is an array(T, 0d, O), T is type, O is order + return a.take(0) + + sig = signature(toty, fromty) + res = context.compile_internal(builder, impl, sig, [val]) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +@lower_cast(types.Array, types.UnicodeCharSeq) +def array_to_unichrseq(context, builder, fromty, toty, val): + def impl(a): + return str(a[()]) + + sig = signature(toty, fromty) + res = context.compile_internal(builder, impl, sig, [val]) + return impl_ret_borrowed(context, builder, sig.return_type, res) + + +# ------------------------------------------------------------------------------ +# Stride tricks + +def reshape_unchecked(a, shape, strides): + """ + An intrinsic returning a 
derived array with the given shape and strides. + """ + raise NotImplementedError + + +@extending.type_callable(reshape_unchecked) +def type_reshape_unchecked(context): + def check_shape(shape): + return (isinstance(shape, types.BaseTuple) and + all(isinstance(v, types.Integer) for v in shape)) + + def typer(a, shape, strides): + if not isinstance(a, types.Array): + return + if not check_shape(shape) or not check_shape(strides): + return + if len(shape) != len(strides): + return + return a.copy(ndim=len(shape), layout='A') + + return typer + + +@lower_builtin(reshape_unchecked, types.Array, types.BaseTuple, types.BaseTuple) +def impl_shape_unchecked(context, builder, sig, args): + aryty = sig.args[0] + retty = sig.return_type + + ary = make_array(aryty)(context, builder, args[0]) + out = make_array(retty)(context, builder) + shape = cgutils.unpack_tuple(builder, args[1]) + strides = cgutils.unpack_tuple(builder, args[2]) + + populate_array(out, + data=ary.data, + shape=shape, + strides=strides, + itemsize=ary.itemsize, + meminfo=ary.meminfo, + ) + + res = out._getvalue() + return impl_ret_borrowed(context, builder, retty, res) + + +@extending.overload(np.lib.stride_tricks.as_strided) +def as_strided(x, shape=None, strides=None): + if shape in (None, types.none): + @register_jitable + def get_shape(x, shape): + return x.shape + else: + @register_jitable + def get_shape(x, shape): + return shape + + if strides in (None, types.none): + # When *strides* is not passed, as_strided() does a non-size-checking + # reshape(), possibly changing the original strides. This is too + # cumbersome to support right now, and a Web search shows all example + # use cases of as_strided() pass explicit *strides*. 
+ raise errors.TypingError("as_strided() strides argument cannot be None") + else: + @register_jitable + def get_strides(x, strides): + return strides + + def as_strided_impl(x, shape=None, strides=None): + x = reshape_unchecked(x, get_shape(x, shape), get_strides(x, strides)) + return x + + return as_strided_impl + + +@extending.overload(np.lib.stride_tricks.sliding_window_view) +def sliding_window_view(x, window_shape, axis=None): + + # Window shape must be given as either an integer or tuple of integers. + # We also need to generate buffer tuples we can modify to contain the + # final shape and strides (reshape_unchecked does not accept lists). + if isinstance(window_shape, types.Integer): + shape_buffer = tuple(range(x.ndim + 1)) + stride_buffer = tuple(range(x.ndim + 1)) + + @register_jitable + def get_window_shape(window_shape): + return (window_shape,) + + elif (isinstance(window_shape, types.UniTuple) and + isinstance(window_shape.dtype, types.Integer)): + shape_buffer = tuple(range(x.ndim + len(window_shape))) + stride_buffer = tuple(range(x.ndim + len(window_shape))) + + @register_jitable + def get_window_shape(window_shape): + return window_shape + + else: + raise errors.TypingError( + "window_shape must be an integer or tuple of integers" + ) + + # Axis must be integer, tuple of integers, or None for all axes. 
+ if is_nonelike(axis): + @register_jitable + def get_axis(window_shape, axis, ndim): + return list(range(ndim)) + + elif isinstance(axis, types.Integer): + @register_jitable + def get_axis(window_shape, axis, ndim): + return [ + normalize_axis("sliding_window_view", "axis", ndim, axis) + ] + + elif (isinstance(axis, types.UniTuple) and + isinstance(axis.dtype, types.Integer)): + @register_jitable + def get_axis(window_shape, axis, ndim): + return [normalize_axis("sliding_window_view", "axis", ndim, a) + for a in axis] + + else: + raise errors.TypingError( + "axis must be None, an integer or tuple of integers" + ) + + def sliding_window_view_impl(x, window_shape, axis=None): + window_shape = get_window_shape(window_shape) + axis = get_axis(window_shape, axis, x.ndim) + if len(window_shape) != len(axis): + raise ValueError( + "Must provide matching length window_shape and axis" + ) + + # Initialise view details with shape and strides of x. + out_shape = shape_buffer + out_strides = stride_buffer + for i in range(x.ndim): + out_shape = tuple_setitem(out_shape, i, x.shape[i]) + out_strides = tuple_setitem(out_strides, i, x.strides[i]) + + # Trim the dimensions being windowed and set the window shape and + # strides. Note: the same axis can be windowed repeatedly. + i = x.ndim + for ax, dim in zip(axis, window_shape): + if dim < 0: + raise ValueError( + "`window_shape` cannot contain negative values" + ) + if out_shape[ax] < dim: + raise ValueError( + "window_shape cannot be larger than input array shape" + ) + + trimmed = out_shape[ax] - dim + 1 + out_shape = tuple_setitem(out_shape, ax, trimmed) + out_shape = tuple_setitem(out_shape, i, dim) + out_strides = tuple_setitem(out_strides, i, x.strides[ax]) + i += 1 + + # The NumPy version calls as_strided, but our implementation of + # as_strided is effectively a wrapper for reshape_unchecked. 
+ view = reshape_unchecked(x, out_shape, out_strides) + return view + + return sliding_window_view_impl + + +@overload(bool) +def ol_bool(arr): + if isinstance(arr, types.Array): + def impl(arr): + if arr.size == 0: + if numpy_version < (2, 2): + return False # this is deprecated + else: + raise ValueError(("The truth value of an empty array is " + "ambiguous. Use `array.size > 0` to " + "check that an array is not empty.")) + elif arr.size == 1: + return bool(arr.take(0)) + else: + raise ValueError(("The truth value of an array with more than" + " one element is ambiguous. Use a.any() or" + " a.all()")) + return impl + + +@overload(np.swapaxes) +def numpy_swapaxes(a, axis1, axis2): + if not isinstance(axis1, (int, types.Integer)): + raise errors.TypingError('The second argument "axis1" must be an ' + 'integer') + if not isinstance(axis2, (int, types.Integer)): + raise errors.TypingError('The third argument "axis2" must be an ' + 'integer') + if not isinstance(a, types.Array): + raise errors.TypingError('The first argument "a" must be an array') + + # create tuple list for transpose + ndim = a.ndim + axes_list = tuple(range(ndim)) + + def impl(a, axis1, axis2): + axis1 = normalize_axis("np.swapaxes", "axis1", ndim, axis1) + axis2 = normalize_axis("np.swapaxes", "axis2", ndim, axis2) + + # to ensure tuple_setitem support of negative values + if axis1 < 0: + axis1 += ndim + if axis2 < 0: + axis2 += ndim + + axes_tuple = tuple_setitem(axes_list, axis1, axis2) + axes_tuple = tuple_setitem(axes_tuple, axis2, axis1) + return np.transpose(a, axes_tuple) + + return impl + + +@register_jitable +def _take_along_axis_impl( + arr, indices, axis, Ni_orig, Nk_orig, indices_broadcast_shape +): + # Based on example code in + # https://github.com/numpy/numpy/blob/623bc1fae1d47df24e7f1e29321d0c0ba2771ce0/numpy/lib/shape_base.py#L90-L103 + # With addition of pre-broadcasting: + # https://github.com/numpy/numpy/issues/19704 + + # Wrap axis, it's used in tuple_setitem so must be (axis 
>= 0) to ensure + # the GEP is in bounds. + axis = normalize_axis("np.take_along_axis", "axis", arr.ndim, axis) + + # Broadcast the two arrays to matching shapes: + arr_shape = list(arr.shape) + arr_shape[axis] = 1 + for i, (d1, d2) in enumerate(zip(arr_shape, indices.shape)): + if d1 == 1: + new_val = d2 + elif d2 == 1: + new_val = d1 + else: + if d1 != d2: + raise ValueError( + "`arr` and `indices` dimensions don't match" + ) + new_val = d1 + indices_broadcast_shape = tuple_setitem( + indices_broadcast_shape, i, new_val + ) + arr_broadcast_shape = tuple_setitem( + indices_broadcast_shape, axis, arr.shape[axis] + ) + arr = np.broadcast_to(arr, arr_broadcast_shape) + indices = np.broadcast_to(indices, indices_broadcast_shape) + + Ni = Ni_orig + if len(Ni_orig) > 0: + for i in range(len(Ni)): + Ni = tuple_setitem(Ni, i, arr.shape[i]) + Nk = Nk_orig + if len(Nk_orig) > 0: + for i in range(len(Nk)): + Nk = tuple_setitem(Nk, i, arr.shape[axis + 1 + i]) + + J = indices.shape[axis] # Need not equal M + out = np.empty(Ni + (J,) + Nk, arr.dtype) + + np_s_ = (slice(None, None, None),) + + for ii in np.ndindex(Ni): + for kk in np.ndindex(Nk): + a_1d = arr[ii + np_s_ + kk] + indices_1d = indices[ii + np_s_ + kk] + out_1d = out[ii + np_s_ + kk] + for j in range(J): + out_1d[j] = a_1d[indices_1d[j]] + return out + + +@overload(np.take_along_axis) +def arr_take_along_axis(arr, indices, axis): + if not isinstance(arr, types.Array): + raise errors.TypingError('The first argument "arr" must be an array') + if not isinstance(indices, types.Array): + raise errors.TypingError( + 'The second argument "indices" must be an array') + if not isinstance(indices.dtype, types.Integer): + raise errors.TypingError('The indices array must contain integers') + if is_nonelike(axis): + arr_ndim = 1 + else: + arr_ndim = arr.ndim + if arr_ndim != indices.ndim: + # Matches NumPy error: + raise errors.TypingError( + "`indices` and `arr` must have the same number of dimensions" + ) + + 
indices_broadcast_shape = tuple(range(indices.ndim)) + if is_nonelike(axis): + def take_along_axis_impl(arr, indices, axis): + return _take_along_axis_impl(arr.flatten(), indices, 0, (), (), + indices_broadcast_shape) + else: + check_is_integer(axis, "axis") + if not isinstance(axis, types.IntegerLiteral): + raise errors.NumbaValueError("axis must be a literal value") + axis = axis.literal_value + if axis < 0: + axis = arr.ndim + axis + + if axis < 0 or axis >= arr.ndim: + raise errors.NumbaValueError("axis is out of bounds") + + Ni = tuple(range(axis)) + Nk = tuple(range(axis + 1, arr.ndim)) + + def take_along_axis_impl(arr, indices, axis): + return _take_along_axis_impl(arr, indices, axis, Ni, Nk, + indices_broadcast_shape) + return take_along_axis_impl + + +@overload(np.nan_to_num) +def nan_to_num_impl(x, copy=True, nan=0.0): + if isinstance(x, types.Number): + if isinstance(x, types.Integer): + # Integers do not have nans or infs + def impl(x, copy=True, nan=0.0): + return x + + elif isinstance(x, types.Float): + def impl(x, copy=True, nan=0.0): + if np.isnan(x): + return nan + elif np.isneginf(x): + return np.finfo(type(x)).min + elif np.isposinf(x): + return np.finfo(type(x)).max + return x + elif isinstance(x, types.Complex): + def impl(x, copy=True, nan=0.0): + r = np.nan_to_num(x.real, nan=nan) + c = np.nan_to_num(x.imag, nan=nan) + return complex(r, c) + else: + raise errors.TypingError( + "Only Integer, Float, and Complex values are accepted" + ) + + elif type_can_asarray(x): + if isinstance(x.dtype, types.Integer): + # Integers do not have nans or infs + def impl(x, copy=True, nan=0.0): + return x + elif isinstance(x.dtype, types.Float): + def impl(x, copy=True, nan=0.0): + min_inf = np.finfo(x.dtype).min + max_inf = np.finfo(x.dtype).max + + x_ = np.asarray(x) + output = np.copy(x_) if copy else x_ + + output_flat = output.flat + for i in range(output.size): + if np.isnan(output_flat[i]): + output_flat[i] = nan + elif np.isneginf(output_flat[i]): + 
output_flat[i] = min_inf + elif np.isposinf(output_flat[i]): + output_flat[i] = max_inf + return output + elif isinstance(x.dtype, types.Complex): + def impl(x, copy=True, nan=0.0): + x_ = np.asarray(x) + output = np.copy(x_) if copy else x_ + + np.nan_to_num(output.real, copy=False, nan=nan) + np.nan_to_num(output.imag, copy=False, nan=nan) + return output + else: + raise errors.TypingError( + "Only Integer, Float, and Complex values are accepted" + ) + else: + raise errors.TypingError("The first argument must be a scalar or an " + "array-like") + return impl diff --git a/venv/lib/python3.10/site-packages/numba/np/extensions.py b/venv/lib/python3.10/site-packages/numba/np/extensions.py new file mode 100644 index 0000000000000000000000000000000000000000..e0051a1fc18a24df57dfc337838c37d170b22521 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/extensions.py @@ -0,0 +1,10 @@ +""" +NumPy extensions. +""" + +from numba.np.arraymath import cross2d + + +__all__ = [ + 'cross2d' +] diff --git a/venv/lib/python3.10/site-packages/numba/np/linalg.py b/venv/lib/python3.10/site-packages/numba/np/linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..b25b2c6110c9a756eb9438b8daa49f51f9f0497d --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/linalg.py @@ -0,0 +1,2853 @@ +""" +Implementation of linear algebra operations. 
+""" + + +import contextlib +import warnings + +from llvmlite import ir + +import numpy as np +import operator + +from numba.core.imputils import (lower_builtin, impl_ret_borrowed, + impl_ret_new_ref, impl_ret_untracked) +from numba.core.typing import signature +from numba.core.extending import intrinsic, overload, register_jitable +from numba.core import types, cgutils, config +from numba.core.errors import TypingError, NumbaTypeError, \ + NumbaPerformanceWarning +from .arrayobj import make_array, _empty_nd_impl, array_copy +from numba.np import numpy_support as np_support + +ll_char = ir.IntType(8) +ll_char_p = ll_char.as_pointer() +ll_void_p = ll_char_p +ll_intc = ir.IntType(32) +ll_intc_p = ll_intc.as_pointer() +intp_t = cgutils.intp_t +ll_intp_p = intp_t.as_pointer() + + +# fortran int type, this needs to match the F_INT C declaration in +# _lapack.c and is present to accommodate potential future 64bit int +# based LAPACK use. +F_INT_nptype = np.int32 +if config.USE_LEGACY_TYPE_SYSTEM: + F_INT_nbtype = types.int32 + + # BLAS kinds as letters + _blas_kinds = { + types.float32: 's', + types.float64: 'd', + types.complex64: 'c', + types.complex128: 'z', + } +else: + F_INT_nbtype = types.np_int32 + + # BLAS kinds as letters + _blas_kinds = { + types.np_float32: 's', + types.np_float64: 'd', + types.np_complex64: 'c', + types.np_complex128: 'z', + } + + +def get_blas_kind(dtype, func_name=""): + kind = _blas_kinds.get(dtype) + if kind is None: + raise NumbaTypeError("unsupported dtype for %s()" % (func_name,)) + return kind + + +def ensure_blas(): + try: + import scipy.linalg.cython_blas + except ImportError: + raise ImportError("scipy 0.16+ is required for linear algebra") + + +def ensure_lapack(): + try: + import scipy.linalg.cython_lapack + except ImportError: + raise ImportError("scipy 0.16+ is required for linear algebra") + + +def make_constant_slot(context, builder, ty, val): + const = context.get_constant_generic(builder, ty, val) + return 
cgutils.alloca_once_value(builder, const) + + +class _BLAS: + """ + Functions to return type signatures for wrapped + BLAS functions. + """ + + def __init__(self): + ensure_blas() + + @classmethod + def numba_xxnrm2(cls, dtype): + rtype = getattr(dtype, "underlying_float", dtype) + sig = types.intc(types.char, # kind + types.intp, # n + types.CPointer(dtype), # x + types.intp, # incx + types.CPointer(rtype)) # returned + + return types.ExternalFunction("numba_xxnrm2", sig) + + @classmethod + def numba_xxgemm(cls, dtype): + sig = types.intc( + types.char, # kind + types.char, # transa + types.char, # transb + types.intp, # m + types.intp, # n + types.intp, # k + types.CPointer(dtype), # alpha + types.CPointer(dtype), # a + types.intp, # lda + types.CPointer(dtype), # b + types.intp, # ldb + types.CPointer(dtype), # beta + types.CPointer(dtype), # c + types.intp # ldc + ) + return types.ExternalFunction("numba_xxgemm", sig) + + +class _LAPACK: + """ + Functions to return type signatures for wrapped + LAPACK functions. 
+ """ + + def __init__(self): + ensure_lapack() + + @classmethod + def numba_xxgetrf(cls, dtype): + sig = types.intc(types.char, # kind + types.intp, # m + types.intp, # n + types.CPointer(dtype), # a + types.intp, # lda + types.CPointer(F_INT_nbtype) # ipiv + ) + return types.ExternalFunction("numba_xxgetrf", sig) + + @classmethod + def numba_ez_xxgetri(cls, dtype): + sig = types.intc(types.char, # kind + types.intp, # n + types.CPointer(dtype), # a + types.intp, # lda + types.CPointer(F_INT_nbtype) # ipiv + ) + return types.ExternalFunction("numba_ez_xxgetri", sig) + + @classmethod + def numba_ez_rgeev(cls, dtype): + sig = types.intc(types.char, # kind + types.char, # jobvl + types.char, # jobvr + types.intp, # n + types.CPointer(dtype), # a + types.intp, # lda + types.CPointer(dtype), # wr + types.CPointer(dtype), # wi + types.CPointer(dtype), # vl + types.intp, # ldvl + types.CPointer(dtype), # vr + types.intp # ldvr + ) + return types.ExternalFunction("numba_ez_rgeev", sig) + + @classmethod + def numba_ez_cgeev(cls, dtype): + sig = types.intc(types.char, # kind + types.char, # jobvl + types.char, # jobvr + types.intp, # n + types.CPointer(dtype), # a + types.intp, # lda + types.CPointer(dtype), # w + types.CPointer(dtype), # vl + types.intp, # ldvl + types.CPointer(dtype), # vr + types.intp # ldvr + ) + return types.ExternalFunction("numba_ez_cgeev", sig) + + @classmethod + def numba_ez_xxxevd(cls, dtype): + wtype = getattr(dtype, "underlying_float", dtype) + sig = types.intc(types.char, # kind + types.char, # jobz + types.char, # uplo + types.intp, # n + types.CPointer(dtype), # a + types.intp, # lda + types.CPointer(wtype), # w + ) + return types.ExternalFunction("numba_ez_xxxevd", sig) + + @classmethod + def numba_xxpotrf(cls, dtype): + sig = types.intc(types.char, # kind + types.char, # uplo + types.intp, # n + types.CPointer(dtype), # a + types.intp # lda + ) + return types.ExternalFunction("numba_xxpotrf", sig) + + @classmethod + def numba_ez_gesdd(cls, 
dtype): + stype = getattr(dtype, "underlying_float", dtype) + sig = types.intc( + types.char, # kind + types.char, # jobz + types.intp, # m + types.intp, # n + types.CPointer(dtype), # a + types.intp, # lda + types.CPointer(stype), # s + types.CPointer(dtype), # u + types.intp, # ldu + types.CPointer(dtype), # vt + types.intp # ldvt + ) + + return types.ExternalFunction("numba_ez_gesdd", sig) + + @classmethod + def numba_ez_geqrf(cls, dtype): + sig = types.intc( + types.char, # kind + types.intp, # m + types.intp, # n + types.CPointer(dtype), # a + types.intp, # lda + types.CPointer(dtype), # tau + ) + return types.ExternalFunction("numba_ez_geqrf", sig) + + @classmethod + def numba_ez_xxgqr(cls, dtype): + sig = types.intc( + types.char, # kind + types.intp, # m + types.intp, # n + types.intp, # k + types.CPointer(dtype), # a + types.intp, # lda + types.CPointer(dtype), # tau + ) + return types.ExternalFunction("numba_ez_xxgqr", sig) + + @classmethod + def numba_ez_gelsd(cls, dtype): + rtype = getattr(dtype, "underlying_float", dtype) + sig = types.intc( + types.char, # kind + types.intp, # m + types.intp, # n + types.intp, # nrhs + types.CPointer(dtype), # a + types.intp, # lda + types.CPointer(dtype), # b + types.intp, # ldb + types.CPointer(rtype), # S + types.float64, # rcond + types.CPointer(types.intc) # rank + ) + return types.ExternalFunction("numba_ez_gelsd", sig) + + @classmethod + def numba_xgesv(cls, dtype): + sig = types.intc( + types.char, # kind + types.intp, # n + types.intp, # nhrs + types.CPointer(dtype), # a + types.intp, # lda + types.CPointer(F_INT_nbtype), # ipiv + types.CPointer(dtype), # b + types.intp # ldb + ) + return types.ExternalFunction("numba_xgesv", sig) + + +@contextlib.contextmanager +def make_contiguous(context, builder, sig, args): + """ + Ensure that all array arguments are contiguous, if necessary by + copying them. + A new (sig, args) tuple is yielded. 
+ """ + newtys = [] + newargs = [] + copies = [] + for ty, val in zip(sig.args, args): + if not isinstance(ty, types.Array) or ty.layout in 'CF': + newty, newval = ty, val + else: + newty = ty.copy(layout='C') + copysig = signature(newty, ty) + newval = array_copy(context, builder, copysig, (val,)) + copies.append((newty, newval)) + newtys.append(newty) + newargs.append(newval) + yield signature(sig.return_type, *newtys), tuple(newargs) + for ty, val in copies: + context.nrt.decref(builder, ty, val) + + +def check_c_int(context, builder, n): + """ + Check whether *n* fits in a C `int`. + """ + _maxint = 2**31 - 1 + + def impl(n): + if n > _maxint: + raise OverflowError("array size too large to fit in C int") + + context.compile_internal(builder, impl, + signature(types.none, types.intp), (n,)) + + +def check_blas_return(context, builder, res): + """ + Check the integer error return from one of the BLAS wrappers in + _helperlib.c. + """ + with builder.if_then(cgutils.is_not_null(builder, res), likely=False): + # Those errors shouldn't happen, it's easier to just abort the process + pyapi = context.get_python_api(builder) + pyapi.gil_ensure() + pyapi.fatal_error("BLAS wrapper returned with an error") + + +def check_lapack_return(context, builder, res): + """ + Check the integer error return from one of the LAPACK wrappers in + _helperlib.c. + """ + with builder.if_then(cgutils.is_not_null(builder, res), likely=False): + # Those errors shouldn't happen, it's easier to just abort the process + pyapi = context.get_python_api(builder) + pyapi.gil_ensure() + pyapi.fatal_error("LAPACK wrapper returned with an error") + + +def call_xxdot(context, builder, conjugate, dtype, + n, a_data, b_data, out_data): + """ + Call the BLAS vector * vector product function for the given arguments. 
+ """ + fnty = ir.FunctionType(ir.IntType(32), + [ll_char, ll_char, intp_t, # kind, conjugate, n + ll_void_p, ll_void_p, ll_void_p, # a, b, out + ]) + fn = cgutils.get_or_insert_function(builder.module, fnty, "numba_xxdot") + + kind = get_blas_kind(dtype) + kind_val = ir.Constant(ll_char, ord(kind)) + conjugate = ir.Constant(ll_char, int(conjugate)) + + res = builder.call(fn, (kind_val, conjugate, n, + builder.bitcast(a_data, ll_void_p), + builder.bitcast(b_data, ll_void_p), + builder.bitcast(out_data, ll_void_p))) + check_blas_return(context, builder, res) + + +def call_xxgemv(context, builder, do_trans, + m_type, m_shapes, m_data, v_data, out_data): + """ + Call the BLAS matrix * vector product function for the given arguments. + """ + fnty = ir.FunctionType(ir.IntType(32), + [ll_char, ll_char, # kind, trans + intp_t, intp_t, # m, n + ll_void_p, ll_void_p, intp_t, # alpha, a, lda + ll_void_p, ll_void_p, ll_void_p, # x, beta, y + ]) + fn = cgutils.get_or_insert_function(builder.module, fnty, "numba_xxgemv") + + dtype = m_type.dtype + alpha = make_constant_slot(context, builder, dtype, 1.0) + beta = make_constant_slot(context, builder, dtype, 0.0) + + if m_type.layout == 'F': + m, n = m_shapes + lda = m_shapes[0] + else: + n, m = m_shapes + lda = m_shapes[1] + + kind = get_blas_kind(dtype) + kind_val = ir.Constant(ll_char, ord(kind)) + trans = ir.Constant(ll_char, ord('t') if do_trans else ord('n')) + + res = builder.call(fn, (kind_val, trans, m, n, + builder.bitcast(alpha, ll_void_p), + builder.bitcast(m_data, ll_void_p), lda, + builder.bitcast(v_data, ll_void_p), + builder.bitcast(beta, ll_void_p), + builder.bitcast(out_data, ll_void_p))) + check_blas_return(context, builder, res) + + +def call_xxgemm(context, builder, + x_type, x_shapes, x_data, + y_type, y_shapes, y_data, + out_type, out_shapes, out_data): + """ + Call the BLAS matrix * matrix product function for the given arguments. 
+ """ + fnty = ir.FunctionType(ir.IntType(32), + [ll_char, # kind + ll_char, ll_char, # transa, transb + intp_t, intp_t, intp_t, # m, n, k + ll_void_p, ll_void_p, intp_t, # alpha, a, lda + ll_void_p, intp_t, ll_void_p, # b, ldb, beta + ll_void_p, intp_t, # c, ldc + ]) + fn = cgutils.get_or_insert_function(builder.module, fnty, "numba_xxgemm") + + m, k = x_shapes + _k, n = y_shapes + dtype = x_type.dtype + alpha = make_constant_slot(context, builder, dtype, 1.0) + beta = make_constant_slot(context, builder, dtype, 0.0) + + trans = ir.Constant(ll_char, ord('t')) + notrans = ir.Constant(ll_char, ord('n')) + + def get_array_param(ty, shapes, data): + return ( + # Transpose if layout different from result's + notrans if ty.layout == out_type.layout else trans, + # Size of the inner dimension in physical array order + shapes[1] if ty.layout == 'C' else shapes[0], + # The data pointer, unit-less + builder.bitcast(data, ll_void_p), + ) + + transa, lda, data_a = get_array_param(y_type, y_shapes, y_data) + transb, ldb, data_b = get_array_param(x_type, x_shapes, x_data) + _, ldc, data_c = get_array_param(out_type, out_shapes, out_data) + + kind = get_blas_kind(dtype) + kind_val = ir.Constant(ll_char, ord(kind)) + + res = builder.call(fn, (kind_val, transa, transb, n, m, k, + builder.bitcast(alpha, ll_void_p), data_a, lda, + data_b, ldb, builder.bitcast(beta, ll_void_p), + data_c, ldc)) + check_blas_return(context, builder, res) + + +def dot_2_mm(context, builder, sig, args): + """ + np.dot(matrix, matrix) + """ + def dot_impl(a, b): + m, k = a.shape + _k, n = b.shape + if k == 0: + return np.zeros((m, n), a.dtype) + out = np.empty((m, n), a.dtype) + return np.dot(a, b, out) + + res = context.compile_internal(builder, dot_impl, sig, args) + return impl_ret_new_ref(context, builder, sig.return_type, res) + + +def dot_2_vm(context, builder, sig, args): + """ + np.dot(vector, matrix) + """ + def dot_impl(a, b): + m, = a.shape + _m, n = b.shape + if m == 0: + return np.zeros((n, 
), a.dtype) + out = np.empty((n, ), a.dtype) + return np.dot(a, b, out) + + res = context.compile_internal(builder, dot_impl, sig, args) + return impl_ret_new_ref(context, builder, sig.return_type, res) + + +def dot_2_mv(context, builder, sig, args): + """ + np.dot(matrix, vector) + """ + def dot_impl(a, b): + m, n = a.shape + _n, = b.shape + if n == 0: + return np.zeros((m, ), a.dtype) + out = np.empty((m, ), a.dtype) + return np.dot(a, b, out) + + res = context.compile_internal(builder, dot_impl, sig, args) + return impl_ret_new_ref(context, builder, sig.return_type, res) + + +def dot_2_vv(context, builder, sig, args, conjugate=False): + """ + np.dot(vector, vector) + np.vdot(vector, vector) + """ + aty, bty = sig.args + dtype = sig.return_type + a = make_array(aty)(context, builder, args[0]) + b = make_array(bty)(context, builder, args[1]) + n, = cgutils.unpack_tuple(builder, a.shape) + + def check_args(a, b): + m, = a.shape + n, = b.shape + if m != n: + raise ValueError("incompatible array sizes for np.dot(a, b) " + "(vector * vector)") + + context.compile_internal(builder, check_args, + signature(types.none, *sig.args), args) + check_c_int(context, builder, n) + + out = cgutils.alloca_once(builder, context.get_value_type(dtype)) + call_xxdot(context, builder, conjugate, dtype, n, a.data, b.data, out) + return builder.load(out) + + +@overload(np.dot) +def dot_2(left, right): + """ + np.dot(a, b) + """ + return dot_2_impl('np.dot()', left, right) + + +@overload(operator.matmul) +def matmul_2(left, right): + """ + a @ b + """ + return dot_2_impl("'@'", left, right) + + +def dot_2_impl(name, left, right): + if isinstance(left, types.Array) and isinstance(right, types.Array): + @intrinsic + def _impl(typingcontext, left, right): + ndims = (left.ndim, right.ndim) + + def _dot2_codegen(context, builder, sig, args): + ensure_blas() + + with make_contiguous(context, builder, sig, args) as (sig, args): + if ndims == (2, 2): + return dot_2_mm(context, builder, sig, args) 
+ elif ndims == (2, 1): + return dot_2_mv(context, builder, sig, args) + elif ndims == (1, 2): + return dot_2_vm(context, builder, sig, args) + elif ndims == (1, 1): + return dot_2_vv(context, builder, sig, args) + else: + raise AssertionError('unreachable') + + if left.dtype != right.dtype: + raise TypingError( + "%s arguments must all have the same dtype" % name) + + if ndims == (2, 2): + return_type = types.Array(left.dtype, 2, 'C') + elif ndims == (2, 1) or ndims == (1, 2): + return_type = types.Array(left.dtype, 1, 'C') + elif ndims == (1, 1): + return_type = left.dtype + else: + raise TypingError(("%s: inputs must have compatible " + "dimensions") % name) + return signature(return_type, left, right), _dot2_codegen + + if left.layout not in 'CF' or right.layout not in 'CF': + warnings.warn( + "%s is faster on contiguous arrays, called on %s" % ( + name, (left, right),), NumbaPerformanceWarning) + + return lambda left, right: _impl(left, right) + + +@overload(np.vdot) +def vdot(left, right): + """ + np.vdot(a, b) + """ + if isinstance(left, types.Array) and isinstance(right, types.Array): + @intrinsic + def _impl(typingcontext, left, right): + def codegen(context, builder, sig, args): + ensure_blas() + + with make_contiguous(context, builder, sig, args) as\ + (sig, args): + return dot_2_vv(context, builder, sig, args, conjugate=True) + + if left.ndim != 1 or right.ndim != 1: + raise TypingError("np.vdot() only supported on 1-D arrays") + + if left.dtype != right.dtype: + raise TypingError( + "np.vdot() arguments must all have the same dtype") + return signature(left.dtype, left, right), codegen + + if left.layout not in 'CF' or right.layout not in 'CF': + warnings.warn( + "np.vdot() is faster on contiguous arrays, called on %s" + % ((left, right),), NumbaPerformanceWarning) + + return lambda left, right: _impl(left, right) + + +def dot_3_vm_check_args(a, b, out): + m, = a.shape + _m, n = b.shape + if m != _m: + raise ValueError("incompatible array sizes for " + 
"np.dot(a, b) (vector * matrix)") + if out.shape != (n,): + raise ValueError("incompatible output array size for " + "np.dot(a, b, out) (vector * matrix)") + + +def dot_3_mv_check_args(a, b, out): + m, _n = a.shape + n, = b.shape + if n != _n: + raise ValueError("incompatible array sizes for np.dot(a, b) " + "(matrix * vector)") + if out.shape != (m,): + raise ValueError("incompatible output array size for " + "np.dot(a, b, out) (matrix * vector)") + + +def dot_3_vm(context, builder, sig, args): + """ + np.dot(vector, matrix, out) + np.dot(matrix, vector, out) + """ + xty, yty, outty = sig.args + assert outty == sig.return_type + dtype = xty.dtype + + x = make_array(xty)(context, builder, args[0]) + y = make_array(yty)(context, builder, args[1]) + out = make_array(outty)(context, builder, args[2]) + x_shapes = cgutils.unpack_tuple(builder, x.shape) + y_shapes = cgutils.unpack_tuple(builder, y.shape) + out_shapes = cgutils.unpack_tuple(builder, out.shape) + if xty.ndim < yty.ndim: + # Vector * matrix + # Asked for x * y, we will compute y.T * x + mty = yty + m_shapes = y_shapes + v_shape = x_shapes[0] + lda = m_shapes[1] + do_trans = yty.layout == 'F' + m_data, v_data = y.data, x.data + check_args = dot_3_vm_check_args + else: + # Matrix * vector + # We will compute x * y + mty = xty + m_shapes = x_shapes + v_shape = y_shapes[0] + lda = m_shapes[0] + do_trans = xty.layout == 'C' + m_data, v_data = x.data, y.data + check_args = dot_3_mv_check_args + + context.compile_internal(builder, check_args, + signature(types.none, *sig.args), args) + for val in m_shapes: + check_c_int(context, builder, val) + + zero = context.get_constant(types.intp, 0) + both_empty = builder.icmp_signed('==', v_shape, zero) + matrix_empty = builder.icmp_signed('==', lda, zero) + is_empty = builder.or_(both_empty, matrix_empty) + with builder.if_else(is_empty, likely=False) as (empty, nonempty): + with empty: + cgutils.memset(builder, out.data, + builder.mul(out.itemsize, out.nitems), 0) + with 
nonempty: + call_xxgemv(context, builder, do_trans, mty, m_shapes, m_data, + v_data, out.data) + + return impl_ret_borrowed(context, builder, sig.return_type, + out._getvalue()) + + +def dot_3_mm(context, builder, sig, args): + """ + np.dot(matrix, matrix, out) + """ + xty, yty, outty = sig.args + assert outty == sig.return_type + dtype = xty.dtype + + x = make_array(xty)(context, builder, args[0]) + y = make_array(yty)(context, builder, args[1]) + out = make_array(outty)(context, builder, args[2]) + x_shapes = cgutils.unpack_tuple(builder, x.shape) + y_shapes = cgutils.unpack_tuple(builder, y.shape) + out_shapes = cgutils.unpack_tuple(builder, out.shape) + m, k = x_shapes + _k, n = y_shapes + + # The only case Numpy supports + assert outty.layout == 'C' + + def check_args(a, b, out): + m, k = a.shape + _k, n = b.shape + if k != _k: + raise ValueError("incompatible array sizes for np.dot(a, b) " + "(matrix * matrix)") + if out.shape != (m, n): + raise ValueError("incompatible output array size for " + "np.dot(a, b, out) (matrix * matrix)") + + context.compile_internal(builder, check_args, + signature(types.none, *sig.args), args) + + check_c_int(context, builder, m) + check_c_int(context, builder, k) + check_c_int(context, builder, n) + + x_data = x.data + y_data = y.data + out_data = out.data + + # If eliminated dimension is zero, set all entries to zero and return + zero = context.get_constant(types.intp, 0) + both_empty = builder.icmp_signed('==', k, zero) + x_empty = builder.icmp_signed('==', m, zero) + y_empty = builder.icmp_signed('==', n, zero) + is_empty = builder.or_(both_empty, builder.or_(x_empty, y_empty)) + with builder.if_else(is_empty, likely=False) as (empty, nonempty): + with empty: + cgutils.memset(builder, out.data, + builder.mul(out.itemsize, out.nitems), 0) + with nonempty: + # Check if any of the operands is really a 1-d vector represented + # as a (1, k) or (k, 1) 2-d array. 
In those cases, it is pessimal + # to call the generic matrix * matrix product BLAS function. + one = context.get_constant(types.intp, 1) + is_left_vec = builder.icmp_signed('==', m, one) + is_right_vec = builder.icmp_signed('==', n, one) + + with builder.if_else(is_right_vec) as (r_vec, r_mat): + with r_vec: + with builder.if_else(is_left_vec) as (v_v, m_v): + with v_v: + # V * V + call_xxdot(context, builder, False, dtype, + k, x_data, y_data, out_data) + with m_v: + # M * V + do_trans = xty.layout == outty.layout + call_xxgemv(context, builder, do_trans, + xty, x_shapes, x_data, y_data, out_data) + with r_mat: + with builder.if_else(is_left_vec) as (v_m, m_m): + with v_m: + # V * M + do_trans = yty.layout != outty.layout + call_xxgemv(context, builder, do_trans, + yty, y_shapes, y_data, x_data, out_data) + with m_m: + # M * M + call_xxgemm(context, builder, + xty, x_shapes, x_data, + yty, y_shapes, y_data, + outty, out_shapes, out_data) + + return impl_ret_borrowed(context, builder, sig.return_type, + out._getvalue()) + + +@overload(np.dot) +def dot_3(left, right, out): + """ + np.dot(a, b, out) + """ + if (isinstance(left, types.Array) and isinstance(right, types.Array) and + isinstance(out, types.Array)): + @intrinsic + def _impl(typingcontext, left, right, out): + def codegen(context, builder, sig, args): + ensure_blas() + + with make_contiguous(context, builder, sig, args) as (sig, + args): + ndims = set(x.ndim for x in sig.args[:2]) + if ndims == {2}: + return dot_3_mm(context, builder, sig, args) + elif ndims == {1, 2}: + return dot_3_vm(context, builder, sig, args) + else: + raise AssertionError('unreachable') + if left.dtype != right.dtype or left.dtype != out.dtype: + raise TypingError( + "np.dot() arguments must all have the same dtype") + + return signature(out, left, right, out), codegen + + if left.layout not in 'CF' or right.layout not in 'CF' or out.layout\ + not in 'CF': + warnings.warn( + "np.vdot() is faster on contiguous arrays, called on %s" 
+ % ((left, right),), NumbaPerformanceWarning) + + return lambda left, right, out: _impl(left, right, out) + + +if config.USE_LEGACY_TYPE_SYSTEM: + fatal_error_func = types.ExternalFunction("numba_fatal_error", types.intc()) +else: + fatal_error_func = types.ExternalFunction("numba_fatal_error", types.c_intp()) + + +@register_jitable +def _check_finite_matrix(a): + for v in np.nditer(a): + if not np.isfinite(v.item()): + raise np.linalg.LinAlgError( + "Array must not contain infs or NaNs.") + + +def _check_linalg_matrix(a, func_name, la_prefix=True): + # la_prefix is present as some functions, e.g. np.trace() + # are documented under "linear algebra" but aren't in the + # module + prefix = "np.linalg" if la_prefix else "np" + interp = (prefix, func_name) + # Unpack optional type + if isinstance(a, types.Optional): + a = a.type + if not isinstance(a, types.Array): + msg = "%s.%s() only supported for array types" % interp + raise TypingError(msg, highlighting=False) + if not a.ndim == 2: + msg = "%s.%s() only supported on 2-D arrays." % interp + raise TypingError(msg, highlighting=False) + if not isinstance(a.dtype, (types.Float, types.Complex)): + msg = "%s.%s() only supported on "\ + "float and complex arrays." % interp + raise TypingError(msg, highlighting=False) + + +def _check_homogeneous_types(func_name, *types): + t0 = types[0].dtype + for t in types[1:]: + if t.dtype != t0: + msg = "np.linalg.%s() only supports inputs that have homogeneous dtypes." % func_name + raise TypingError(msg, highlighting=False) + + +def _copy_to_fortran_order(): + pass + + +@overload(_copy_to_fortran_order) +def ol_copy_to_fortran_order(a): + # This function copies the array 'a' into a new array with fortran order. + # This exists because the copy routines don't take order flags yet. 
+ F_layout = a.layout == 'F' + A_layout = a.layout == 'A' + def impl(a): + if F_layout: + # it's F ordered at compile time, just copy + acpy = np.copy(a) + elif A_layout: + # decide based on runtime value + flag_f = a.flags.f_contiguous + if flag_f: + # it's already F ordered, so copy but in a round about way to + # ensure that the copy is also F ordered + acpy = np.copy(a.T).T + else: + # it's something else ordered, so let asfortranarray deal with + # copying and making it fortran ordered + acpy = np.asfortranarray(a) + else: + # it's C ordered at compile time, asfortranarray it. + acpy = np.asfortranarray(a) + return acpy + return impl + + +@register_jitable +def _inv_err_handler(r): + if r != 0: + if r < 0: + fatal_error_func() + assert 0 # unreachable + if r > 0: + raise np.linalg.LinAlgError( + "Matrix is singular to machine precision.") + +@register_jitable +def _dummy_liveness_func(a): + """pass a list of variables to be preserved through dead code elimination""" + return a[0] + + +@overload(np.linalg.inv) +def inv_impl(a): + ensure_lapack() + + _check_linalg_matrix(a, "inv") + + numba_xxgetrf = _LAPACK().numba_xxgetrf(a.dtype) + + numba_xxgetri = _LAPACK().numba_ez_xxgetri(a.dtype) + + kind = ord(get_blas_kind(a.dtype, "inv")) + + def inv_impl(a): + n = a.shape[-1] + if a.shape[-2] != n: + msg = "Last 2 dimensions of the array must be square." 
+ raise np.linalg.LinAlgError(msg) + + _check_finite_matrix(a) + + acpy = _copy_to_fortran_order(a) + + if n == 0: + return acpy + + ipiv = np.empty(n, dtype=F_INT_nptype) + + r = numba_xxgetrf(kind, n, n, acpy.ctypes, n, ipiv.ctypes) + _inv_err_handler(r) + + r = numba_xxgetri(kind, n, acpy.ctypes, n, ipiv.ctypes) + _inv_err_handler(r) + + # help liveness analysis + _dummy_liveness_func([acpy.size, ipiv.size]) + return acpy + + return inv_impl + + +@register_jitable +def _handle_err_maybe_convergence_problem(r): + if r != 0: + if r < 0: + fatal_error_func() + assert 0 # unreachable + if r > 0: + raise ValueError("Internal algorithm failed to converge.") + + +def _check_linalg_1_or_2d_matrix(a, func_name, la_prefix=True): + # la_prefix is present as some functions, e.g. np.trace() + # are documented under "linear algebra" but aren't in the + # module + prefix = "np.linalg" if la_prefix else "np" + interp = (prefix, func_name) + # checks that a matrix is 1 or 2D + if not isinstance(a, types.Array): + raise TypingError("%s.%s() only supported for array types " + % interp) + if not a.ndim <= 2: + raise TypingError("%s.%s() only supported on 1 and 2-D arrays " + % interp) + if not isinstance(a.dtype, (types.Float, types.Complex)): + raise TypingError("%s.%s() only supported on " + "float and complex arrays." % interp) + + +@overload(np.linalg.cholesky) +def cho_impl(a): + ensure_lapack() + + _check_linalg_matrix(a, "cholesky") + + numba_xxpotrf = _LAPACK().numba_xxpotrf(a.dtype) + + kind = ord(get_blas_kind(a.dtype, "cholesky")) + UP = ord('U') + LO = ord('L') + + def cho_impl(a): + n = a.shape[-1] + if a.shape[-2] != n: + msg = "Last 2 dimensions of the array must be square." 
+ raise np.linalg.LinAlgError(msg) + + # The output is allocated in C order + out = a.copy() + + if n == 0: + return out + + # Pass UP since xxpotrf() operates in F order + # The semantics ensure this works fine + # (out is really its Hermitian in F order, but UP instructs + # xxpotrf to compute the Hermitian of the upper triangle + # => they cancel each other) + r = numba_xxpotrf(kind, UP, n, out.ctypes, n) + if r != 0: + if r < 0: + fatal_error_func() + assert 0 # unreachable + if r > 0: + raise np.linalg.LinAlgError( + "Matrix is not positive definite.") + # Zero out upper triangle, in F order + for col in range(n): + out[:col, col] = 0 + return out + + return cho_impl + +@overload(np.linalg.eig) +def eig_impl(a): + ensure_lapack() + + _check_linalg_matrix(a, "eig") + + numba_ez_rgeev = _LAPACK().numba_ez_rgeev(a.dtype) + numba_ez_cgeev = _LAPACK().numba_ez_cgeev(a.dtype) + + kind = ord(get_blas_kind(a.dtype, "eig")) + + JOBVL = ord('N') + JOBVR = ord('V') + + def real_eig_impl(a): + """ + eig() implementation for real arrays. + """ + n = a.shape[-1] + if a.shape[-2] != n: + msg = "Last 2 dimensions of the array must be square." + raise np.linalg.LinAlgError(msg) + + _check_finite_matrix(a) + + acpy = _copy_to_fortran_order(a) + + ldvl = 1 + ldvr = n + wr = np.empty(n, dtype=a.dtype) + wi = np.empty(n, dtype=a.dtype) + vl = np.empty((n, ldvl), dtype=a.dtype) + vr = np.empty((n, ldvr), dtype=a.dtype) + + if n == 0: + return (wr, vr.T) + + r = numba_ez_rgeev(kind, + JOBVL, + JOBVR, + n, + acpy.ctypes, + n, + wr.ctypes, + wi.ctypes, + vl.ctypes, + ldvl, + vr.ctypes, + ldvr) + _handle_err_maybe_convergence_problem(r) + + # By design numba does not support dynamic return types, however, + # Numpy does. Numpy uses this ability in the case of returning + # eigenvalues/vectors of a real matrix. 
The return type of + # np.linalg.eig(), when operating on a matrix in real space + # depends on the values present in the matrix itself (recalling + # that eigenvalues are the roots of the characteristic polynomial + # of the system matrix, which will by construction depend on the + # values present in the system matrix). As numba cannot handle + # the case of a runtime decision based domain change relative to + # the input type, if it is required numba raises as below. + if np.any(wi): + raise ValueError( + "eig() argument must not cause a domain change.") + + # put these in to help with liveness analysis, + # `.ctypes` doesn't keep the vars alive + _dummy_liveness_func([acpy.size, vl.size, vr.size, wr.size, wi.size]) + return (wr, vr.T) + + def cmplx_eig_impl(a): + """ + eig() implementation for complex arrays. + """ + n = a.shape[-1] + if a.shape[-2] != n: + msg = "Last 2 dimensions of the array must be square." + raise np.linalg.LinAlgError(msg) + + _check_finite_matrix(a) + + acpy = _copy_to_fortran_order(a) + + ldvl = 1 + ldvr = n + w = np.empty(n, dtype=a.dtype) + vl = np.empty((n, ldvl), dtype=a.dtype) + vr = np.empty((n, ldvr), dtype=a.dtype) + + if n == 0: + return (w, vr.T) + + r = numba_ez_cgeev(kind, + JOBVL, + JOBVR, + n, + acpy.ctypes, + n, + w.ctypes, + vl.ctypes, + ldvl, + vr.ctypes, + ldvr) + _handle_err_maybe_convergence_problem(r) + + # put these in to help with liveness analysis, + # `.ctypes` doesn't keep the vars alive + _dummy_liveness_func([acpy.size, vl.size, vr.size, w.size]) + return (w, vr.T) + + if isinstance(a.dtype, types.scalars.Complex): + return cmplx_eig_impl + else: + return real_eig_impl + +@overload(np.linalg.eigvals) +def eigvals_impl(a): + ensure_lapack() + + _check_linalg_matrix(a, "eigvals") + + numba_ez_rgeev = _LAPACK().numba_ez_rgeev(a.dtype) + numba_ez_cgeev = _LAPACK().numba_ez_cgeev(a.dtype) + + kind = ord(get_blas_kind(a.dtype, "eigvals")) + + JOBVL = ord('N') + JOBVR = ord('N') + + def real_eigvals_impl(a): + """ + 
eigvals() implementation for real arrays. + """ + n = a.shape[-1] + if a.shape[-2] != n: + msg = "Last 2 dimensions of the array must be square." + raise np.linalg.LinAlgError(msg) + + _check_finite_matrix(a) + + acpy = _copy_to_fortran_order(a) + + ldvl = 1 + ldvr = 1 + wr = np.empty(n, dtype=a.dtype) + + if n == 0: + return wr + + wi = np.empty(n, dtype=a.dtype) + + # not referenced but need setting for MKL null check + vl = np.empty((1), dtype=a.dtype) + vr = np.empty((1), dtype=a.dtype) + + r = numba_ez_rgeev(kind, + JOBVL, + JOBVR, + n, + acpy.ctypes, + n, + wr.ctypes, + wi.ctypes, + vl.ctypes, + ldvl, + vr.ctypes, + ldvr) + _handle_err_maybe_convergence_problem(r) + + # By design numba does not support dynamic return types, however, + # Numpy does. Numpy uses this ability in the case of returning + # eigenvalues/vectors of a real matrix. The return type of + # np.linalg.eigvals(), when operating on a matrix in real space + # depends on the values present in the matrix itself (recalling + # that eigenvalues are the roots of the characteristic polynomial + # of the system matrix, which will by construction depend on the + # values present in the system matrix). As numba cannot handle + # the case of a runtime decision based domain change relative to + # the input type, if it is required numba raises as below. + if np.any(wi): + raise ValueError( + "eigvals() argument must not cause a domain change.") + + # put these in to help with liveness analysis, + # `.ctypes` doesn't keep the vars alive + _dummy_liveness_func([acpy.size, vl.size, vr.size, wr.size, wi.size]) + return wr + + def cmplx_eigvals_impl(a): + """ + eigvals() implementation for complex arrays. + """ + n = a.shape[-1] + if a.shape[-2] != n: + msg = "Last 2 dimensions of the array must be square." 
+ raise np.linalg.LinAlgError(msg) + + _check_finite_matrix(a) + + acpy = _copy_to_fortran_order(a) + + ldvl = 1 + ldvr = 1 + w = np.empty(n, dtype=a.dtype) + + if n == 0: + return w + + vl = np.empty((1), dtype=a.dtype) + vr = np.empty((1), dtype=a.dtype) + + r = numba_ez_cgeev(kind, + JOBVL, + JOBVR, + n, + acpy.ctypes, + n, + w.ctypes, + vl.ctypes, + ldvl, + vr.ctypes, + ldvr) + _handle_err_maybe_convergence_problem(r) + + # put these in to help with liveness analysis, + # `.ctypes` doesn't keep the vars alive + _dummy_liveness_func([acpy.size, vl.size, vr.size, w.size]) + return w + + if isinstance(a.dtype, types.scalars.Complex): + return cmplx_eigvals_impl + else: + return real_eigvals_impl + +@overload(np.linalg.eigh) +def eigh_impl(a): + ensure_lapack() + + _check_linalg_matrix(a, "eigh") + + # convert typing floats to numpy floats for use in the impl + w_type = getattr(a.dtype, "underlying_float", a.dtype) + w_dtype = np_support.as_dtype(w_type) + + numba_ez_xxxevd = _LAPACK().numba_ez_xxxevd(a.dtype) + + kind = ord(get_blas_kind(a.dtype, "eigh")) + + JOBZ = ord('V') + UPLO = ord('L') + + def eigh_impl(a): + n = a.shape[-1] + + if a.shape[-2] != n: + msg = "Last 2 dimensions of the array must be square." 
+ raise np.linalg.LinAlgError(msg) + + _check_finite_matrix(a) + + acpy = _copy_to_fortran_order(a) + + w = np.empty(n, dtype=w_dtype) + + if n == 0: + return (w, acpy) + + r = numba_ez_xxxevd(kind, # kind + JOBZ, # jobz + UPLO, # uplo + n, # n + acpy.ctypes, # a + n, # lda + w.ctypes # w + ) + _handle_err_maybe_convergence_problem(r) + + # help liveness analysis + _dummy_liveness_func([acpy.size, w.size]) + return (w, acpy) + + return eigh_impl + +@overload(np.linalg.eigvalsh) +def eigvalsh_impl(a): + ensure_lapack() + + _check_linalg_matrix(a, "eigvalsh") + + # convert typing floats to numpy floats for use in the impl + w_type = getattr(a.dtype, "underlying_float", a.dtype) + w_dtype = np_support.as_dtype(w_type) + + numba_ez_xxxevd = _LAPACK().numba_ez_xxxevd(a.dtype) + + kind = ord(get_blas_kind(a.dtype, "eigvalsh")) + + JOBZ = ord('N') + UPLO = ord('L') + + def eigvalsh_impl(a): + n = a.shape[-1] + + if a.shape[-2] != n: + msg = "Last 2 dimensions of the array must be square." + raise np.linalg.LinAlgError(msg) + + _check_finite_matrix(a) + + acpy = _copy_to_fortran_order(a) + + w = np.empty(n, dtype=w_dtype) + + if n == 0: + return w + + r = numba_ez_xxxevd(kind, # kind + JOBZ, # jobz + UPLO, # uplo + n, # n + acpy.ctypes, # a + n, # lda + w.ctypes # w + ) + _handle_err_maybe_convergence_problem(r) + + # help liveness analysis + _dummy_liveness_func([acpy.size, w.size]) + return w + + return eigvalsh_impl + +@overload(np.linalg.svd) +def svd_impl(a, full_matrices=1): + ensure_lapack() + + _check_linalg_matrix(a, "svd") + + # convert typing floats to numpy floats for use in the impl + s_type = getattr(a.dtype, "underlying_float", a.dtype) + s_dtype = np_support.as_dtype(s_type) + + numba_ez_gesdd = _LAPACK().numba_ez_gesdd(a.dtype) + + kind = ord(get_blas_kind(a.dtype, "svd")) + + JOBZ_A = ord('A') + JOBZ_S = ord('S') + + def svd_impl(a, full_matrices=1): + n = a.shape[-1] + m = a.shape[-2] + + if n == 0 or m == 0: + raise np.linalg.LinAlgError("Arrays cannot 
be empty") + + _check_finite_matrix(a) + + acpy = _copy_to_fortran_order(a) + + ldu = m + minmn = min(m, n) + + if full_matrices: + JOBZ = JOBZ_A + ucol = m + ldvt = n + else: + JOBZ = JOBZ_S + ucol = minmn + ldvt = minmn + + u = np.empty((ucol, ldu), dtype=a.dtype) + s = np.empty(minmn, dtype=s_dtype) + vt = np.empty((n, ldvt), dtype=a.dtype) + + r = numba_ez_gesdd( + kind, # kind + JOBZ, # jobz + m, # m + n, # n + acpy.ctypes, # a + m, # lda + s.ctypes, # s + u.ctypes, # u + ldu, # ldu + vt.ctypes, # vt + ldvt # ldvt + ) + _handle_err_maybe_convergence_problem(r) + + # help liveness analysis + _dummy_liveness_func([acpy.size, vt.size, u.size, s.size]) + return (u.T, s, vt.T) + + return svd_impl + + +@overload(np.linalg.qr) +def qr_impl(a): + ensure_lapack() + + _check_linalg_matrix(a, "qr") + + # Need two functions, the first computes R, storing it in the upper + # triangle of A with the below diagonal part of A containing elementary + # reflectors needed to construct Q. The second turns the below diagonal + # entries of A into Q, storing Q in A (creates orthonormal columns from + # the elementary reflectors). 
+ + numba_ez_geqrf = _LAPACK().numba_ez_geqrf(a.dtype) + numba_ez_xxgqr = _LAPACK().numba_ez_xxgqr(a.dtype) + + kind = ord(get_blas_kind(a.dtype, "qr")) + + def qr_impl(a): + n = a.shape[-1] + m = a.shape[-2] + + if n == 0 or m == 0: + raise np.linalg.LinAlgError("Arrays cannot be empty") + + _check_finite_matrix(a) + + # copy A as it will be destroyed + q = _copy_to_fortran_order(a) + + lda = m + + minmn = min(m, n) + tau = np.empty((minmn), dtype=a.dtype) + + ret = numba_ez_geqrf( + kind, # kind + m, # m + n, # n + q.ctypes, # a + m, # lda + tau.ctypes # tau + ) + if ret < 0: + fatal_error_func() + assert 0 # unreachable + + # pull out R, this is transposed because of Fortran + r = np.zeros((n, minmn), dtype=a.dtype).T + + # the triangle in R + for i in range(minmn): + for j in range(i + 1): + r[j, i] = q[j, i] + + # and the possible square in R + for i in range(minmn, n): + for j in range(minmn): + r[j, i] = q[j, i] + + ret = numba_ez_xxgqr( + kind, # kind + m, # m + minmn, # n + minmn, # k + q.ctypes, # a + m, # lda + tau.ctypes # tau + ) + _handle_err_maybe_convergence_problem(ret) + + # help liveness analysis + _dummy_liveness_func([tau.size, q.size]) + return (q[:, :minmn], r) + + return qr_impl + + +# helpers and jitted specialisations required for np.linalg.lstsq +# and np.linalg.solve. These functions have "system" in their name +# as a differentiator. + +def _system_copy_in_b(bcpy, b, nrhs): + """ + Correctly copy 'b' into the 'bcpy' scratch space. 
+ """ + raise NotImplementedError + + +@overload(_system_copy_in_b) +def _system_copy_in_b_impl(bcpy, b, nrhs): + if b.ndim == 1: + def oneD_impl(bcpy, b, nrhs): + bcpy[:b.shape[-1], 0] = b + return oneD_impl + else: + def twoD_impl(bcpy, b, nrhs): + bcpy[:b.shape[-2], :nrhs] = b + return twoD_impl + + +def _system_compute_nrhs(b): + """ + Compute the number of right hand sides in the system of equations + """ + raise NotImplementedError + + +@overload(_system_compute_nrhs) +def _system_compute_nrhs_impl(b): + if b.ndim == 1: + def oneD_impl(b): + return 1 + return oneD_impl + else: + def twoD_impl(b): + return b.shape[-1] + return twoD_impl + + +def _system_check_dimensionally_valid(a, b): + """ + Check that AX=B style system input is dimensionally valid. + """ + raise NotImplementedError + + +@overload(_system_check_dimensionally_valid) +def _system_check_dimensionally_valid_impl(a, b): + ndim = b.ndim + if ndim == 1: + def oneD_impl(a, b): + am = a.shape[-2] + bm = b.shape[-1] + if am != bm: + raise np.linalg.LinAlgError( + "Incompatible array sizes, system is not dimensionally valid.") + return oneD_impl + else: + def twoD_impl(a, b): + am = a.shape[-2] + bm = b.shape[-2] + if am != bm: + raise np.linalg.LinAlgError( + "Incompatible array sizes, system is not dimensionally valid.") + return twoD_impl + + +def _system_check_non_empty(a, b): + """ + Check that AX=B style system input is not empty. 
+ """ + raise NotImplementedError + + +@overload(_system_check_non_empty) +def _system_check_non_empty_impl(a, b): + ndim = b.ndim + if ndim == 1: + def oneD_impl(a, b): + am = a.shape[-2] + an = a.shape[-1] + bm = b.shape[-1] + if am == 0 or bm == 0 or an == 0: + raise np.linalg.LinAlgError('Arrays cannot be empty') + return oneD_impl + else: + def twoD_impl(a, b): + am = a.shape[-2] + an = a.shape[-1] + bm = b.shape[-2] + bn = b.shape[-1] + if am == 0 or bm == 0 or an == 0 or bn == 0: + raise np.linalg.LinAlgError('Arrays cannot be empty') + return twoD_impl + + +def _lstsq_residual(b, n, nrhs): + """ + Compute the residual from the 'b' scratch space. + """ + raise NotImplementedError + + +@overload(_lstsq_residual) +def _lstsq_residual_impl(b, n, nrhs): + ndim = b.ndim + dtype = b.dtype + real_dtype = np_support.as_dtype(getattr(dtype, "underlying_float", dtype)) + + if ndim == 1: + if isinstance(dtype, (types.Complex)): + def cmplx_impl(b, n, nrhs): + res = np.empty((1,), dtype=real_dtype) + res[0] = np.sum(np.abs(b[n:, 0])**2) + return res + return cmplx_impl + else: + def real_impl(b, n, nrhs): + res = np.empty((1,), dtype=real_dtype) + res[0] = np.sum(b[n:, 0]**2) + return res + return real_impl + else: + assert ndim == 2 + if isinstance(dtype, (types.Complex)): + def cmplx_impl(b, n, nrhs): + res = np.empty((nrhs), dtype=real_dtype) + for k in range(nrhs): + res[k] = np.sum(np.abs(b[n:, k])**2) + return res + return cmplx_impl + else: + def real_impl(b, n, nrhs): + res = np.empty((nrhs), dtype=real_dtype) + for k in range(nrhs): + res[k] = np.sum(b[n:, k]**2) + return res + return real_impl + + +def _lstsq_solution(b, bcpy, n): + """ + Extract 'x' (the lstsq solution) from the 'bcpy' scratch space. + Note 'b' is only used to check the system input dimension... 
+ """ + raise NotImplementedError + + +@overload(_lstsq_solution) +def _lstsq_solution_impl(b, bcpy, n): + if b.ndim == 1: + def oneD_impl(b, bcpy, n): + return bcpy.T.ravel()[:n] + return oneD_impl + else: + def twoD_impl(b, bcpy, n): + return bcpy[:n, :].copy() + return twoD_impl + + +@overload(np.linalg.lstsq) +def lstsq_impl(a, b, rcond=-1.0): + ensure_lapack() + + _check_linalg_matrix(a, "lstsq") + + # B can be 1D or 2D. + _check_linalg_1_or_2d_matrix(b, "lstsq") + + _check_homogeneous_types("lstsq", a, b) + + np_dt = np_support.as_dtype(a.dtype) + nb_dt = a.dtype + + # convert typing floats to np floats for use in the impl + r_type = getattr(nb_dt, "underlying_float", nb_dt) + real_dtype = np_support.as_dtype(r_type) + + # lapack solver + numba_ez_gelsd = _LAPACK().numba_ez_gelsd(a.dtype) + + kind = ord(get_blas_kind(nb_dt, "lstsq")) + + # The following functions select specialisations based on + # information around 'b', a lot of this effort is required + # as 'b' can be either 1D or 2D, and then there are + # some optimisations available depending on real or complex + # space. 
+ + def lstsq_impl(a, b, rcond=-1.0): + n = a.shape[-1] + m = a.shape[-2] + nrhs = _system_compute_nrhs(b) + + # check the systems have no inf or NaN + _check_finite_matrix(a) + _check_finite_matrix(b) + + # check the system is not empty + _system_check_non_empty(a, b) + + # check the systems are dimensionally valid + _system_check_dimensionally_valid(a, b) + + minmn = min(m, n) + maxmn = max(m, n) + + # a is destroyed on exit, copy it + acpy = _copy_to_fortran_order(a) + + # b is overwritten on exit with the solution, copy allocate + bcpy = np.empty((nrhs, maxmn), dtype=np_dt).T + # specialised copy in due to b being 1 or 2D + _system_copy_in_b(bcpy, b, nrhs) + + # Allocate returns + s = np.empty(minmn, dtype=real_dtype) + rank_ptr = np.empty(1, dtype=np.int32) + + r = numba_ez_gelsd( + kind, # kind + m, # m + n, # n + nrhs, # nrhs + acpy.ctypes, # a + m, # lda + bcpy.ctypes, # a + maxmn, # ldb + s.ctypes, # s + rcond, # rcond + rank_ptr.ctypes # rank + ) + _handle_err_maybe_convergence_problem(r) + + # set rank to that which was computed + rank = rank_ptr[0] + + # compute residuals + if rank < n or m <= n: + res = np.empty((0), dtype=real_dtype) + else: + # this requires additional dispatch as there's a faster + # impl if the result is in the real domain (no abs() required) + res = _lstsq_residual(bcpy, n, nrhs) + + # extract 'x', the solution + x = _lstsq_solution(b, bcpy, n) + + # help liveness analysis + _dummy_liveness_func([acpy.size, bcpy.size, s.size, rank_ptr.size]) + return (x, res, rank, s[:minmn]) + + return lstsq_impl + + +def _solve_compute_return(b, bcpy): + """ + Extract 'x' (the solution) from the 'bcpy' scratch space. + Note 'b' is only used to check the system input dimension... 
+ """ + raise NotImplementedError + + +@overload(_solve_compute_return) +def _solve_compute_return_impl(b, bcpy): + if b.ndim == 1: + def oneD_impl(b, bcpy): + return bcpy.T.ravel() + return oneD_impl + else: + def twoD_impl(b, bcpy): + return bcpy + return twoD_impl + + +@overload(np.linalg.solve) +def solve_impl(a, b): + ensure_lapack() + + _check_linalg_matrix(a, "solve") + _check_linalg_1_or_2d_matrix(b, "solve") + + _check_homogeneous_types("solve", a, b) + + np_dt = np_support.as_dtype(a.dtype) + nb_dt = a.dtype + + # the lapack solver + numba_xgesv = _LAPACK().numba_xgesv(a.dtype) + + kind = ord(get_blas_kind(nb_dt, "solve")) + + def solve_impl(a, b): + n = a.shape[-1] + nrhs = _system_compute_nrhs(b) + + # check the systems have no inf or NaN + _check_finite_matrix(a) + _check_finite_matrix(b) + + # check the systems are dimensionally valid + _system_check_dimensionally_valid(a, b) + + # a is destroyed on exit, copy it + acpy = _copy_to_fortran_order(a) + + # b is overwritten on exit with the solution, copy allocate + bcpy = np.empty((nrhs, n), dtype=np_dt).T + if n == 0: + return _solve_compute_return(b, bcpy) + + # specialised copy in due to b being 1 or 2D + _system_copy_in_b(bcpy, b, nrhs) + + # allocate pivot array (needs to be fortran int size) + ipiv = np.empty(n, dtype=F_INT_nptype) + + r = numba_xgesv( + kind, # kind + n, # n + nrhs, # nhrs + acpy.ctypes, # a + n, # lda + ipiv.ctypes, # ipiv + bcpy.ctypes, # b + n # ldb + ) + _inv_err_handler(r) + + # help liveness analysis + _dummy_liveness_func([acpy.size, bcpy.size, ipiv.size]) + return _solve_compute_return(b, bcpy) + + return solve_impl + + +@overload(np.linalg.pinv) +def pinv_impl(a, rcond=1.e-15): + ensure_lapack() + + _check_linalg_matrix(a, "pinv") + + # convert typing floats to numpy floats for use in the impl + s_type = getattr(a.dtype, "underlying_float", a.dtype) + s_dtype = np_support.as_dtype(s_type) + + numba_ez_gesdd = _LAPACK().numba_ez_gesdd(a.dtype) + + numba_xxgemm = 
_BLAS().numba_xxgemm(a.dtype) + + kind = ord(get_blas_kind(a.dtype, "pinv")) + JOB = ord('S') + + # need conjugate transposes + TRANSA = ord('C') + TRANSB = ord('C') + + # scalar constants + dt = np_support.as_dtype(a.dtype) + zero = np.array([0.], dtype=dt) + one = np.array([1.], dtype=dt) + + def pinv_impl(a, rcond=1.e-15): + + # The idea is to build the pseudo-inverse via inverting the singular + # value decomposition of a matrix `A`. Mathematically, this is roughly + # A = U*S*V^H [The SV decomposition of A] + # A^+ = V*(S^+)*U^H [The inverted SV decomposition of A] + # where ^+ is pseudo inversion and ^H is Hermitian transpose. + # As V and U are unitary, their inverses are simply their Hermitian + # transpose. S has singular values on its diagonal and zero elsewhere, + # it is inverted trivially by reciprocal of the diagonal values with + # the exception that zero singular values remain as zero. + # + # The practical implementation can take advantage of a few things to + # gain a few % performance increase: + # * A is destroyed by the SVD algorithm from LAPACK so a copy is + # required, this memory is exactly the right size in which to return + # the pseudo-inverse and so can be reused for this purpose. + # * The pseudo-inverse of S can be applied to either V or U^H, this + # then leaves a GEMM operation to compute the inverse via either: + # A^+ = (V*(S^+))*U^H + # or + # A^+ = V*((S^+)*U^H) + # however application of S^+ to V^H or U is more convenient as they + # are the result of the SVD algorithm. The application of the + # diagonal system is just a matrix multiplication which results in a + # row/column scaling (direction depending). To save effort, this + # "matrix multiplication" is applied to the smallest of U or V^H and + # only up to the point of "cut-off" (see next note) just as a direct + # scaling. 
+ # * The cut-off level for application of S^+ can be used to reduce + # total effort, this cut-off can come via rcond or may just naturally + # be present as a result of zeros in the singular values. Regardless + # there's no need to multiply by zeros in the application of S^+ to + # V^H or U as above. Further, the GEMM operation can be shrunk in + # effort by noting that the possible zero block generated by the + # presence of zeros in S^+ has no effect apart from wasting cycles as + # it is all fmadd()s where one operand is zero. The inner dimension + # of the GEMM operation can therefore be set as shrunk accordingly! + + n = a.shape[-1] + m = a.shape[-2] + + _check_finite_matrix(a) + + acpy = _copy_to_fortran_order(a) + + if m == 0 or n == 0: + return acpy.T.ravel().reshape(a.shape).T + + minmn = min(m, n) + + u = np.empty((minmn, m), dtype=a.dtype) + s = np.empty(minmn, dtype=s_dtype) + vt = np.empty((n, minmn), dtype=a.dtype) + + r = numba_ez_gesdd( + kind, # kind + JOB, # job + m, # m + n, # n + acpy.ctypes, # a + m, # lda + s.ctypes, # s + u.ctypes, # u + m, # ldu + vt.ctypes, # vt + minmn # ldvt + ) + _handle_err_maybe_convergence_problem(r) + + # Invert singular values under threshold. Also find the index of + # the threshold value as this is the upper limit for the application + # of the inverted singular values. Finding this value saves + # multiplication by a block of zeros that would be created by the + # application of these values to either U or V^H ahead of multiplying + # them together. This is done by simply in BLAS parlance via + # restricting the `k` dimension to `cut_idx` in `xgemm` whilst keeping + # the leading dimensions correct. + + cut_at = s[0] * rcond + cut_idx = 0 + for k in range(minmn): + if s[k] > cut_at: + s[k] = 1. / s[k] + cut_idx = k + cut_idx += 1 + + # Use cut_idx so there's no scaling by 0. + if m >= n: + # U is largest so apply S^+ to V^H. 
+ for i in range(n): + for j in range(cut_idx): + vt[i, j] = vt[i, j] * s[j] + else: + # V^H is largest so apply S^+ to U. + for i in range(cut_idx): + s_local = s[i] + for j in range(minmn): + u[i, j] = u[i, j] * s_local + + # Do (v^H)^H*U^H (obviously one of the matrices includes the S^+ + # scaling) and write back to acpy. Note the innner dimension of cut_idx + # taking account of the possible zero block. + # We can store the result in acpy, given we had to create it + # for use in the SVD, and it is now redundant and the right size + # but wrong shape. + + r = numba_xxgemm( + kind, + TRANSA, # TRANSA + TRANSB, # TRANSB + n, # M + m, # N + cut_idx, # K + one.ctypes, # ALPHA + vt.ctypes, # A + minmn, # LDA + u.ctypes, # B + m, # LDB + zero.ctypes, # BETA + acpy.ctypes, # C + n # LDC + ) + + # help liveness analysis + #acpy.size + #vt.size + #u.size + #s.size + #one.size + #zero.size + _dummy_liveness_func([acpy.size, vt.size, u.size, s.size, one.size, + zero.size]) + return acpy.T.ravel().reshape(a.shape).T + + return pinv_impl + + +def _get_slogdet_diag_walker(a): + """ + Walks the diag of a LUP decomposed matrix + uses that det(A) = prod(diag(lup(A))) + and also that log(a)+log(b) = log(a*b) + The return sign is adjusted based on the values found + such that the log(value) stays in the real domain. + """ + if isinstance(a.dtype, types.Complex): + @register_jitable + def cmplx_diag_walker(n, a, sgn): + # walk diagonal + csgn = sgn + 0.j + acc = 0. + for k in range(n): + absel = np.abs(a[k, k]) + csgn = csgn * (a[k, k] / absel) + acc = acc + np.log(absel) + return (csgn, acc) + return cmplx_diag_walker + else: + @register_jitable + def real_diag_walker(n, a, sgn): + # walk diagonal + acc = 0. 
+ for k in range(n): + v = a[k, k] + if v < 0.: + sgn = -sgn + v = -v + acc = acc + np.log(v) + # sgn is a float dtype + return (sgn + 0., acc) + return real_diag_walker + + +@overload(np.linalg.slogdet) +def slogdet_impl(a): + ensure_lapack() + + _check_linalg_matrix(a, "slogdet") + + numba_xxgetrf = _LAPACK().numba_xxgetrf(a.dtype) + + kind = ord(get_blas_kind(a.dtype, "slogdet")) + + diag_walker = _get_slogdet_diag_walker(a) + + ONE = a.dtype(1) + ZERO = getattr(a.dtype, "underlying_float", a.dtype)(0) + + def slogdet_impl(a): + n = a.shape[-1] + if a.shape[-2] != n: + msg = "Last 2 dimensions of the array must be square." + raise np.linalg.LinAlgError(msg) + + if n == 0: + return (ONE, ZERO) + + _check_finite_matrix(a) + + acpy = _copy_to_fortran_order(a) + + ipiv = np.empty(n, dtype=F_INT_nptype) + + r = numba_xxgetrf(kind, n, n, acpy.ctypes, n, ipiv.ctypes) + + if r > 0: + # factorisation failed, return same defaults as np + return (0., -np.inf) + _inv_err_handler(r) # catch input-to-lapack problem + + # The following, prior to the call to diag_walker, is present + # to account for the effect of possible permutations to the + # sign of the determinant. + # This is the same idea as in numpy: + # File name `umath_linalg.c.src` e.g. + # https://github.com/numpy/numpy/blob/master/numpy/linalg/umath_linalg.c.src + # in function `@TYPE@_slogdet_single_element`. + sgn = 1 + for k in range(n): + sgn = sgn + (ipiv[k] != (k + 1)) + + sgn = sgn & 1 + if sgn == 0: + sgn = -1 + + # help liveness analysis + _dummy_liveness_func([ipiv.size]) + return diag_walker(n, acpy, sgn) + + return slogdet_impl + + +@overload(np.linalg.det) +def det_impl(a): + + ensure_lapack() + + _check_linalg_matrix(a, "det") + + def det_impl(a): + (sgn, slogdet) = np.linalg.slogdet(a) + return sgn * np.exp(slogdet) + + return det_impl + + +def _compute_singular_values(a): + """ + Compute singular values of *a*. 
+ """ + raise NotImplementedError + + +@overload(_compute_singular_values) +def _compute_singular_values_impl(a): + """ + Returns a function to compute singular values of `a` + """ + numba_ez_gesdd = _LAPACK().numba_ez_gesdd(a.dtype) + + kind = ord(get_blas_kind(a.dtype, "svd")) + + # Flag for "only compute `S`" to give to xgesdd + JOBZ_N = ord('N') + + nb_ret_type = getattr(a.dtype, "underlying_float", a.dtype) + np_ret_type = np_support.as_dtype(nb_ret_type) + np_dtype = np_support.as_dtype(a.dtype) + + # These are not referenced in the computation but must be set + # for MKL. + u = np.empty((1, 1), dtype=np_dtype) + vt = np.empty((1, 1), dtype=np_dtype) + + def sv_function(a): + """ + Computes singular values. + """ + # Don't use the np.linalg.svd impl instead + # call LAPACK to shortcut doing the "reconstruct + # singular vectors from reflectors" step and just + # get back the singular values. + n = a.shape[-1] + m = a.shape[-2] + if m == 0 or n == 0: + raise np.linalg.LinAlgError('Arrays cannot be empty') + _check_finite_matrix(a) + + ldu = m + minmn = min(m, n) + + # need to be >=1 but aren't referenced + ucol = 1 + ldvt = 1 + + acpy = _copy_to_fortran_order(a) + + # u and vt are not referenced however need to be + # allocated (as done above) for MKL as it + # checks for ref is nullptr. + s = np.empty(minmn, dtype=np_ret_type) + + r = numba_ez_gesdd( + kind, # kind + JOBZ_N, # jobz + m, # m + n, # n + acpy.ctypes, # a + m, # lda + s.ctypes, # s + u.ctypes, # u + ldu, # ldu + vt.ctypes, # vt + ldvt # ldvt + ) + _handle_err_maybe_convergence_problem(r) + + # help liveness analysis + _dummy_liveness_func([acpy.size, vt.size, u.size, s.size]) + return s + + return sv_function + + +def _oneD_norm_2(a): + """ + Compute the L2-norm of 1D-array *a*. 
+ """ + raise NotImplementedError + + +@overload(_oneD_norm_2) +def _oneD_norm_2_impl(a): + nb_ret_type = getattr(a.dtype, "underlying_float", a.dtype) + np_ret_type = np_support.as_dtype(nb_ret_type) + + xxnrm2 = _BLAS().numba_xxnrm2(a.dtype) + + kind = ord(get_blas_kind(a.dtype, "norm")) + + def impl(a): + # Just ignore order, calls are guarded to only come + # from cases where order=None or order=2. + n = len(a) + # Call L2-norm routine from BLAS + ret = np.empty((1,), dtype=np_ret_type) + jmp = int(a.strides[0] / a.itemsize) + r = xxnrm2( + kind, # kind + n, # n + a.ctypes, # x + jmp, # incx + ret.ctypes # result + ) + if r < 0: + fatal_error_func() + assert 0 # unreachable + + # help liveness analysis + #ret.size + #a.size + _dummy_liveness_func([ret.size, a.size]) + return ret[0] + + return impl + + +def _get_norm_impl(x, ord_flag): + # This function is quite involved as norm supports a large + # range of values to select different norm types via kwarg `ord`. + # The implementation below branches on dimension of the input + # (1D or 2D). The default for `ord` is `None` which requires + # special handling in numba, this is dealt with first in each of + # the dimension branches. Following this the various norms are + # computed via code that is in most cases simply a loop version + # of a ufunc based version as found in numpy. + + # The following is common to both 1D and 2D cases. + # Convert typing floats to numpy floats for use in the impl. + # The return type is always a float, numba differs from numpy in + # that it returns an input precision specific value whereas numpy + # always returns np.float64. 
+ nb_ret_type = getattr(x.dtype, "underlying_float", x.dtype) + np_ret_type = np_support.as_dtype(nb_ret_type) + + np_dtype = np_support.as_dtype(x.dtype) + + xxnrm2 = _BLAS().numba_xxnrm2(x.dtype) + + kind = ord(get_blas_kind(x.dtype, "norm")) + + if x.ndim == 1: + # 1D cases + + # handle "ord" being "None", must be done separately + if ord_flag in (None, types.none): + def oneD_impl(x, ord=None): + return _oneD_norm_2(x) + else: + def oneD_impl(x, ord=None): + n = len(x) + + # Shortcut to handle zero length arrays + # this differs slightly to numpy in that + # numpy raises a ValueError for kwarg ord= + # +/-np.inf as the reduction operations like + # max() and min() don't accept zero length + # arrays + if n == 0: + return 0.0 + + # Note: on order == 2 + # This is the same as for ord=="None" but because + # we have to handle "None" specially this condition + # is separated + if ord == 2: + return _oneD_norm_2(x) + elif ord == np.inf: + # max(abs(x)) + ret = abs(x[0]) + for k in range(1, n): + val = abs(x[k]) + if val > ret: + ret = val + return ret + + elif ord == -np.inf: + # min(abs(x)) + ret = abs(x[0]) + for k in range(1, n): + val = abs(x[k]) + if val < ret: + ret = val + return ret + + elif ord == 0: + # sum(x != 0) + ret = 0.0 + for k in range(n): + if x[k] != 0.: + ret += 1. + return ret + + elif ord == 1: + # sum(abs(x)) + ret = 0.0 + for k in range(n): + ret += abs(x[k]) + return ret + + else: + # sum(abs(x)**ord)**(1./ord) + ret = 0.0 + for k in range(n): + ret += abs(x[k])**ord + return ret**(1. / ord) + return oneD_impl + + elif x.ndim == 2: + # 2D cases + + # handle "ord" being "None" + if ord_flag in (None, types.none): + # Force `x` to be C-order, so that we can take a contiguous + # 1D view. 
+ if x.layout == 'C': + @register_jitable + def array_prepare(x): + return x + elif x.layout == 'F': + @register_jitable + def array_prepare(x): + # Legal since L2(x) == L2(x.T) + return x.T + else: + @register_jitable + def array_prepare(x): + return x.copy() + + # Compute the Frobenius norm, this is the L2,2 induced norm of `x` + # which is the L2-norm of x.ravel() and so can be computed via BLAS + def twoD_impl(x, ord=None): + n = x.size + if n == 0: + # reshape() currently doesn't support zero-sized arrays + return 0.0 + x_c = array_prepare(x) + return _oneD_norm_2(x_c.reshape(n)) + else: + # max value for this dtype + max_val = np.finfo(np_ret_type.type).max + + def twoD_impl(x, ord=None): + n = x.shape[-1] + m = x.shape[-2] + + # Shortcut to handle zero size arrays + # this differs slightly to numpy in that + # numpy raises errors for some ord values + # and in other cases returns zero. + if x.size == 0: + return 0.0 + + if ord == np.inf: + # max of sum of abs across rows + # max(sum(abs(x)), axis=1) + global_max = 0. + for ii in range(m): + tmp = 0. + for jj in range(n): + tmp += abs(x[ii, jj]) + if tmp > global_max: + global_max = tmp + return global_max + + elif ord == -np.inf: + # min of sum of abs across rows + # min(sum(abs(x)), axis=1) + global_min = max_val + for ii in range(m): + tmp = 0. + for jj in range(n): + tmp += abs(x[ii, jj]) + if tmp < global_min: + global_min = tmp + return global_min + elif ord == 1: + # max of sum of abs across cols + # max(sum(abs(x)), axis=0) + global_max = 0. + for ii in range(n): + tmp = 0. + for jj in range(m): + tmp += abs(x[jj, ii]) + if tmp > global_max: + global_max = tmp + return global_max + + elif ord == -1: + # min of sum of abs across cols + # min(sum(abs(x)), axis=0) + global_min = max_val + for ii in range(n): + tmp = 0. + for jj in range(m): + tmp += abs(x[jj, ii]) + if tmp < global_min: + global_min = tmp + return global_min + + # Results via SVD, singular values are sorted on return + # by definition. 
+ elif ord == 2: + # max SV + return _compute_singular_values(x)[0] + elif ord == -2: + # min SV + return _compute_singular_values(x)[-1] + else: + # replicate numpy error + raise ValueError("Invalid norm order for matrices.") + return twoD_impl + else: + assert 0 # unreachable + + +@overload(np.linalg.norm) +def norm_impl(x, ord=None): + ensure_lapack() + + _check_linalg_1_or_2d_matrix(x, "norm") + + return _get_norm_impl(x, ord) + + +@overload(np.linalg.cond) +def cond_impl(x, p=None): + ensure_lapack() + + _check_linalg_matrix(x, "cond") + + def impl(x, p=None): + # This is extracted for performance, numpy does approximately: + # `condition = norm(x) * norm(inv(x))` + # in the cases of `p == 2` or `p ==-2` singular values are used + # for computing norms. This costs numpy an svd of `x` then an + # inversion of `x` and another svd of `x`. + # Below is a different approach, which also gives a more + # accurate answer as there is no inversion involved. + # Recall that the singular values of an inverted matrix are the + # reciprocal of singular values of the original matrix. + # Therefore calling `svd(x)` once yields all the information + # needed about both `x` and `inv(x)` without the cost or + # potential loss of accuracy incurred through inversion. + # For the case of `p == 2`, the result is just the ratio of + # `largest singular value/smallest singular value`, and for the + # case of `p==-2` the result is simply the + # `smallest singular value/largest singular value`. + # As a result of this, numba accepts non-square matrices as + # input when p==+/-2 as well as when p==None. 
+ if p == 2 or p == -2 or p is None: + s = _compute_singular_values(x) + if p == 2 or p is None: + r = np.divide(s[0], s[-1]) + else: + r = np.divide(s[-1], s[0]) + else: # cases np.inf, -np.inf, 1, -1 + norm_x = np.linalg.norm(x, p) + norm_inv_x = np.linalg.norm(np.linalg.inv(x), p) + r = norm_x * norm_inv_x + # NumPy uses a NaN mask, if the input has a NaN, it will return NaN, + # Numba calls ban NaN through the use of _check_finite_matrix but this + # catches cases where NaN occurs through floating point use + if np.isnan(r): + return np.inf + else: + return r + return impl + + +@register_jitable +def _get_rank_from_singular_values(sv, t): + """ + Gets rank from singular values with cut-off at a given tolerance + """ + rank = 0 + for k in range(len(sv)): + if sv[k] > t: + rank = rank + 1 + else: # sv is ordered big->small so break on condition not met + break + return rank + + +@overload(np.linalg.matrix_rank) +def matrix_rank_impl(A, tol=None): + """ + Computes rank for matrices and vectors. + The only issue that may arise is that because numpy uses double + precision lapack calls whereas numba uses type specific lapack + calls, some singular values may differ and therefore counting the + number of them above a tolerance may lead to different counts, + and therefore rank, in some cases. 
+ """ + ensure_lapack() + + _check_linalg_1_or_2d_matrix(A, "matrix_rank") + + def _2d_matrix_rank_impl(A, tol): + + # handle the tol==None case separately for type inference to work + if tol in (None, types.none): + nb_type = getattr(A.dtype, "underlying_float", A.dtype) + np_type = np_support.as_dtype(nb_type) + eps_val = np.finfo(np_type).eps + + def _2d_tol_none_impl(A, tol=None): + s = _compute_singular_values(A) + # replicate numpy default tolerance calculation + r = A.shape[0] + c = A.shape[1] + l = max(r, c) + t = s[0] * l * eps_val + return _get_rank_from_singular_values(s, t) + return _2d_tol_none_impl + else: + def _2d_tol_not_none_impl(A, tol=None): + s = _compute_singular_values(A) + return _get_rank_from_singular_values(s, tol) + return _2d_tol_not_none_impl + + def _get_matrix_rank_impl(A, tol): + ndim = A.ndim + if ndim == 1: + # NOTE: Technically, the numpy implementation could be argued as + # incorrect for the case of a vector (1D matrix). If a tolerance + # is provided and a vector with a singular value below tolerance is + # encountered this should report a rank of zero, the numpy + # implementation does not do this and instead elects to report that + # if any value in the vector is nonzero then the rank is 1. + # An example would be [0, 1e-15, 0, 2e-15] which numpy reports as + # rank 1 invariant of `tol`. The singular value for this vector is + # obviously sqrt(5)*1e-15 and so a tol of e.g. sqrt(6)*1e-15 should + # lead to a reported rank of 0 whereas a tol of 1e-15 should lead + # to a reported rank of 1, numpy reports 1 regardless. + # The code below replicates the numpy behaviour. + def _1d_matrix_rank_impl(A, tol=None): + for k in range(len(A)): + if A[k] != 0.: + return 1 + return 0 + return _1d_matrix_rank_impl + elif ndim == 2: + return _2d_matrix_rank_impl(A, tol) + else: + assert 0 # unreachable + + return _get_matrix_rank_impl(A, tol) + + +@overload(np.linalg.matrix_power) +def matrix_power_impl(a, n): + """ + Computes matrix power. 
Only integer powers are supported in numpy. + """ + + _check_linalg_matrix(a, "matrix_power") + np_dtype = np_support.as_dtype(a.dtype) + + nt = getattr(n, 'dtype', n) + if not isinstance(nt, types.Integer): + raise NumbaTypeError("Exponent must be an integer.") + + def matrix_power_impl(a, n): + + if n == 0: + # this should be eye() but it doesn't support + # the dtype kwarg yet so do it manually to save + # the copy required by eye(a.shape[0]).asdtype() + A = np.zeros(a.shape, dtype=np_dtype) + for k in range(a.shape[0]): + A[k, k] = 1. + return A + + am, an = a.shape[-1], a.shape[-2] + if am != an: + raise ValueError('input must be a square array') + + # empty, return a copy + if am == 0: + return a.copy() + + # note: to be consistent over contiguousness, C order is + # returned as that is what dot() produces and the most common + # paths through matrix_power will involve that. Therefore + # copies are made here to ensure the data ordering is + # correct for paths not going via dot(). + + if n < 0: + A = np.linalg.inv(a).copy() + if n == -1: # return now + return A + n = -n + else: + if n == 1: # return a copy now + return a.copy() + A = a # this is safe, `a` is only read + + if n < 4: + if n == 2: + return np.dot(A, A) + if n == 3: + return np.dot(np.dot(A, A), A) + else: + + acc = A + exp = n + + # Initialise ret, SSA cannot see the loop will execute, without this + # it appears as uninitialised. + ret = acc + # tried a loop split and branchless using identity matrix as + # input but it seems like having a "first entry" flag is quicker + flag = True + while exp != 0: + if exp & 1: + if flag: + ret = acc + flag = False + else: + ret = np.dot(ret, acc) + acc = np.dot(acc, acc) + exp = exp >> 1 + + return ret + + return matrix_power_impl + +# This is documented under linalg despite not being in the module + + +@overload(np.trace) +def matrix_trace_impl(a, offset=0): + """ + Computes the trace of an array. 
+ """ + + _check_linalg_matrix(a, "trace", la_prefix=False) + + if not isinstance(offset, (int, types.Integer)): + raise NumbaTypeError("integer argument expected, got %s" % offset) + + def matrix_trace_impl(a, offset=0): + rows, cols = a.shape + k = offset + if k < 0: + rows = rows + k + if k > 0: + cols = cols - k + n = max(min(rows, cols), 0) + ret = 0 + if k >= 0: + for i in range(n): + ret += a[i, k + i] + else: + for i in range(n): + ret += a[i - k, i] + return ret + + return matrix_trace_impl + + +def _check_scalar_or_lt_2d_mat(a, func_name, la_prefix=True): + prefix = "np.linalg" if la_prefix else "np" + interp = (prefix, func_name) + # checks that a matrix is 1 or 2D + if isinstance(a, types.Array): + if not a.ndim <= 2: + raise TypingError("%s.%s() only supported on 1 and 2-D arrays " + % interp, highlighting=False) + + +@register_jitable +def outer_impl_none(a, b, out): + aa = np.asarray(a) + bb = np.asarray(b) + return np.multiply(aa.ravel().reshape((aa.size, 1)), + bb.ravel().reshape((1, bb.size))) + + +@register_jitable +def outer_impl_arr(a, b, out): + aa = np.asarray(a) + bb = np.asarray(b) + np.multiply(aa.ravel().reshape((aa.size, 1)), + bb.ravel().reshape((1, bb.size)), + out) + return out + + +def _get_outer_impl(a, b, out): + if out in (None, types.none): + return outer_impl_none + else: + return outer_impl_arr + + +@overload(np.outer) +def outer_impl(a, b, out=None): + + _check_scalar_or_lt_2d_mat(a, "outer", la_prefix=False) + _check_scalar_or_lt_2d_mat(b, "outer", la_prefix=False) + + impl = _get_outer_impl(a, b, out) + + def outer_impl(a, b, out=None): + return impl(a, b, out) + + return outer_impl + + +def _kron_normaliser_impl(x): + # makes x into a 2d array + if isinstance(x, types.Array): + if x.layout not in ('C', 'F'): + raise TypingError("np.linalg.kron only supports 'C' or 'F' layout " + "input arrays. 
Received an input of " + "layout '{}'.".format(x.layout)) + elif x.ndim == 2: + @register_jitable + def nrm_shape(x): + xn = x.shape[-1] + xm = x.shape[-2] + return x.reshape(xm, xn) + return nrm_shape + else: + @register_jitable + def nrm_shape(x): + xn = x.shape[-1] + return x.reshape(1, xn) + return nrm_shape + else: # assume its a scalar + @register_jitable + def nrm_shape(x): + a = np.empty((1, 1), type(x)) + a[0] = x + return a + return nrm_shape + + +def _kron_return(a, b): + # transforms c into something that kron would return + # based on the shapes of a and b + a_is_arr = isinstance(a, types.Array) + b_is_arr = isinstance(b, types.Array) + if a_is_arr and b_is_arr: + if a.ndim == 2 or b.ndim == 2: + @register_jitable + def ret(a, b, c): + return c + return ret + else: + @register_jitable + def ret(a, b, c): + return c.reshape(c.size) + return ret + else: # at least one of (a, b) is a scalar + if a_is_arr: + @register_jitable + def ret(a, b, c): + return c.reshape(a.shape) + return ret + elif b_is_arr: + @register_jitable + def ret(a, b, c): + return c.reshape(b.shape) + return ret + else: # both scalars + @register_jitable + def ret(a, b, c): + return c[0] + return ret + + +@overload(np.kron) +def kron_impl(a, b): + + _check_scalar_or_lt_2d_mat(a, "kron", la_prefix=False) + _check_scalar_or_lt_2d_mat(b, "kron", la_prefix=False) + + fix_a = _kron_normaliser_impl(a) + fix_b = _kron_normaliser_impl(b) + ret_c = _kron_return(a, b) + + # this is fine because the ufunc for the Hadamard product + # will reject differing dtypes in a and b. + dt = getattr(a, 'dtype', a) + + def kron_impl(a, b): + + aa = fix_a(a) + bb = fix_b(b) + + am = aa.shape[-2] + an = aa.shape[-1] + bm = bb.shape[-2] + bn = bb.shape[-1] + + cm = am * bm + cn = an * bn + + # allocate c + C = np.empty((cm, cn), dtype=dt) + + # In practice this is runs quicker than the more obvious + # `each element of A multiplied by B and assigned to + # a block in C` like alg. 
+ + # loop over rows of A + for i in range(am): + # compute the column offset into C + rjmp = i * bm + # loop over rows of B + for k in range(bm): + # compute row the offset into C + irjmp = rjmp + k + # slice a given row of B + slc = bb[k, :] + # loop over columns of A + for j in range(an): + # vectorized assignment of an element of A + # multiplied by the current row of B into + # a slice of a row of C + cjmp = j * bn + C[irjmp, cjmp:cjmp + bn] = aa[i, j] * slc + + return ret_c(a, b, C) + + return kron_impl diff --git a/venv/lib/python3.10/site-packages/numba/np/math/__init__.py b/venv/lib/python3.10/site-packages/numba/np/math/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/numba/np/math/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/math/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f236a1281571a5999f54ae42d7c1f5da4fc65776 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/math/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/math/__pycache__/cmathimpl.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/math/__pycache__/cmathimpl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..215c4533da6d43ce54f76b5c63c69e8a159dccf6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/math/__pycache__/cmathimpl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/math/__pycache__/mathimpl.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/math/__pycache__/mathimpl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3af9366603e5f3d5602d9d086bd6f930baf2aa5 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/numba/np/math/__pycache__/mathimpl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/math/__pycache__/numbers.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/math/__pycache__/numbers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..404d1c85493bfff2772b8492ba52649f38529bca Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/math/__pycache__/numbers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/math/cmathimpl.py b/venv/lib/python3.10/site-packages/numba/np/math/cmathimpl.py new file mode 100644 index 0000000000000000000000000000000000000000..0ac32a9144ed4994e150a6a686c461978dd90d99 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/math/cmathimpl.py @@ -0,0 +1,542 @@ +""" +Implement the cmath module functions. +""" + + +import cmath +import math + +from numba.core.imputils import impl_ret_untracked +from numba.core import types +from numba.core.typing import signature +from numba.cpython import mathimpl +from numba.core.extending import overload + +# registry = Registry('cmathimpl') +# lower = registry.lower + + +def is_nan(builder, z): + return builder.fcmp_unordered('uno', z.real, z.imag) + +def is_inf(builder, z): + return builder.or_(mathimpl.is_inf(builder, z.real), + mathimpl.is_inf(builder, z.imag)) + +def is_finite(builder, z): + return builder.and_(mathimpl.is_finite(builder, z.real), + mathimpl.is_finite(builder, z.imag)) + + +# @lower(cmath.isnan, types.Complex) +def isnan_float_impl(context, builder, sig, args): + [typ] = sig.args + [value] = args + z = context.make_complex(builder, typ, value=value) + res = is_nan(builder, z) + return impl_ret_untracked(context, builder, sig.return_type, res) + +# @lower(cmath.isinf, types.Complex) +def isinf_float_impl(context, builder, sig, args): + [typ] = sig.args + [value] = args + z = context.make_complex(builder, typ, value=value) + res = 
is_inf(builder, z) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +# @lower(cmath.isfinite, types.Complex) +def isfinite_float_impl(context, builder, sig, args): + [typ] = sig.args + [value] = args + z = context.make_complex(builder, typ, value=value) + res = is_finite(builder, z) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +# @overload(cmath.rect) +def impl_cmath_rect(r, phi): + if all([isinstance(typ, types.Float) for typ in [r, phi]]): + def impl(r, phi): + if not math.isfinite(phi): + if not r: + # cmath.rect(0, phi={inf, nan}) = 0 + return abs(r) + if math.isinf(r): + # cmath.rect(inf, phi={inf, nan}) = inf + j phi + return complex(r, phi) + real = math.cos(phi) + imag = math.sin(phi) + if real == 0. and math.isinf(r): + # 0 * inf would return NaN, we want to keep 0 but xor the sign + real /= r + else: + real *= r + if imag == 0. and math.isinf(r): + # ditto + imag /= r + else: + imag *= r + return complex(real, imag) + return impl + + +def intrinsic_complex_unary(inner_func): + def wrapper(context, builder, sig, args): + [typ] = sig.args + [value] = args + z = context.make_complex(builder, typ, value=value) + x = z.real + y = z.imag + # Same as above: math.isfinite() is unavailable on 2.x so we precompute + # its value and pass it to the pure Python implementation. 
+ x_is_finite = mathimpl.is_finite(builder, x) + y_is_finite = mathimpl.is_finite(builder, y) + inner_sig = signature(sig.return_type, + *(typ.underlying_float,) * 2 + (types.boolean,) * 2) + res = context.compile_internal(builder, inner_func, inner_sig, + (x, y, x_is_finite, y_is_finite)) + return impl_ret_untracked(context, builder, sig, res) + return wrapper + + +NAN = float('nan') +INF = float('inf') + +# @lower(cmath.exp, types.Complex) +@intrinsic_complex_unary +def exp_impl(x, y, x_is_finite, y_is_finite): + """cmath.exp(x + y j)""" + if x_is_finite: + if y_is_finite: + c = math.cos(y) + s = math.sin(y) + r = math.exp(x) + return complex(r * c, r * s) + else: + return complex(NAN, NAN) + elif math.isnan(x): + if y: + return complex(x, x) # nan + j nan + else: + return complex(x, y) # nan + 0j + elif x > 0.0: + # x == +inf + if y_is_finite: + real = math.cos(y) + imag = math.sin(y) + # Avoid NaNs if math.cos(y) or math.sin(y) == 0 + # (e.g. cmath.exp(inf + 0j) == inf + 0j) + if real != 0: + real *= x + if imag != 0: + imag *= x + return complex(real, imag) + else: + return complex(x, NAN) + else: + # x == -inf + if y_is_finite: + r = math.exp(x) + c = math.cos(y) + s = math.sin(y) + return complex(r * c, r * s) + else: + r = 0 + return complex(r, r) + +# @lower(cmath.log, types.Complex) +@intrinsic_complex_unary +def log_impl(x, y, x_is_finite, y_is_finite): + """cmath.log(x + y j)""" + a = math.log(math.hypot(x, y)) + b = math.atan2(y, x) + return complex(a, b) + + +# @lower(cmath.log, types.Complex, types.Complex) +def log_base_impl(context, builder, sig, args): + """cmath.log(z, base)""" + [z, base] = args + + def log_base(z, base): + return cmath.log(z) / cmath.log(base) + + res = context.compile_internal(builder, log_base, sig, args) + return impl_ret_untracked(context, builder, sig, res) + + +# @overload(cmath.log10) +def impl_cmath_log10(z): + if not isinstance(z, types.Complex): + return + + LN_10 = 2.302585092994045684 + + def log10_impl(z): + 
"""cmath.log10(z)""" + z = cmath.log(z) + # This formula gives better results on +/-inf than cmath.log(z, 10) + # See http://bugs.python.org/issue22544 + return complex(z.real / LN_10, z.imag / LN_10) + + return log10_impl + + +# @overload(cmath.phase) +def phase_impl(x): + """cmath.phase(x + y j)""" + + if not isinstance(x, types.Complex): + return + + def impl(x): + return math.atan2(x.imag, x.real) + return impl + + +# @overload(cmath.polar) +def polar_impl(x): + if not isinstance(x, types.Complex): + return + + def impl(x): + r, i = x.real, x.imag + return math.hypot(r, i), math.atan2(i, r) + return impl + + +# @lower(cmath.sqrt, types.Complex) +def sqrt_impl(context, builder, sig, args): + # We risk spurious overflow for components >= FLT_MAX / (1 + sqrt(2)). + + SQRT2 = 1.414213562373095048801688724209698079E0 + ONE_PLUS_SQRT2 = (1. + SQRT2) + theargflt = sig.args[0].underlying_float + # Get a type specific maximum value so scaling for overflow is based on that + MAX = mathimpl.DBL_MAX if theargflt.bitwidth == 64 else mathimpl.FLT_MAX + # THRES will be double precision, should not impact typing as it's just + # used for comparison, there *may* be a few values near THRES which + # deviate from e.g. NumPy due to rounding that occurs in the computation + # of this value in the case of a 32bit argument. + THRES = MAX / ONE_PLUS_SQRT2 + + def sqrt_impl(z): + """cmath.sqrt(z)""" + # This is NumPy's algorithm, see npy_csqrt() in npy_math_complex.c.src + a = z.real + b = z.imag + if a == 0.0 and b == 0.0: + return complex(abs(b), b) + if math.isinf(b): + return complex(abs(b), b) + if math.isnan(a): + return complex(a, a) + if math.isinf(a): + if a < 0.0: + return complex(abs(b - b), math.copysign(a, b)) + else: + return complex(a, math.copysign(b - b, b)) + + # The remaining special case (b is NaN) is handled just fine by + # the normal code path below. 
+ + # Scale to avoid overflow + if abs(a) >= THRES or abs(b) >= THRES: + a *= 0.25 + b *= 0.25 + scale = True + else: + scale = False + # Algorithm 312, CACM vol 10, Oct 1967 + if a >= 0: + t = math.sqrt((a + math.hypot(a, b)) * 0.5) + real = t + imag = b / (2 * t) + else: + t = math.sqrt((-a + math.hypot(a, b)) * 0.5) + real = abs(b) / (2 * t) + imag = math.copysign(t, b) + # Rescale + if scale: + return complex(real * 2, imag) + else: + return complex(real, imag) + + res = context.compile_internal(builder, sqrt_impl, sig, args) + return impl_ret_untracked(context, builder, sig, res) + + +# @lower(cmath.cos, types.Complex) +def cos_impl(context, builder, sig, args): + def cos_impl(z): + """cmath.cos(z) = cmath.cosh(z j)""" + return cmath.cosh(complex(-z.imag, z.real)) + + res = context.compile_internal(builder, cos_impl, sig, args) + return impl_ret_untracked(context, builder, sig, res) + +# @overload(cmath.cosh) +def impl_cmath_cosh(z): + if not isinstance(z, types.Complex): + return + + def cosh_impl(z): + """cmath.cosh(z)""" + x = z.real + y = z.imag + if math.isinf(x): + if math.isnan(y): + # x = +inf, y = NaN => cmath.cosh(x + y j) = inf + Nan * j + real = abs(x) + imag = y + elif y == 0.0: + # x = +inf, y = 0 => cmath.cosh(x + y j) = inf + 0j + real = abs(x) + imag = y + else: + real = math.copysign(x, math.cos(y)) + imag = math.copysign(x, math.sin(y)) + if x < 0.0: + # x = -inf => negate imaginary part of result + imag = -imag + return complex(real, imag) + return complex(math.cos(y) * math.cosh(x), + math.sin(y) * math.sinh(x)) + return cosh_impl + + +# @lower(cmath.sin, types.Complex) +def sin_impl(context, builder, sig, args): + def sin_impl(z): + """cmath.sin(z) = -j * cmath.sinh(z j)""" + r = cmath.sinh(complex(-z.imag, z.real)) + return complex(r.imag, -r.real) + + res = context.compile_internal(builder, sin_impl, sig, args) + return impl_ret_untracked(context, builder, sig, res) + +# @overload(cmath.sinh) +def impl_cmath_sinh(z): + if not 
isinstance(z, types.Complex): + return + + def sinh_impl(z): + """cmath.sinh(z)""" + x = z.real + y = z.imag + if math.isinf(x): + if math.isnan(y): + # x = +/-inf, y = NaN => cmath.sinh(x + y j) = x + NaN * j + real = x + imag = y + else: + real = math.cos(y) + imag = math.sin(y) + if real != 0.: + real *= x + if imag != 0.: + imag *= abs(x) + return complex(real, imag) + return complex(math.cos(y) * math.sinh(x), + math.sin(y) * math.cosh(x)) + return sinh_impl + + +# @lower(cmath.tan, types.Complex) +def tan_impl(context, builder, sig, args): + def tan_impl(z): + """cmath.tan(z) = -j * cmath.tanh(z j)""" + r = cmath.tanh(complex(-z.imag, z.real)) + return complex(r.imag, -r.real) + + res = context.compile_internal(builder, tan_impl, sig, args) + return impl_ret_untracked(context, builder, sig, res) + + +# @overload(cmath.tanh) +def impl_cmath_tanh(z): + if not isinstance(z, types.Complex): + return + + def tanh_impl(z): + """cmath.tanh(z)""" + x = z.real + y = z.imag + if math.isinf(x): + real = math.copysign(1., x) + if math.isinf(y): + imag = 0. + else: + imag = math.copysign(0., math.sin(2. * y)) + return complex(real, imag) + # This is CPython's algorithm (see c_tanh() in cmathmodule.c). + # XXX how to force float constants into single precision? + tx = math.tanh(x) + ty = math.tan(y) + cx = 1. / math.cosh(x) + txty = tx * ty + denom = 1. + txty * txty + return complex( + tx * (1. 
+ ty * ty) / denom, + ((ty / denom) * cx) * cx) + + return tanh_impl + + +# @lower(cmath.acos, types.Complex) +def acos_impl(context, builder, sig, args): + LN_4 = math.log(4) + THRES = mathimpl.FLT_MAX / 4 + + def acos_impl(z): + """cmath.acos(z)""" + # CPython's algorithm (see c_acos() in cmathmodule.c) + if abs(z.real) > THRES or abs(z.imag) > THRES: + # Avoid unnecessary overflow for large arguments + # (also handles infinities gracefully) + real = math.atan2(abs(z.imag), z.real) + imag = math.copysign( + math.log(math.hypot(z.real * 0.5, z.imag * 0.5)) + LN_4, + -z.imag) + return complex(real, imag) + else: + s1 = cmath.sqrt(complex(1. - z.real, -z.imag)) + s2 = cmath.sqrt(complex(1. + z.real, z.imag)) + real = 2. * math.atan2(s1.real, s2.real) + imag = math.asinh(s2.real * s1.imag - s2.imag * s1.real) + return complex(real, imag) + + res = context.compile_internal(builder, acos_impl, sig, args) + return impl_ret_untracked(context, builder, sig, res) + +# @overload(cmath.acosh) +def impl_cmath_acosh(z): + if not isinstance(z, types.Complex): + return + + LN_4 = math.log(4) + THRES = mathimpl.FLT_MAX / 4 + + def acosh_impl(z): + """cmath.acosh(z)""" + # CPython's algorithm (see c_acosh() in cmathmodule.c) + if abs(z.real) > THRES or abs(z.imag) > THRES: + # Avoid unnecessary overflow for large arguments + # (also handles infinities gracefully) + real = math.log(math.hypot(z.real * 0.5, z.imag * 0.5)) + LN_4 + imag = math.atan2(z.imag, z.real) + return complex(real, imag) + else: + s1 = cmath.sqrt(complex(z.real - 1., z.imag)) + s2 = cmath.sqrt(complex(z.real + 1., z.imag)) + real = math.asinh(s1.real * s2.real + s1.imag * s2.imag) + imag = 2. * math.atan2(s1.imag, s2.real) + return complex(real, imag) + # Condensed formula (NumPy) + #return cmath.log(z + cmath.sqrt(z + 1.) 
* cmath.sqrt(z - 1.)) + + return acosh_impl + + +# @lower(cmath.asinh, types.Complex) +def asinh_impl(context, builder, sig, args): + LN_4 = math.log(4) + THRES = mathimpl.FLT_MAX / 4 + + def asinh_impl(z): + """cmath.asinh(z)""" + # CPython's algorithm (see c_asinh() in cmathmodule.c) + if abs(z.real) > THRES or abs(z.imag) > THRES: + real = math.copysign( + math.log(math.hypot(z.real * 0.5, z.imag * 0.5)) + LN_4, + z.real) + imag = math.atan2(z.imag, abs(z.real)) + return complex(real, imag) + else: + s1 = cmath.sqrt(complex(1. + z.imag, -z.real)) + s2 = cmath.sqrt(complex(1. - z.imag, z.real)) + real = math.asinh(s1.real * s2.imag - s2.real * s1.imag) + imag = math.atan2(z.imag, s1.real * s2.real - s1.imag * s2.imag) + return complex(real, imag) + + res = context.compile_internal(builder, asinh_impl, sig, args) + return impl_ret_untracked(context, builder, sig, res) + +# @lower(cmath.asin, types.Complex) +def asin_impl(context, builder, sig, args): + def asin_impl(z): + """cmath.asin(z) = -j * cmath.asinh(z j)""" + r = cmath.asinh(complex(-z.imag, z.real)) + return complex(r.imag, -r.real) + + res = context.compile_internal(builder, asin_impl, sig, args) + return impl_ret_untracked(context, builder, sig, res) + +# @lower(cmath.atan, types.Complex) +def atan_impl(context, builder, sig, args): + def atan_impl(z): + """cmath.atan(z) = -j * cmath.atanh(z j)""" + r = cmath.atanh(complex(-z.imag, z.real)) + if math.isinf(z.real) and math.isnan(z.imag): + # XXX this is odd but necessary + return complex(r.imag, r.real) + else: + return complex(r.imag, -r.real) + + res = context.compile_internal(builder, atan_impl, sig, args) + return impl_ret_untracked(context, builder, sig, res) + +# @lower(cmath.atanh, types.Complex) +def atanh_impl(context, builder, sig, args): + LN_4 = math.log(4) + THRES_LARGE = math.sqrt(mathimpl.FLT_MAX / 4) + THRES_SMALL = math.sqrt(mathimpl.FLT_MIN) + PI_12 = math.pi / 2 + + def atanh_impl(z): + """cmath.atanh(z)""" + # CPython's algorithm 
(see c_atanh() in cmathmodule.c) + if z.real < 0.: + # Reduce to case where z.real >= 0., using atanh(z) = -atanh(-z). + negate = True + z = -z + else: + negate = False + + ay = abs(z.imag) + if math.isnan(z.real) or z.real > THRES_LARGE or ay > THRES_LARGE: + if math.isinf(z.imag): + real = math.copysign(0., z.real) + elif math.isinf(z.real): + real = 0. + else: + # may be safe from overflow, depending on hypot's implementation... + h = math.hypot(z.real * 0.5, z.imag * 0.5) + real = z.real/4./h/h + imag = -math.copysign(PI_12, -z.imag) + elif z.real == 1. and ay < THRES_SMALL: + # C99 standard says: atanh(1+/-0.) should be inf +/- 0j + if ay == 0.: + real = INF + imag = z.imag + else: + real = -math.log(math.sqrt(ay) / + math.sqrt(math.hypot(ay, 2.))) + imag = math.copysign(math.atan2(2., -ay) / 2, z.imag) + else: + sqay = ay * ay + zr1 = 1 - z.real + real = math.log1p(4. * z.real / (zr1 * zr1 + sqay)) * 0.25 + imag = -math.atan2(-2. * z.imag, + zr1 * (1 + z.real) - sqay) * 0.5 + + if math.isnan(z.imag): + imag = NAN + if negate: + return complex(-real, -imag) + else: + return complex(real, imag) + + res = context.compile_internal(builder, atanh_impl, sig, args) + return impl_ret_untracked(context, builder, sig, res) diff --git a/venv/lib/python3.10/site-packages/numba/np/math/mathimpl.py b/venv/lib/python3.10/site-packages/numba/np/math/mathimpl.py new file mode 100644 index 0000000000000000000000000000000000000000..d872b97e9eea6dd49b6725024743767391c8d4ae --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/math/mathimpl.py @@ -0,0 +1,455 @@ +""" +Provide math calls that uses intrinsics or libc math functions. 
+""" + +import math +import operator +import sys +import numpy as np + +import llvmlite.ir +from llvmlite.ir import Constant + +from numba.core.imputils import impl_ret_untracked +from numba.core import types, config, cgutils +from numba.core.extending import overload +from numba.core.typing import signature +from numba.cpython.unsafe.numbers import trailing_zeros + + +# registry = Registry('mathimpl') +# lower = registry.lower + + +# Helpers, shared with cmathimpl. +_NP_FLT_FINFO = np.finfo(np.dtype('float32')) +FLT_MAX = _NP_FLT_FINFO.max +FLT_MIN = _NP_FLT_FINFO.tiny + +_NP_DBL_FINFO = np.finfo(np.dtype('float64')) +DBL_MAX = _NP_DBL_FINFO.max +DBL_MIN = _NP_DBL_FINFO.tiny + +FLOAT_ABS_MASK = 0x7fffffff +FLOAT_SIGN_MASK = 0x80000000 +DOUBLE_ABS_MASK = 0x7fffffffffffffff +DOUBLE_SIGN_MASK = 0x8000000000000000 + + +def is_nan(builder, val): + """ + Return a condition testing whether *val* is a NaN. + """ + return builder.fcmp_unordered('uno', val, val) + +def is_inf(builder, val): + """ + Return a condition testing whether *val* is an infinite. + """ + pos_inf = Constant(val.type, float("+inf")) + neg_inf = Constant(val.type, float("-inf")) + isposinf = builder.fcmp_ordered('==', val, pos_inf) + isneginf = builder.fcmp_ordered('==', val, neg_inf) + return builder.or_(isposinf, isneginf) + +def is_finite(builder, val): + """ + Return a condition testing whether *val* is a finite. + """ + # is_finite(x) <=> x - x != NaN + val_minus_val = builder.fsub(val, val) + return builder.fcmp_ordered('ord', val_minus_val, val_minus_val) + +def f64_as_int64(builder, val): + """ + Bitcast a double into a 64-bit integer. + """ + assert val.type == llvmlite.ir.DoubleType() + return builder.bitcast(val, llvmlite.ir.IntType(64)) + +def int64_as_f64(builder, val): + """ + Bitcast a 64-bit integer into a double. 
+ """ + assert val.type == llvmlite.ir.IntType(64) + return builder.bitcast(val, llvmlite.ir.DoubleType()) + +def f32_as_int32(builder, val): + """ + Bitcast a float into a 32-bit integer. + """ + assert val.type == llvmlite.ir.FloatType() + return builder.bitcast(val, llvmlite.ir.IntType(32)) + +def int32_as_f32(builder, val): + """ + Bitcast a 32-bit integer into a float. + """ + assert val.type == llvmlite.ir.IntType(32) + return builder.bitcast(val, llvmlite.ir.FloatType()) + +def negate_real(builder, val): + """ + Negate real number *val*, with proper handling of zeros. + """ + # The negative zero forces LLVM to handle signed zeros properly. + return builder.fsub(Constant(val.type, -0.0), val) + +def call_fp_intrinsic(builder, name, args): + """ + Call a LLVM intrinsic floating-point operation. + """ + mod = builder.module + intr = mod.declare_intrinsic(name, [a.type for a in args]) + return builder.call(intr, args) + + +def _unary_int_input_wrapper_impl(wrapped_impl): + """ + Return an implementation factory to convert the single integral input + argument to a float64, then defer to the *wrapped_impl*. + """ + def implementer(context, builder, sig, args): + val, = args + input_type = sig.args[0] + fpval = context.cast(builder, val, input_type, types.float64) + inner_sig = signature(types.float64, types.float64) + res = wrapped_impl(context, builder, inner_sig, (fpval,)) + return context.cast(builder, res, types.float64, sig.return_type) + + return implementer + +def unary_math_int_impl(fn, float_impl): + impl = _unary_int_input_wrapper_impl(float_impl) + # lower(fn, types.Integer)(impl) + +def unary_math_intr(fn, intrcode): + """ + Implement the math function *fn* using the LLVM intrinsic *intrcode*. 
+ """ + # @lower(fn, types.Float) + def float_impl(context, builder, sig, args): + res = call_fp_intrinsic(builder, intrcode, args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + unary_math_int_impl(fn, float_impl) + return float_impl + +def unary_math_extern(fn, f32extern, f64extern, int_restype=False): + """ + Register implementations of Python function *fn* using the + external function named *f32extern* and *f64extern* (for float32 + and float64 inputs, respectively). + If *int_restype* is true, then the function's return value should be + integral, otherwise floating-point. + """ + if config.USE_LEGACY_TYPE_SYSTEM: + f_restype = types.int64 if int_restype else None + else: + f_restype = types.np_int64 if int_restype else None + + def float_impl(context, builder, sig, args): + """ + Implement *fn* for a types.Float input. + """ + [val] = args + mod = builder.module + input_type = sig.args[0] + lty = context.get_value_type(input_type) + func_name = { + types.float32: f32extern, + types.float64: f64extern, + }[input_type] + fnty = llvmlite.ir.FunctionType(lty, [lty]) + fn = cgutils.insert_pure_function(builder.module, fnty, name=func_name) + res = builder.call(fn, (val,)) + res = context.cast(builder, res, input_type, sig.return_type) + return impl_ret_untracked(context, builder, sig.return_type, res) + + # lower(fn, types.Float)(float_impl) + + # Implement wrapper for integer inputs + unary_math_int_impl(fn, float_impl) + + return float_impl + + +unary_math_intr(math.fabs, 'llvm.fabs') +exp_impl = unary_math_intr(math.exp, 'llvm.exp') +log_impl = unary_math_intr(math.log, 'llvm.log') +log10_impl = unary_math_intr(math.log10, 'llvm.log10') +sin_impl = unary_math_intr(math.sin, 'llvm.sin') +cos_impl = unary_math_intr(math.cos, 'llvm.cos') + +log1p_impl = unary_math_extern(math.log1p, "log1pf", "log1p") +expm1_impl = unary_math_extern(math.expm1, "expm1f", "expm1") +erf_impl = unary_math_extern(math.erf, "erff", "erf") +erfc_impl = 
unary_math_extern(math.erfc, "erfcf", "erfc") + +tan_impl = unary_math_extern(math.tan, "tanf", "tan") +asin_impl = unary_math_extern(math.asin, "asinf", "asin") +acos_impl = unary_math_extern(math.acos, "acosf", "acos") +atan_impl = unary_math_extern(math.atan, "atanf", "atan") + +asinh_impl = unary_math_extern(math.asinh, "asinhf", "asinh") +acosh_impl = unary_math_extern(math.acosh, "acoshf", "acosh") +atanh_impl = unary_math_extern(math.atanh, "atanhf", "atanh") +sinh_impl = unary_math_extern(math.sinh, "sinhf", "sinh") +cosh_impl = unary_math_extern(math.cosh, "coshf", "cosh") +tanh_impl = unary_math_extern(math.tanh, "tanhf", "tanh") + +log2_impl = unary_math_extern(math.log2, "log2f", "log2") +ceil_impl = unary_math_extern(math.ceil, "ceilf", "ceil", True) +floor_impl = unary_math_extern(math.floor, "floorf", "floor", True) + +gamma_impl = unary_math_extern(math.gamma, "numba_gammaf", "numba_gamma") # work-around +sqrt_impl = unary_math_extern(math.sqrt, "sqrtf", "sqrt") +trunc_impl = unary_math_extern(math.trunc, "truncf", "trunc", True) +lgamma_impl = unary_math_extern(math.lgamma, "lgammaf", "lgamma") + + +# @lower(math.isnan, types.Float) +def isnan_float_impl(context, builder, sig, args): + [val] = args + res = is_nan(builder, val) + return impl_ret_untracked(context, builder, sig.return_type, res) + +# @lower(math.isnan, types.Integer) +def isnan_int_impl(context, builder, sig, args): + res = cgutils.false_bit + return impl_ret_untracked(context, builder, sig.return_type, res) + + +# @lower(math.isinf, types.Float) +def isinf_float_impl(context, builder, sig, args): + [val] = args + res = is_inf(builder, val) + return impl_ret_untracked(context, builder, sig.return_type, res) + +# @lower(math.isinf, types.Integer) +def isinf_int_impl(context, builder, sig, args): + res = cgutils.false_bit + return impl_ret_untracked(context, builder, sig.return_type, res) + + +# @lower(math.isfinite, types.Float) +def isfinite_float_impl(context, builder, sig, args): + 
[val] = args + res = is_finite(builder, val) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +# @lower(math.isfinite, types.Integer) +def isfinite_int_impl(context, builder, sig, args): + res = cgutils.true_bit + return impl_ret_untracked(context, builder, sig.return_type, res) + + +# @lower(math.copysign, types.Float, types.Float) +def copysign_float_impl(context, builder, sig, args): + lty = args[0].type + mod = builder.module + fn = cgutils.get_or_insert_function(mod, llvmlite.ir.FunctionType(lty, (lty, lty)), + 'llvm.copysign.%s' % lty.intrinsic_name) + res = builder.call(fn, args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +# ----------------------------------------------------------------------------- + + +# @lower(math.frexp, types.Float) +def frexp_impl(context, builder, sig, args): + val, = args + fltty = context.get_data_type(sig.args[0]) + intty = context.get_data_type(sig.return_type[1]) + expptr = cgutils.alloca_once(builder, intty, name='exp') + fnty = llvmlite.ir.FunctionType(fltty, (fltty, llvmlite.ir.PointerType(intty))) + fname = { + "float": "numba_frexpf", + "double": "numba_frexp", + }[str(fltty)] + fn = cgutils.get_or_insert_function(builder.module, fnty, fname) + res = builder.call(fn, (val, expptr)) + res = cgutils.make_anonymous_struct(builder, (res, builder.load(expptr))) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +# @lower(math.ldexp, types.Float, types.intc) +def ldexp_impl(context, builder, sig, args): + val, exp = args + fltty, intty = map(context.get_data_type, sig.args) + fnty = llvmlite.ir.FunctionType(fltty, (fltty, intty)) + fname = { + "float": "numba_ldexpf", + "double": "numba_ldexp", + }[str(fltty)] + fn = cgutils.insert_pure_function(builder.module, fnty, name=fname) + res = builder.call(fn, (val, exp)) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +# 
----------------------------------------------------------------------------- + + +# @lower(math.atan2, types.int64, types.int64) +def atan2_s64_impl(context, builder, sig, args): + [y, x] = args + y = builder.sitofp(y, llvmlite.ir.DoubleType()) + x = builder.sitofp(x, llvmlite.ir.DoubleType()) + fsig = signature(types.float64, types.float64, types.float64) + return atan2_float_impl(context, builder, fsig, (y, x)) + +# @lower(math.atan2, types.uint64, types.uint64) +def atan2_u64_impl(context, builder, sig, args): + [y, x] = args + y = builder.uitofp(y, llvmlite.ir.DoubleType()) + x = builder.uitofp(x, llvmlite.ir.DoubleType()) + fsig = signature(types.float64, types.float64, types.float64) + return atan2_float_impl(context, builder, fsig, (y, x)) + +# @lower(math.atan2, types.Float, types.Float) +def atan2_float_impl(context, builder, sig, args): + assert len(args) == 2 + mod = builder.module + ty = sig.args[0] + lty = context.get_value_type(ty) + func_name = { + types.float32: "atan2f", + types.float64: "atan2" + }[ty] + fnty = llvmlite.ir.FunctionType(lty, (lty, lty)) + fn = cgutils.insert_pure_function(builder.module, fnty, name=func_name) + res = builder.call(fn, args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +# ----------------------------------------------------------------------------- + + +# @lower(math.hypot, types.int64, types.int64) +def hypot_s64_impl(context, builder, sig, args): + [x, y] = args + y = builder.sitofp(y, llvmlite.ir.DoubleType()) + x = builder.sitofp(x, llvmlite.ir.DoubleType()) + fsig = signature(types.float64, types.float64, types.float64) + res = hypot_float_impl(context, builder, fsig, (x, y)) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +# @lower(math.hypot, types.uint64, types.uint64) +def hypot_u64_impl(context, builder, sig, args): + [x, y] = args + y = builder.sitofp(y, llvmlite.ir.DoubleType()) + x = builder.sitofp(x, llvmlite.ir.DoubleType()) + fsig = 
signature(types.float64, types.float64, types.float64) + res = hypot_float_impl(context, builder, fsig, (x, y)) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +# @lower(math.hypot, types.Float, types.Float) +def hypot_float_impl(context, builder, sig, args): + xty, yty = sig.args + assert xty == yty == sig.return_type + x, y = args + + # Windows has alternate names for hypot/hypotf, see + # https://msdn.microsoft.com/fr-fr/library/a9yb3dbt%28v=vs.80%29.aspx + fname = { + types.float32: "_hypotf" if sys.platform == 'win32' else "hypotf", + types.float64: "_hypot" if sys.platform == 'win32' else "hypot", + }[xty] + plat_hypot = types.ExternalFunction(fname, sig) + + if sys.platform == 'win32' and config.MACHINE_BITS == 32: + inf = xty(float('inf')) + + def hypot_impl(x, y): + if math.isinf(x) or math.isinf(y): + return inf + return plat_hypot(x, y) + else: + def hypot_impl(x, y): + return plat_hypot(x, y) + + res = context.compile_internal(builder, hypot_impl, sig, args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +# ----------------------------------------------------------------------------- + +# @lower(math.radians, types.Float) +def radians_float_impl(context, builder, sig, args): + [x] = args + coef = context.get_constant(sig.return_type, math.pi / 180) + res = builder.fmul(x, coef) + return impl_ret_untracked(context, builder, sig.return_type, res) + +unary_math_int_impl(math.radians, radians_float_impl) + +# ----------------------------------------------------------------------------- + +# @lower(math.degrees, types.Float) +def degrees_float_impl(context, builder, sig, args): + [x] = args + coef = context.get_constant(sig.return_type, 180 / math.pi) + res = builder.fmul(x, coef) + return impl_ret_untracked(context, builder, sig.return_type, res) + +unary_math_int_impl(math.degrees, degrees_float_impl) + +# ----------------------------------------------------------------------------- + +# @lower(math.pow, 
types.Float, types.Float) +# @lower(math.pow, types.Float, types.Integer) +def pow_impl(context, builder, sig, args): + impl = context.get_function(operator.pow, sig) + return impl(builder, args) + +# ----------------------------------------------------------------------------- + + +def _unsigned(T): + """Convert integer to unsigned integer of equivalent width.""" + pass + +@overload(_unsigned) +def _unsigned_impl(T): + if T in types.unsigned_domain: + return lambda T: T + elif T in types.signed_domain: + newT = getattr(types, 'uint{}'.format(T.bitwidth)) + return lambda T: newT(T) + + +def gcd_impl(context, builder, sig, args): + xty, yty = sig.args + assert xty == yty == sig.return_type + x, y = args + + def gcd(a, b): + """ + Stein's algorithm, heavily cribbed from Julia implementation. + """ + T = type(a) + if a == 0: return abs(b) + if b == 0: return abs(a) + za = trailing_zeros(a) + zb = trailing_zeros(b) + k = min(za, zb) + # Uses np.*_shift instead of operators due to return types + u = _unsigned(abs(np.right_shift(a, za))) + v = _unsigned(abs(np.right_shift(b, zb))) + while u != v: + if u > v: + u, v = v, u + v -= u + v = np.right_shift(v, trailing_zeros(v)) + r = np.left_shift(T(u), k) + return r + + res = context.compile_internal(builder, gcd, sig, args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +# lower(math.gcd, types.Integer, types.Integer)(gcd_impl) diff --git a/venv/lib/python3.10/site-packages/numba/np/math/numbers.py b/venv/lib/python3.10/site-packages/numba/np/math/numbers.py new file mode 100644 index 0000000000000000000000000000000000000000..2e1cda9c279cd62aed52eb9a785ef5a10eb3a8d6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/math/numbers.py @@ -0,0 +1,1395 @@ +import math +import numbers + +import numpy as np + +from llvmlite import ir +from llvmlite.ir import Constant + +from numba.core.imputils import impl_ret_untracked +from numba.core import typing, types, errors, cgutils +from 
numba.cpython.unsafe.numbers import viewer + +def _int_arith_flags(rettype): + """ + Return the modifier flags for integer arithmetic. + """ + if rettype.signed: + # Ignore the effects of signed overflow. This is important for + # optimization of some indexing operations. For example + # array[i+1] could see `i+1` trigger a signed overflow and + # give a negative number. With Python's indexing, a negative + # index is treated differently: its resolution has a runtime cost. + # Telling LLVM to ignore signed overflows allows it to optimize + # away the check for a negative `i+1` if it knows `i` is positive. + return ['nsw'] + else: + return [] + + +def int_add_impl(context, builder, sig, args): + [va, vb] = args + [ta, tb] = sig.args + a = context.cast(builder, va, ta, sig.return_type) + b = context.cast(builder, vb, tb, sig.return_type) + res = builder.add(a, b, flags=_int_arith_flags(sig.return_type)) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def int_sub_impl(context, builder, sig, args): + [va, vb] = args + [ta, tb] = sig.args + a = context.cast(builder, va, ta, sig.return_type) + b = context.cast(builder, vb, tb, sig.return_type) + res = builder.sub(a, b, flags=_int_arith_flags(sig.return_type)) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def int_mul_impl(context, builder, sig, args): + [va, vb] = args + [ta, tb] = sig.args + a = context.cast(builder, va, ta, sig.return_type) + b = context.cast(builder, vb, tb, sig.return_type) + res = builder.mul(a, b, flags=_int_arith_flags(sig.return_type)) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def int_divmod_signed(context, builder, ty, x, y): + """ + Reference Objects/intobject.c + xdivy = x / y; + xmody = (long)(x - (unsigned long)xdivy * y); + /* If the signs of x and y differ, and the remainder is non-0, + * C89 doesn't define whether xdivy is now the floor or the + * ceiling of the infinitely precise quotient. 
We want the floor, + * and we have it iff the remainder's sign matches y's. + */ + if (xmody && ((y ^ xmody) < 0) /* i.e. and signs differ */) { + xmody += y; + --xdivy; + assert(xmody && ((y ^ xmody) >= 0)); + } + *p_xdivy = xdivy; + *p_xmody = xmody; + """ + assert x.type == y.type + + ZERO = y.type(0) + ONE = y.type(1) + + # NOTE: On x86 at least, dividing the lowest representable integer + # (e.g. 0x80000000 for int32) by -1 causes a SIFGPE (division overflow), + # causing the process to crash. + # We return 0, 0 instead (more or less like Numpy). + + resdiv = cgutils.alloca_once_value(builder, ZERO) + resmod = cgutils.alloca_once_value(builder, ZERO) + + is_overflow = builder.and_( + builder.icmp_signed('==', x, x.type(ty.minval)), + builder.icmp_signed('==', y, y.type(-1))) + + with builder.if_then(builder.not_(is_overflow), likely=True): + # Note LLVM will optimize this to a single divmod instruction, + # if available on the target CPU (e.g. x86). + xdivy = builder.sdiv(x, y) + xmody = builder.srem(x, y) + + y_xor_xmody_ltz = builder.icmp_signed('<', builder.xor(y, xmody), ZERO) + xmody_istrue = builder.icmp_signed('!=', xmody, ZERO) + cond = builder.and_(xmody_istrue, y_xor_xmody_ltz) + + with builder.if_else(cond) as (if_different_signs, if_same_signs): + with if_same_signs: + builder.store(xdivy, resdiv) + builder.store(xmody, resmod) + + with if_different_signs: + builder.store(builder.sub(xdivy, ONE), resdiv) + builder.store(builder.add(xmody, y), resmod) + + return builder.load(resdiv), builder.load(resmod) + + +def int_divmod(context, builder, ty, x, y): + """ + Integer divmod(x, y). The caller must ensure that y != 0. 
+ """ + if ty.signed: + return int_divmod_signed(context, builder, ty, x, y) + else: + return builder.udiv(x, y), builder.urem(x, y) + + +def _int_divmod_impl(context, builder, sig, args, zerodiv_message): + va, vb = args + ta, tb = sig.args + + ty = sig.return_type + if isinstance(ty, types.UniTuple): + ty = ty.dtype + a = context.cast(builder, va, ta, ty) + b = context.cast(builder, vb, tb, ty) + quot = cgutils.alloca_once(builder, a.type, name="quot") + rem = cgutils.alloca_once(builder, a.type, name="rem") + + with builder.if_else(cgutils.is_scalar_zero(builder, b), likely=False + ) as (if_zero, if_non_zero): + with if_zero: + if not context.error_model.fp_zero_division( + builder, (zerodiv_message,)): + # No exception raised => return 0 + # XXX We should also set the FPU exception status, but + # there's no easy way to do that from LLVM. + builder.store(b, quot) + builder.store(b, rem) + with if_non_zero: + q, r = int_divmod(context, builder, ty, a, b) + builder.store(q, quot) + builder.store(r, rem) + + return quot, rem + + +# @lower_builtin(divmod, types.Integer, types.Integer) +def int_divmod_impl(context, builder, sig, args): + quot, rem = _int_divmod_impl(context, builder, sig, args, + "integer divmod by zero") + + return cgutils.pack_array(builder, + (builder.load(quot), builder.load(rem))) + + +# @lower_builtin(operator.floordiv, types.Integer, types.Integer) +# @lower_builtin(operator.ifloordiv, types.Integer, types.Integer) +def int_floordiv_impl(context, builder, sig, args): + quot, rem = _int_divmod_impl(context, builder, sig, args, + "integer division by zero") + return builder.load(quot) + + +# @lower_builtin(operator.truediv, types.Integer, types.Integer) +# @lower_builtin(operator.itruediv, types.Integer, types.Integer) +def int_truediv_impl(context, builder, sig, args): + [va, vb] = args + [ta, tb] = sig.args + a = context.cast(builder, va, ta, sig.return_type) + b = context.cast(builder, vb, tb, sig.return_type) + with cgutils.if_zero(builder, 
b): + context.error_model.fp_zero_division(builder, ("division by zero",)) + res = builder.fdiv(a, b) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +# @lower_builtin(operator.mod, types.Integer, types.Integer) +# @lower_builtin(operator.imod, types.Integer, types.Integer) +def int_rem_impl(context, builder, sig, args): + quot, rem = _int_divmod_impl(context, builder, sig, args, + "integer modulo by zero") + return builder.load(rem) + + +def _get_power_zerodiv_return(context, return_type): + if (isinstance(return_type, types.Integer) + and not context.error_model.raise_on_fp_zero_division): + # If not raising, return 0x8000... when computing 0 ** + return -1 << (return_type.bitwidth - 1) + else: + return False + + +def int_power_impl(context, builder, sig, args): + """ + a ^ b, where a is an integer or real, and b an integer + """ + is_integer = isinstance(sig.args[0], types.Integer) + tp = sig.return_type + zerodiv_return = _get_power_zerodiv_return(context, tp) + + def int_power(a, b): + # Ensure computations are done with a large enough width + r = tp(1) + a = tp(a) + if b < 0: + invert = True + exp = -b + if exp < 0: + raise OverflowError + if is_integer: + if a == 0: + if zerodiv_return: + return zerodiv_return + else: + raise ZeroDivisionError("0 cannot be raised to a negative power") + if a != 1 and a != -1: + return 0 + else: + invert = False + exp = b + if exp > 0x10000: + # Optimization cutoff: fallback on the generic algorithm + return math.pow(a, float(b)) + while exp != 0: + if exp & 1: + r *= a + exp >>= 1 + a *= a + + return 1.0 / r if invert else r + + res = context.compile_internal(builder, int_power, sig, args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +# @lower_builtin(operator.pow, types.Integer, types.IntegerLiteral) +# @lower_builtin(operator.ipow, types.Integer, types.IntegerLiteral) +# @lower_builtin(operator.pow, types.Float, types.IntegerLiteral) +# @lower_builtin(operator.ipow, 
types.Float, types.IntegerLiteral) +def static_power_impl(context, builder, sig, args): + """ + a ^ b, where a is an integer or real, and b a constant integer + """ + exp = sig.args[1].value + if not isinstance(exp, numbers.Integral): + raise NotImplementedError + if abs(exp) > 0x10000: + # Optimization cutoff: fallback on the generic algorithm above + raise NotImplementedError + invert = exp < 0 + exp = abs(exp) + + tp = sig.return_type + is_integer = isinstance(tp, types.Integer) + zerodiv_return = _get_power_zerodiv_return(context, tp) + + val = context.cast(builder, args[0], sig.args[0], tp) + lty = val.type + + def mul(a, b): + if is_integer: + return builder.mul(a, b) + else: + return builder.fmul(a, b) + + # Unroll the exponentiation loop + res = lty(1) + a = val + while exp != 0: + if exp & 1: + res = mul(res, val) + exp >>= 1 + val = mul(val, val) + + if invert: + # If the exponent was negative, fix the result by inverting it + if is_integer: + # Integer inversion + def invert_impl(a): + if a == 0: + if zerodiv_return: + return zerodiv_return + else: + raise ZeroDivisionError("0 cannot be raised to a negative power") + if a != 1 and a != -1: + return 0 + else: + return a + + else: + # Real inversion + def invert_impl(a): + return 1.0 / a + + res = context.compile_internal(builder, invert_impl, + typing.signature(tp, tp), (res,)) + + return res + + +def int_slt_impl(context, builder, sig, args): + res = builder.icmp_signed('<', *args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def int_sle_impl(context, builder, sig, args): + res = builder.icmp_signed('<=', *args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def int_sgt_impl(context, builder, sig, args): + res = builder.icmp_signed('>', *args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def int_sge_impl(context, builder, sig, args): + res = builder.icmp_signed('>=', *args) + return impl_ret_untracked(context, builder, 
sig.return_type, res) + + +def int_ult_impl(context, builder, sig, args): + res = builder.icmp_unsigned('<', *args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def int_ule_impl(context, builder, sig, args): + res = builder.icmp_unsigned('<=', *args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def int_ugt_impl(context, builder, sig, args): + res = builder.icmp_unsigned('>', *args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def int_uge_impl(context, builder, sig, args): + res = builder.icmp_unsigned('>=', *args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def int_eq_impl(context, builder, sig, args): + res = builder.icmp_unsigned('==', *args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def int_ne_impl(context, builder, sig, args): + res = builder.icmp_unsigned('!=', *args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def int_signed_unsigned_cmp(op): + def impl(context, builder, sig, args): + (left, right) = args + # This code is translated from the NumPy source. + # What we're going to do is divide the range of a signed value at zero. + # If the signed value is less than zero, then we can treat zero as the + # unsigned value since the unsigned value is necessarily zero or larger + # and any signed comparison between a negative value and zero/infinity + # will yield the same result. If the signed value is greater than or + # equal to zero, then we can safely cast it to an unsigned value and do + # the expected unsigned-unsigned comparison operation. 
+ # Original: https://github.com/numpy/numpy/pull/23713 + cmp_zero = builder.icmp_signed('<', left, Constant(left.type, 0)) + lt_zero = builder.icmp_signed(op, left, Constant(left.type, 0)) + ge_zero = builder.icmp_unsigned(op, left, right) + res = builder.select(cmp_zero, lt_zero, ge_zero) + return impl_ret_untracked(context, builder, sig.return_type, res) + return impl + + +def int_unsigned_signed_cmp(op): + def impl(context, builder, sig, args): + (left, right) = args + # See the function `int_signed_unsigned_cmp` for implementation notes. + cmp_zero = builder.icmp_signed('<', right, Constant(right.type, 0)) + lt_zero = builder.icmp_signed(op, Constant(right.type, 0), right) + ge_zero = builder.icmp_unsigned(op, left, right) + res = builder.select(cmp_zero, lt_zero, ge_zero) + return impl_ret_untracked(context, builder, sig.return_type, res) + return impl + + +def int_abs_impl(context, builder, sig, args): + [x] = args + ZERO = Constant(x.type, None) + ltz = builder.icmp_signed('<', x, ZERO) + negated = builder.neg(x) + res = builder.select(ltz, negated, x) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def identity_impl(context, builder, sig, args): + [x] = args + return impl_ret_untracked(context, builder, sig.return_type, x) + + +def uint_abs_impl(context, builder, sig, args): + [x] = args + return impl_ret_untracked(context, builder, sig.return_type, x) + + +def int_shl_impl(context, builder, sig, args): + [valty, amtty] = sig.args + [val, amt] = args + val = context.cast(builder, val, valty, sig.return_type) + amt = context.cast(builder, amt, amtty, sig.return_type) + res = builder.shl(val, amt) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def int_shr_impl(context, builder, sig, args): + [valty, amtty] = sig.args + [val, amt] = args + val = context.cast(builder, val, valty, sig.return_type) + amt = context.cast(builder, amt, amtty, sig.return_type) + if sig.return_type.signed: + res = builder.ashr(val, 
amt) + else: + res = builder.lshr(val, amt) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def int_and_impl(context, builder, sig, args): + [at, bt] = sig.args + [av, bv] = args + cav = context.cast(builder, av, at, sig.return_type) + cbc = context.cast(builder, bv, bt, sig.return_type) + res = builder.and_(cav, cbc) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def int_or_impl(context, builder, sig, args): + [at, bt] = sig.args + [av, bv] = args + cav = context.cast(builder, av, at, sig.return_type) + cbc = context.cast(builder, bv, bt, sig.return_type) + res = builder.or_(cav, cbc) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def int_xor_impl(context, builder, sig, args): + [at, bt] = sig.args + [av, bv] = args + cav = context.cast(builder, av, at, sig.return_type) + cbc = context.cast(builder, bv, bt, sig.return_type) + res = builder.xor(cav, cbc) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def int_negate_impl(context, builder, sig, args): + [typ] = sig.args + [val] = args + # Negate before upcasting, for unsigned numbers + res = builder.neg(val) + res = context.cast(builder, res, typ, sig.return_type) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def int_positive_impl(context, builder, sig, args): + [typ] = sig.args + [val] = args + res = context.cast(builder, val, typ, sig.return_type) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def int_invert_impl(context, builder, sig, args): + [typ] = sig.args + [val] = args + # Invert before upcasting, for unsigned numbers + res = builder.xor(val, Constant(val.type, int('1' * val.type.width, 2))) + res = context.cast(builder, res, typ, sig.return_type) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def int_sign_impl(context, builder, sig, args): + """ + np.sign(int) + """ + [x] = args + POS = Constant(x.type, 1) + NEG = 
Constant(x.type, -1) + ZERO = Constant(x.type, 0) + + cmp_zero = builder.icmp_unsigned('==', x, ZERO) + cmp_pos = builder.icmp_signed('>', x, ZERO) + + presult = cgutils.alloca_once(builder, x.type) + + bb_zero = builder.append_basic_block(".zero") + bb_postest = builder.append_basic_block(".postest") + bb_pos = builder.append_basic_block(".pos") + bb_neg = builder.append_basic_block(".neg") + bb_exit = builder.append_basic_block(".exit") + + builder.cbranch(cmp_zero, bb_zero, bb_postest) + + with builder.goto_block(bb_zero): + builder.store(ZERO, presult) + builder.branch(bb_exit) + + with builder.goto_block(bb_postest): + builder.cbranch(cmp_pos, bb_pos, bb_neg) + + with builder.goto_block(bb_pos): + builder.store(POS, presult) + builder.branch(bb_exit) + + with builder.goto_block(bb_neg): + builder.store(NEG, presult) + builder.branch(bb_exit) + + builder.position_at_end(bb_exit) + res = builder.load(presult) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def bool_negate_impl(context, builder, sig, args): + [typ] = sig.args + [val] = args + res = context.cast(builder, val, typ, sig.return_type) + res = builder.neg(res) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def bool_unary_positive_impl(context, builder, sig, args): + [typ] = sig.args + [val] = args + res = context.cast(builder, val, typ, sig.return_type) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +# lower_builtin(operator.eq, types.boolean, types.boolean)(int_eq_impl) +# lower_builtin(operator.ne, types.boolean, types.boolean)(int_ne_impl) +# lower_builtin(operator.lt, types.boolean, types.boolean)(int_ult_impl) +# lower_builtin(operator.le, types.boolean, types.boolean)(int_ule_impl) +# lower_builtin(operator.gt, types.boolean, types.boolean)(int_ugt_impl) +# lower_builtin(operator.ge, types.boolean, types.boolean)(int_uge_impl) +# lower_builtin(operator.neg, types.boolean)(bool_negate_impl) +# lower_builtin(operator.pos, 
types.boolean)(bool_unary_positive_impl) + + +# def _implement_integer_operators(): +# ty = types.Integer + +# lower_builtin(operator.add, ty, ty)(int_add_impl) +# lower_builtin(operator.iadd, ty, ty)(int_add_impl) +# lower_builtin(operator.sub, ty, ty)(int_sub_impl) +# lower_builtin(operator.isub, ty, ty)(int_sub_impl) +# lower_builtin(operator.mul, ty, ty)(int_mul_impl) +# lower_builtin(operator.imul, ty, ty)(int_mul_impl) +# lower_builtin(operator.eq, ty, ty)(int_eq_impl) +# lower_builtin(operator.ne, ty, ty)(int_ne_impl) + +# lower_builtin(operator.lshift, ty, ty)(int_shl_impl) +# lower_builtin(operator.ilshift, ty, ty)(int_shl_impl) +# lower_builtin(operator.rshift, ty, ty)(int_shr_impl) +# lower_builtin(operator.irshift, ty, ty)(int_shr_impl) + +# lower_builtin(operator.neg, ty)(int_negate_impl) +# lower_builtin(operator.pos, ty)(int_positive_impl) + +# lower_builtin(operator.pow, ty, ty)(int_power_impl) +# lower_builtin(operator.ipow, ty, ty)(int_power_impl) +# lower_builtin(pow, ty, ty)(int_power_impl) + +# for ty in types.unsigned_domain: +# lower_builtin(operator.lt, ty, ty)(int_ult_impl) +# lower_builtin(operator.le, ty, ty)(int_ule_impl) +# lower_builtin(operator.gt, ty, ty)(int_ugt_impl) +# lower_builtin(operator.ge, ty, ty)(int_uge_impl) +# lower_builtin(operator.pow, types.Float, ty)(int_power_impl) +# lower_builtin(operator.ipow, types.Float, ty)(int_power_impl) +# lower_builtin(pow, types.Float, ty)(int_power_impl) +# lower_builtin(abs, ty)(uint_abs_impl) + +# lower_builtin(operator.lt, types.IntegerLiteral, types.IntegerLiteral)(int_slt_impl) +# lower_builtin(operator.gt, types.IntegerLiteral, types.IntegerLiteral)(int_slt_impl) +# lower_builtin(operator.le, types.IntegerLiteral, types.IntegerLiteral)(int_slt_impl) +# lower_builtin(operator.ge, types.IntegerLiteral, types.IntegerLiteral)(int_slt_impl) +# for ty in types.signed_domain: +# lower_builtin(operator.lt, ty, ty)(int_slt_impl) +# lower_builtin(operator.le, ty, ty)(int_sle_impl) +# 
lower_builtin(operator.gt, ty, ty)(int_sgt_impl) +# lower_builtin(operator.ge, ty, ty)(int_sge_impl) +# lower_builtin(operator.pow, types.Float, ty)(int_power_impl) +# lower_builtin(operator.ipow, types.Float, ty)(int_power_impl) +# lower_builtin(pow, types.Float, ty)(int_power_impl) +# lower_builtin(abs, ty)(int_abs_impl) + +# def _implement_bitwise_operators(): +# for ty in (types.Boolean, types.Integer): +# lower_builtin(operator.and_, ty, ty)(int_and_impl) +# lower_builtin(operator.iand, ty, ty)(int_and_impl) +# lower_builtin(operator.or_, ty, ty)(int_or_impl) +# lower_builtin(operator.ior, ty, ty)(int_or_impl) +# lower_builtin(operator.xor, ty, ty)(int_xor_impl) +# lower_builtin(operator.ixor, ty, ty)(int_xor_impl) + +# lower_builtin(operator.invert, ty)(int_invert_impl) + +# _implement_integer_operators() + +# _implement_bitwise_operators() + + +def real_add_impl(context, builder, sig, args): + res = builder.fadd(*args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def real_sub_impl(context, builder, sig, args): + res = builder.fsub(*args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def real_mul_impl(context, builder, sig, args): + res = builder.fmul(*args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def real_div_impl(context, builder, sig, args): + with cgutils.if_zero(builder, args[1]): + context.error_model.fp_zero_division(builder, ("division by zero",)) + res = builder.fdiv(*args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def real_divmod(context, builder, x, y): + assert x.type == y.type + floatty = x.type + + module = builder.module + fname = context.mangler(".numba.python.rem", [x.type]) + fnty = ir.FunctionType(floatty, (floatty, floatty, ir.PointerType(floatty))) + fn = cgutils.get_or_insert_function(module, fnty, fname) + + if fn.is_declaration: + fn.linkage = 'linkonce_odr' + fnbuilder = ir.IRBuilder(fn.append_basic_block('entry')) + 
fx, fy, pmod = fn.args + div, mod = real_divmod_func_body(context, fnbuilder, fx, fy) + fnbuilder.store(mod, pmod) + fnbuilder.ret(div) + + pmod = cgutils.alloca_once(builder, floatty) + quotient = builder.call(fn, (x, y, pmod)) + return quotient, builder.load(pmod) + + +def real_divmod_func_body(context, builder, vx, wx): + # Reference Objects/floatobject.c + # + # float_divmod(PyObject *v, PyObject *w) + # { + # double vx, wx; + # double div, mod, floordiv; + # CONVERT_TO_DOUBLE(v, vx); + # CONVERT_TO_DOUBLE(w, wx); + # mod = fmod(vx, wx); + # /* fmod is typically exact, so vx-mod is *mathematically* an + # exact multiple of wx. But this is fp arithmetic, and fp + # vx - mod is an approximation; the result is that div may + # not be an exact integral value after the division, although + # it will always be very close to one. + # */ + # div = (vx - mod) / wx; + # if (mod) { + # /* ensure the remainder has the same sign as the denominator */ + # if ((wx < 0) != (mod < 0)) { + # mod += wx; + # div -= 1.0; + # } + # } + # else { + # /* the remainder is zero, and in the presence of signed zeroes + # fmod returns different results across platforms; ensure + # it has the same sign as the denominator; we'd like to do + # "mod = wx * 0.0", but that may get optimized away */ + # mod *= mod; /* hide "mod = +0" from optimizer */ + # if (wx < 0.0) + # mod = -mod; + # } + # /* snap quotient to nearest integral value */ + # if (div) { + # floordiv = floor(div); + # if (div - floordiv > 0.5) + # floordiv += 1.0; + # } + # else { + # /* div is zero - get the same sign as the true quotient */ + # div *= div; /* hide "div = +0" from optimizers */ + # floordiv = div * vx / wx; /* zero w/ sign of vx/wx */ + # } + # return Py_BuildValue("(dd)", floordiv, mod); + # } + pmod = cgutils.alloca_once(builder, vx.type) + pdiv = cgutils.alloca_once(builder, vx.type) + pfloordiv = cgutils.alloca_once(builder, vx.type) + + mod = builder.frem(vx, wx) + div = builder.fdiv(builder.fsub(vx, mod), 
wx) + + builder.store(mod, pmod) + builder.store(div, pdiv) + + # Note the use of negative zero for proper negating with `ZERO - x` + ZERO = vx.type(0.0) + NZERO = vx.type(-0.0) + ONE = vx.type(1.0) + mod_istrue = builder.fcmp_unordered('!=', mod, ZERO) + wx_ltz = builder.fcmp_ordered('<', wx, ZERO) + mod_ltz = builder.fcmp_ordered('<', mod, ZERO) + + with builder.if_else(mod_istrue, likely=True) as (if_nonzero_mod, if_zero_mod): + with if_nonzero_mod: + # `mod` is non-zero or NaN + # Ensure the remainder has the same sign as the denominator + wx_ltz_ne_mod_ltz = builder.icmp_unsigned('!=', wx_ltz, mod_ltz) + + with builder.if_then(wx_ltz_ne_mod_ltz): + builder.store(builder.fsub(div, ONE), pdiv) + builder.store(builder.fadd(mod, wx), pmod) + + with if_zero_mod: + # `mod` is zero, select the proper sign depending on + # the denominator's sign + mod = builder.select(wx_ltz, NZERO, ZERO) + builder.store(mod, pmod) + + del mod, div + + div = builder.load(pdiv) + div_istrue = builder.fcmp_ordered('!=', div, ZERO) + + with builder.if_then(div_istrue): + realtypemap = {'float': types.float32, + 'double': types.float64} + realtype = realtypemap[str(wx.type)] + floorfn = context.get_function(math.floor, + typing.signature(realtype, realtype)) + floordiv = floorfn(builder, [div]) + floordivdiff = builder.fsub(div, floordiv) + floordivincr = builder.fadd(floordiv, ONE) + HALF = Constant(wx.type, 0.5) + pred = builder.fcmp_ordered('>', floordivdiff, HALF) + floordiv = builder.select(pred, floordivincr, floordiv) + builder.store(floordiv, pfloordiv) + + with cgutils.ifnot(builder, div_istrue): + div = builder.fmul(div, div) + builder.store(div, pdiv) + floordiv = builder.fdiv(builder.fmul(div, vx), wx) + builder.store(floordiv, pfloordiv) + + return builder.load(pfloordiv), builder.load(pmod) + + +# @lower_builtin(divmod, types.Float, types.Float) +def real_divmod_impl(context, builder, sig, args, loc=None): + x, y = args + quot = cgutils.alloca_once(builder, x.type, 
name="quot") + rem = cgutils.alloca_once(builder, x.type, name="rem") + + with builder.if_else(cgutils.is_scalar_zero(builder, y), likely=False + ) as (if_zero, if_non_zero): + with if_zero: + if not context.error_model.fp_zero_division( + builder, ("modulo by zero",), loc): + # No exception raised => compute the nan result, + # and set the FP exception word for Numpy warnings. + q = builder.fdiv(x, y) + r = builder.frem(x, y) + builder.store(q, quot) + builder.store(r, rem) + with if_non_zero: + q, r = real_divmod(context, builder, x, y) + builder.store(q, quot) + builder.store(r, rem) + + return cgutils.pack_array(builder, + (builder.load(quot), builder.load(rem))) + + +def real_mod_impl(context, builder, sig, args, loc=None): + x, y = args + res = cgutils.alloca_once(builder, x.type) + with builder.if_else(cgutils.is_scalar_zero(builder, y), likely=False + ) as (if_zero, if_non_zero): + with if_zero: + if not context.error_model.fp_zero_division( + builder, ("modulo by zero",), loc): + # No exception raised => compute the nan result, + # and set the FP exception word for Numpy warnings. + rem = builder.frem(x, y) + builder.store(rem, res) + with if_non_zero: + _, rem = real_divmod(context, builder, x, y) + builder.store(rem, res) + return impl_ret_untracked(context, builder, sig.return_type, + builder.load(res)) + + +def real_floordiv_impl(context, builder, sig, args, loc=None): + x, y = args + res = cgutils.alloca_once(builder, x.type) + with builder.if_else(cgutils.is_scalar_zero(builder, y), likely=False + ) as (if_zero, if_non_zero): + with if_zero: + if not context.error_model.fp_zero_division( + builder, ("division by zero",), loc): + # No exception raised => compute the +/-inf or nan result, + # and set the FP exception word for Numpy warnings. 
+ quot = builder.fdiv(x, y) + builder.store(quot, res) + with if_non_zero: + quot, _ = real_divmod(context, builder, x, y) + builder.store(quot, res) + return impl_ret_untracked(context, builder, sig.return_type, + builder.load(res)) + + +def real_power_impl(context, builder, sig, args): + x, y = args + module = builder.module + if context.implement_powi_as_math_call: + imp = context.get_function(math.pow, sig) + res = imp(builder, args) + else: + fn = module.declare_intrinsic('llvm.pow', [y.type]) + res = builder.call(fn, (x, y)) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def real_lt_impl(context, builder, sig, args): + res = builder.fcmp_ordered('<', *args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def real_le_impl(context, builder, sig, args): + res = builder.fcmp_ordered('<=', *args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def real_gt_impl(context, builder, sig, args): + res = builder.fcmp_ordered('>', *args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def real_ge_impl(context, builder, sig, args): + res = builder.fcmp_ordered('>=', *args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def real_eq_impl(context, builder, sig, args): + res = builder.fcmp_ordered('==', *args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def real_ne_impl(context, builder, sig, args): + res = builder.fcmp_unordered('!=', *args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def real_abs_impl(context, builder, sig, args): + [ty] = sig.args + sig = typing.signature(ty, ty) + impl = context.get_function(math.fabs, sig) + return impl(builder, args) + + +def real_negate_impl(context, builder, sig, args): + from numba.cpython import mathimpl + res = mathimpl.negate_real(builder, args[0]) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def 
real_positive_impl(context, builder, sig, args): + [typ] = sig.args + [val] = args + res = context.cast(builder, val, typ, sig.return_type) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def real_sign_impl(context, builder, sig, args): + """ + np.sign(float) + """ + [x] = args + POS = Constant(x.type, 1) + NEG = Constant(x.type, -1) + ZERO = Constant(x.type, 0) + + presult = cgutils.alloca_once(builder, x.type) + + is_pos = builder.fcmp_ordered('>', x, ZERO) + is_neg = builder.fcmp_ordered('<', x, ZERO) + + with builder.if_else(is_pos) as (gt_zero, not_gt_zero): + with gt_zero: + builder.store(POS, presult) + with not_gt_zero: + with builder.if_else(is_neg) as (lt_zero, not_lt_zero): + with lt_zero: + builder.store(NEG, presult) + with not_lt_zero: + # For both NaN and 0, the result of sign() is simply + # the input value. + builder.store(x, presult) + + res = builder.load(presult) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +# ty = types.Float + +# lower_builtin(operator.add, ty, ty)(real_add_impl) +# lower_builtin(operator.iadd, ty, ty)(real_add_impl) +# lower_builtin(operator.sub, ty, ty)(real_sub_impl) +# lower_builtin(operator.isub, ty, ty)(real_sub_impl) +# lower_builtin(operator.mul, ty, ty)(real_mul_impl) +# lower_builtin(operator.imul, ty, ty)(real_mul_impl) +# lower_builtin(operator.floordiv, ty, ty)(real_floordiv_impl) +# lower_builtin(operator.ifloordiv, ty, ty)(real_floordiv_impl) +# lower_builtin(operator.truediv, ty, ty)(real_div_impl) +# lower_builtin(operator.itruediv, ty, ty)(real_div_impl) +# lower_builtin(operator.mod, ty, ty)(real_mod_impl) +# lower_builtin(operator.imod, ty, ty)(real_mod_impl) +# lower_builtin(operator.pow, ty, ty)(real_power_impl) +# lower_builtin(operator.ipow, ty, ty)(real_power_impl) +# lower_builtin(pow, ty, ty)(real_power_impl) + +# lower_builtin(operator.eq, ty, ty)(real_eq_impl) +# lower_builtin(operator.ne, ty, ty)(real_ne_impl) +# lower_builtin(operator.lt, ty, 
ty)(real_lt_impl) +# lower_builtin(operator.le, ty, ty)(real_le_impl) +# lower_builtin(operator.gt, ty, ty)(real_gt_impl) +# lower_builtin(operator.ge, ty, ty)(real_ge_impl) + +# lower_builtin(abs, ty)(real_abs_impl) + +# lower_builtin(operator.neg, ty)(real_negate_impl) +# lower_builtin(operator.pos, ty)(real_positive_impl) + +# del ty + + +# @lower_getattr(types.Complex, "real") +def complex_real_impl(context, builder, typ, value): + cplx = context.make_complex(builder, typ, value=value) + res = cplx.real + return impl_ret_untracked(context, builder, typ, res) + +# @lower_getattr(types.Complex, "imag") +def complex_imag_impl(context, builder, typ, value): + cplx = context.make_complex(builder, typ, value=value) + res = cplx.imag + return impl_ret_untracked(context, builder, typ, res) + +# @lower_builtin("complex.conjugate", types.Complex) +def complex_conjugate_impl(context, builder, sig, args): + from numba.cpython import mathimpl + z = context.make_complex(builder, sig.args[0], args[0]) + z.imag = mathimpl.negate_real(builder, z.imag) + res = z._getvalue() + return impl_ret_untracked(context, builder, sig.return_type, res) + +def real_real_impl(context, builder, typ, value): + return impl_ret_untracked(context, builder, typ, value) + +def real_imag_impl(context, builder, typ, value): + res = cgutils.get_null_value(value.type) + return impl_ret_untracked(context, builder, typ, res) + +def real_conjugate_impl(context, builder, sig, args): + return impl_ret_untracked(context, builder, sig.return_type, args[0]) + +# for cls in (types.Float, types.Integer): +# lower_getattr(cls, "real")(real_real_impl) +# lower_getattr(cls, "imag")(real_imag_impl) +# lower_builtin("complex.conjugate", cls)(real_conjugate_impl) + + +# @lower_builtin(operator.pow, types.Complex, types.Complex) +# @lower_builtin(operator.ipow, types.Complex, types.Complex) +# @lower_builtin(pow, types.Complex, types.Complex) +def complex_power_impl(context, builder, sig, args): + [ca, cb] = args + ty = 
sig.args[0] + fty = ty.underlying_float + a = context.make_helper(builder, ty, value=ca) + b = context.make_helper(builder, ty, value=cb) + c = context.make_helper(builder, ty) + module = builder.module + pa = a._getpointer() + pb = b._getpointer() + pc = c._getpointer() + + # Optimize for square because cpow loses a lot of precision + TWO = context.get_constant(fty, 2) + ZERO = context.get_constant(fty, 0) + + b_real_is_two = builder.fcmp_ordered('==', b.real, TWO) + b_imag_is_zero = builder.fcmp_ordered('==', b.imag, ZERO) + b_is_two = builder.and_(b_real_is_two, b_imag_is_zero) + + with builder.if_else(b_is_two) as (then, otherwise): + with then: + # Lower as multiplication + res = complex_mul_impl(context, builder, sig, (ca, ca)) + cres = context.make_helper(builder, ty, value=res) + c.real = cres.real + c.imag = cres.imag + + with otherwise: + # Lower with call to external function + func_name = { + types.complex64: "numba_cpowf", + types.complex128: "numba_cpow", + }[ty] + fnty = ir.FunctionType(ir.VoidType(), [pa.type] * 3) + cpow = cgutils.get_or_insert_function(module, fnty, func_name) + builder.call(cpow, (pa, pb, pc)) + + res = builder.load(pc) + return impl_ret_untracked(context, builder, sig.return_type, res) + +def complex_add_impl(context, builder, sig, args): + [cx, cy] = args + ty = sig.args[0] + x = context.make_complex(builder, ty, value=cx) + y = context.make_complex(builder, ty, value=cy) + z = context.make_complex(builder, ty) + a = x.real + b = x.imag + c = y.real + d = y.imag + z.real = builder.fadd(a, c) + z.imag = builder.fadd(b, d) + res = z._getvalue() + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def complex_sub_impl(context, builder, sig, args): + [cx, cy] = args + ty = sig.args[0] + x = context.make_complex(builder, ty, value=cx) + y = context.make_complex(builder, ty, value=cy) + z = context.make_complex(builder, ty) + a = x.real + b = x.imag + c = y.real + d = y.imag + z.real = builder.fsub(a, c) + z.imag 
= builder.fsub(b, d) + res = z._getvalue() + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def complex_mul_impl(context, builder, sig, args): + """ + (a+bi)(c+di)=(ac-bd)+i(ad+bc) + """ + [cx, cy] = args + ty = sig.args[0] + x = context.make_complex(builder, ty, value=cx) + y = context.make_complex(builder, ty, value=cy) + z = context.make_complex(builder, ty) + a = x.real + b = x.imag + c = y.real + d = y.imag + ac = builder.fmul(a, c) + bd = builder.fmul(b, d) + ad = builder.fmul(a, d) + bc = builder.fmul(b, c) + z.real = builder.fsub(ac, bd) + z.imag = builder.fadd(ad, bc) + res = z._getvalue() + return impl_ret_untracked(context, builder, sig.return_type, res) + + +NAN = float('nan') + +def complex_div_impl(context, builder, sig, args): + def complex_div(a, b): + # This is CPython's algorithm (in _Py_c_quot()). + areal = a.real + aimag = a.imag + breal = b.real + bimag = b.imag + if not breal and not bimag: + raise ZeroDivisionError("complex division by zero") + if abs(breal) >= abs(bimag): + # Divide tops and bottom by b.real + if not breal: + return complex(NAN, NAN) + ratio = bimag / breal + denom = breal + bimag * ratio + return complex( + (areal + aimag * ratio) / denom, + (aimag - areal * ratio) / denom) + else: + # Divide tops and bottom by b.imag + if not bimag: + return complex(NAN, NAN) + ratio = breal / bimag + denom = breal * ratio + bimag + return complex( + (a.real * ratio + a.imag) / denom, + (a.imag * ratio - a.real) / denom) + + res = context.compile_internal(builder, complex_div, sig, args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def complex_negate_impl(context, builder, sig, args): + from numba.cpython import mathimpl + [typ] = sig.args + [val] = args + cmplx = context.make_complex(builder, typ, value=val) + res = context.make_complex(builder, typ) + res.real = mathimpl.negate_real(builder, cmplx.real) + res.imag = mathimpl.negate_real(builder, cmplx.imag) + res = res._getvalue() + 
return impl_ret_untracked(context, builder, sig.return_type, res) + + +def complex_positive_impl(context, builder, sig, args): + [val] = args + return impl_ret_untracked(context, builder, sig.return_type, val) + + +def complex_eq_impl(context, builder, sig, args): + [cx, cy] = args + typ = sig.args[0] + x = context.make_complex(builder, typ, value=cx) + y = context.make_complex(builder, typ, value=cy) + + reals_are_eq = builder.fcmp_ordered('==', x.real, y.real) + imags_are_eq = builder.fcmp_ordered('==', x.imag, y.imag) + res = builder.and_(reals_are_eq, imags_are_eq) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def complex_ne_impl(context, builder, sig, args): + [cx, cy] = args + typ = sig.args[0] + x = context.make_complex(builder, typ, value=cx) + y = context.make_complex(builder, typ, value=cy) + + reals_are_ne = builder.fcmp_unordered('!=', x.real, y.real) + imags_are_ne = builder.fcmp_unordered('!=', x.imag, y.imag) + res = builder.or_(reals_are_ne, imags_are_ne) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def complex_abs_impl(context, builder, sig, args): + """ + abs(z) := hypot(z.real, z.imag) + """ + def complex_abs(z): + return math.hypot(z.real, z.imag) + + res = context.compile_internal(builder, complex_abs, sig, args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +# ty = types.Complex + +# lower_builtin(operator.add, ty, ty)(complex_add_impl) +# lower_builtin(operator.iadd, ty, ty)(complex_add_impl) +# lower_builtin(operator.sub, ty, ty)(complex_sub_impl) +# lower_builtin(operator.isub, ty, ty)(complex_sub_impl) +# lower_builtin(operator.mul, ty, ty)(complex_mul_impl) +# lower_builtin(operator.imul, ty, ty)(complex_mul_impl) +# lower_builtin(operator.truediv, ty, ty)(complex_div_impl) +# lower_builtin(operator.itruediv, ty, ty)(complex_div_impl) +# lower_builtin(operator.neg, ty)(complex_negate_impl) +# lower_builtin(operator.pos, ty)(complex_positive_impl) +# # Complex 
modulo is deprecated in python3 + +# lower_builtin(operator.eq, ty, ty)(complex_eq_impl) +# lower_builtin(operator.ne, ty, ty)(complex_ne_impl) + +# lower_builtin(abs, ty)(complex_abs_impl) + +# del ty + + +# @lower_builtin("number.item", types.Boolean) +# @lower_builtin("number.item", types.Number) +def number_item_impl(context, builder, sig, args): + """ + The no-op .item() method on booleans and numbers. + """ + return args[0] + + +#------------------------------------------------------------------------------ + + +def number_not_impl(context, builder, sig, args): + [typ] = sig.args + [val] = args + istrue = context.cast(builder, val, typ, sig.return_type) + res = builder.not_(istrue) + return impl_ret_untracked(context, builder, sig.return_type, res) + +# @lower_builtin(bool, types.Boolean) +def bool_as_bool(context, builder, sig, args): + [val] = args + return val + +# @lower_builtin(bool, types.Integer) +def int_as_bool(context, builder, sig, args): + [val] = args + return builder.icmp_unsigned('!=', val, Constant(val.type, 0)) + +# @lower_builtin(bool, types.Float) +def float_as_bool(context, builder, sig, args): + [val] = args + return builder.fcmp_unordered('!=', val, Constant(val.type, 0.0)) + +# @lower_builtin(bool, types.Complex) +def complex_as_bool(context, builder, sig, args): + [typ] = sig.args + [val] = args + cmplx = context.make_complex(builder, typ, val) + real, imag = cmplx.real, cmplx.imag + zero = Constant(real.type, 0.0) + real_istrue = builder.fcmp_unordered('!=', real, zero) + imag_istrue = builder.fcmp_unordered('!=', imag, zero) + return builder.or_(real_istrue, imag_istrue) + + +# for ty in (types.Integer, types.Float, types.Complex): +# lower_builtin(operator.not_, ty)(number_not_impl) + +# lower_builtin(operator.not_, types.boolean)(number_not_impl) + + +#------------------------------------------------------------------------------ +# Hashing numbers, see hashing.py + 
+#------------------------------------------------------------------------------- +# Implicit casts between numerics + +# @lower_cast(types.IntegerLiteral, types.Integer) +# @lower_cast(types.IntegerLiteral, types.Float) +# @lower_cast(types.IntegerLiteral, types.Complex) +def literal_int_to_number(context, builder, fromty, toty, val): + lit = context.get_constant_generic( + builder, + fromty.literal_type, + fromty.literal_value, + ) + return context.cast(builder, lit, fromty.literal_type, toty) + + +# @lower_cast(types.Integer, types.Integer) +def integer_to_integer(context, builder, fromty, toty, val): + if toty.bitwidth == fromty.bitwidth: + # Just a change of signedness + return val + elif toty.bitwidth < fromty.bitwidth: + # Downcast + return builder.trunc(val, context.get_value_type(toty)) + elif fromty.signed: + # Signed upcast + return builder.sext(val, context.get_value_type(toty)) + else: + # Unsigned upcast + return builder.zext(val, context.get_value_type(toty)) + +# @lower_cast(types.Integer, types.voidptr) +def integer_to_voidptr(context, builder, fromty, toty, val): + return builder.inttoptr(val, context.get_value_type(toty)) + +# @lower_cast(types.Float, types.Float) +def float_to_float(context, builder, fromty, toty, val): + lty = context.get_value_type(toty) + if fromty.bitwidth < toty.bitwidth: + return builder.fpext(val, lty) + else: + return builder.fptrunc(val, lty) + +# @lower_cast(types.Integer, types.Float) +def integer_to_float(context, builder, fromty, toty, val): + lty = context.get_value_type(toty) + if fromty.signed: + return builder.sitofp(val, lty) + else: + return builder.uitofp(val, lty) + +# @lower_cast(types.Float, types.Integer) +def float_to_integer(context, builder, fromty, toty, val): + lty = context.get_value_type(toty) + if toty.signed: + return builder.fptosi(val, lty) + else: + return builder.fptoui(val, lty) + +# @lower_cast(types.Float, types.Complex) +# @lower_cast(types.Integer, types.Complex) +def 
non_complex_to_complex(context, builder, fromty, toty, val): + real = context.cast(builder, val, fromty, toty.underlying_float) + imag = context.get_constant(toty.underlying_float, 0) + + cmplx = context.make_complex(builder, toty) + cmplx.real = real + cmplx.imag = imag + return cmplx._getvalue() + +# @lower_cast(types.Complex, types.Complex) +def complex_to_complex(context, builder, fromty, toty, val): + srcty = fromty.underlying_float + dstty = toty.underlying_float + + src = context.make_complex(builder, fromty, value=val) + dst = context.make_complex(builder, toty) + dst.real = context.cast(builder, src.real, srcty, dstty) + dst.imag = context.cast(builder, src.imag, srcty, dstty) + return dst._getvalue() + +# @lower_cast(types.Any, types.Boolean) +def any_to_boolean(context, builder, fromty, toty, val): + return context.is_true(builder, fromty, val) + +# @lower_cast(types.Boolean, types.Number) +def boolean_to_any(context, builder, fromty, toty, val): + # Casting from boolean to anything first casts to int32 + asint = builder.zext(val, ir.IntType(32)) + return context.cast(builder, asint, types.int32, toty) + +# @lower_cast(types.IntegerLiteral, types.Boolean) +# @lower_cast(types.BooleanLiteral, types.Boolean) +def literal_int_to_boolean(context, builder, fromty, toty, val): + lit = context.get_constant_generic( + builder, + fromty.literal_type, + fromty.literal_value, + ) + return context.is_true(builder, fromty.literal_type, lit) + +#------------------------------------------------------------------------------- +# Constants + +# @lower_constant(types.Complex) +def constant_complex(context, builder, ty, pyval): + fty = ty.underlying_float + real = context.get_constant_generic(builder, fty, pyval.real) + imag = context.get_constant_generic(builder, fty, pyval.imag) + return Constant.literal_struct((real, imag)) + +# @lower_constant(types.Integer) +# @lower_constant(types.Float) +# @lower_constant(types.Boolean) +def constant_integer(context, builder, ty, 
pyval): + # See https://github.com/numba/numba/issues/6979 + # llvmlite ir.IntType specialises the formatting of the constant for a + # cpython bool. A NumPy np.bool_ is not a cpython bool so force it to be one + # so that the constant renders correctly! + if isinstance(pyval, np.bool_): + pyval = bool(pyval) + lty = context.get_value_type(ty) + return lty(pyval) + + +#------------------------------------------------------------------------------- +# View + +def scalar_view(scalar, viewty): + """ Typing for the np scalar 'view' method. """ + if (isinstance(scalar, (types.Float, types.Integer)) + and isinstance(viewty, types.abstract.DTypeSpec)): + if scalar.bitwidth != viewty.dtype.bitwidth: + raise errors.TypingError( + "Changing the dtype of a 0d array is only supported if the " + "itemsize is unchanged") + + def impl(scalar, viewty): + return viewer(scalar, viewty) + return impl + + +# overload_method(types.Float, 'view')(scalar_view) +# overload_method(types.Integer, 'view')(scalar_view) diff --git a/venv/lib/python3.10/site-packages/numba/np/new_arraymath.py b/venv/lib/python3.10/site-packages/numba/np/new_arraymath.py new file mode 100644 index 0000000000000000000000000000000000000000..dac520649afd0d3eb32288a2e5c51a2ca0213a5c --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/new_arraymath.py @@ -0,0 +1,4983 @@ +""" +Implementation of math operations on Array objects. 
+""" + + +import math +from collections import namedtuple +import operator + +import llvmlite.ir +import numpy as np + +from numba.core import types, cgutils +from numba.core.extending import overload, overload_method, register_jitable +from numba.np.numpy_support import (as_dtype, type_can_asarray, type_is_scalar, + numpy_version, is_nonelike, + check_is_integer, lt_floats, lt_complex) +from numba.core.imputils import (lower_builtin, impl_ret_borrowed, + impl_ret_new_ref, impl_ret_untracked) +from numba.np.arrayobj import (make_array, load_item, store_item, + _empty_nd_impl) +from numba.np.linalg import ensure_blas + +from numba.core.extending import intrinsic +from numba.core.errors import (RequireLiteralValue, TypingError, + NumbaValueError, NumbaNotImplementedError, + NumbaTypeError) +from numba.cpython.unsafe.tuple import tuple_setitem + + +def _check_blas(): + # Checks if a BLAS is available so e.g. dot will work + try: + ensure_blas() + except ImportError: + return False + return True + + +_HAVE_BLAS = _check_blas() + + +@intrinsic +def _create_tuple_result_shape(tyctx, shape_list, shape_tuple): + """ + This routine converts shape list where the axis dimension has already + been popped to a tuple for indexing of the same size. The original shape + tuple is also required because it contains a length field at compile time + whereas the shape list does not. + """ + + # The new tuple's size is one less than the original tuple since axis + # dimension removed. + nd = len(shape_tuple) - 1 + # The return type of this intrinsic is an int tuple of length nd. + tupty = types.UniTuple(types.intp, nd) + # The function signature for this intrinsic. + function_sig = tupty(shape_list, shape_tuple) + + def codegen(cgctx, builder, signature, args): + lltupty = cgctx.get_value_type(tupty) + # Create an empty int tuple. + tup = cgutils.get_null_value(lltupty) + + # Get the shape list from the args and we don't need shape tuple. 
+ [in_shape, _] = args + + def array_indexer(a, i): + return a[i] + + # loop to fill the tuple + for i in range(nd): + dataidx = cgctx.get_constant(types.intp, i) + # compile and call array_indexer + data = cgctx.compile_internal(builder, array_indexer, + types.intp(shape_list, types.intp), + [in_shape, dataidx]) + tup = builder.insert_value(tup, data, i) + return tup + + return function_sig, codegen + + +@intrinsic +def _gen_index_tuple(tyctx, shape_tuple, value, axis): + """ + Generates a tuple that can be used to index a specific slice from an + array for sum with axis. shape_tuple is the size of the dimensions of + the input array. 'value' is the value to put in the indexing tuple + in the axis dimension and 'axis' is that dimension. For this to work, + axis has to be a const. + """ + if not isinstance(axis, types.Literal): + raise RequireLiteralValue('axis argument must be a constant') + # Get the value of the axis constant. + axis_value = axis.literal_value + # The length of the indexing tuple to be output. + nd = len(shape_tuple) + + # If the axis value is impossible for the given size array then + # just fake it like it was for axis 0. This will stop compile errors + # when it looks like it could be called from array_sum_axis but really + # can't because that routine checks the axis mismatch and raise an + # exception. + if axis_value >= nd: + axis_value = 0 + + # Calculate the type of the indexing tuple. All the non-axis + # dimensions have slice2 type and the axis dimension has int type. + before = axis_value + after = nd - before - 1 + + types_list = [] + types_list += [types.slice2_type] * before + types_list += [types.intp] + types_list += [types.slice2_type] * after + + # Creates the output type of the function. + tupty = types.Tuple(types_list) + # Defines the signature of the intrinsic. 
+ function_sig = tupty(shape_tuple, value, axis) + + def codegen(cgctx, builder, signature, args): + lltupty = cgctx.get_value_type(tupty) + # Create an empty indexing tuple. + tup = cgutils.get_null_value(lltupty) + + # We only need value of the axis dimension here. + # The rest are constants defined above. + [_, value_arg, _] = args + + def create_full_slice(): + return slice(None, None) + + # loop to fill the tuple with slice(None,None) before + # the axis dimension. + + # compile and call create_full_slice + slice_data = cgctx.compile_internal(builder, create_full_slice, + types.slice2_type(), + []) + for i in range(0, axis_value): + tup = builder.insert_value(tup, slice_data, i) + + # Add the axis dimension 'value'. + tup = builder.insert_value(tup, value_arg, axis_value) + + # loop to fill the tuple with slice(None,None) after + # the axis dimension. + for i in range(axis_value + 1, nd): + tup = builder.insert_value(tup, slice_data, i) + return tup + + return function_sig, codegen + + +#---------------------------------------------------------------------------- +# Basic stats and aggregates + +@lower_builtin(np.sum, types.Array) +@lower_builtin("array.sum", types.Array) +def array_sum(context, builder, sig, args): + zero = sig.return_type(0) + + def array_sum_impl(arr): + c = zero + for v in np.nditer(arr): + c += v.item() + return c + + res = context.compile_internal(builder, array_sum_impl, sig, args, + locals=dict(c=sig.return_type)) + return impl_ret_borrowed(context, builder, sig.return_type, res) + + +@register_jitable +def _array_sum_axis_nop(arr, v): + return arr + + +def gen_sum_axis_impl(is_axis_const, const_axis_val, op, zero): + def inner(arr, axis): + """ + function that performs sums over one specific axis + + The third parameter to gen_index_tuple that generates the indexing + tuples has to be a const so we can't just pass "axis" through since + that isn't const. 
We can check for specific values and have + different instances that do take consts. Supporting axis summation + only up to the fourth dimension for now. + + typing/arraydecl.py:sum_expand defines the return type for sum with + axis. It is one dimension less than the input array. + """ + ndim = arr.ndim + + if not is_axis_const: + # Catch where axis is negative or greater than 3. + if axis < 0 or axis > 3: + raise ValueError("Numba does not support sum with axis " + "parameter outside the range 0 to 3.") + + # Catch the case where the user misspecifies the axis to be + # more than the number of the array's dimensions. + if axis >= ndim: + raise ValueError("axis is out of bounds for array") + + # Convert the shape of the input array to a list. + ashape = list(arr.shape) + # Get the length of the axis dimension. + axis_len = ashape[axis] + # Remove the axis dimension from the list of dimensional lengths. + ashape.pop(axis) + # Convert this shape list back to a tuple using above intrinsic. + ashape_without_axis = _create_tuple_result_shape(ashape, arr.shape) + # Tuple needed here to create output array with correct size. + result = np.full(ashape_without_axis, zero, type(zero)) + + # Iterate through the axis dimension. + for axis_index in range(axis_len): + if is_axis_const: + # constant specialized version works for any valid axis value + index_tuple_generic = _gen_index_tuple(arr.shape, axis_index, + const_axis_val) + result += arr[index_tuple_generic] + else: + # Generate a tuple used to index the input array. + # The tuple is ":" in all dimensions except the axis + # dimension where it is "axis_index". 
+ if axis == 0: + index_tuple1 = _gen_index_tuple(arr.shape, axis_index, 0) + result += arr[index_tuple1] + elif axis == 1: + index_tuple2 = _gen_index_tuple(arr.shape, axis_index, 1) + result += arr[index_tuple2] + elif axis == 2: + index_tuple3 = _gen_index_tuple(arr.shape, axis_index, 2) + result += arr[index_tuple3] + elif axis == 3: + index_tuple4 = _gen_index_tuple(arr.shape, axis_index, 3) + result += arr[index_tuple4] + return op(result, 0) + return inner + + +@lower_builtin(np.sum, types.Array, types.np_intp, types.DTypeSpec) +@lower_builtin(np.sum, types.Array, types.IntegerLiteral, types.DTypeSpec) +@lower_builtin("array.sum", types.Array, types.np_intp, types.DTypeSpec) +@lower_builtin("array.sum", types.Array, types.IntegerLiteral, types.DTypeSpec) +def array_sum_axis_dtype(context, builder, sig, args): + retty = sig.return_type + zero = getattr(retty, 'dtype', retty)(0) + # if the return is scalar in type then "take" the 0th element of the + # 0d array accumulator as the return value + if getattr(retty, 'ndim', None) is None: + op = np.take + else: + op = _array_sum_axis_nop + [ty_array, ty_axis, ty_dtype] = sig.args + is_axis_const = False + const_axis_val = 0 + if isinstance(ty_axis, types.Literal): + # this special-cases for constant axis + const_axis_val = ty_axis.literal_value + # fix negative axis + if const_axis_val < 0: + const_axis_val = ty_array.ndim + const_axis_val + if const_axis_val < 0 or const_axis_val > ty_array.ndim: + raise ValueError("'axis' entry is out of bounds") + + ty_axis = context.typing_context.resolve_value_type(const_axis_val) + axis_val = context.get_constant(ty_axis, const_axis_val) + # rewrite arguments + args = args[0], axis_val, args[2] + # rewrite sig + sig = sig.replace(args=[ty_array, ty_axis, ty_dtype]) + is_axis_const = True + + gen_impl = gen_sum_axis_impl(is_axis_const, const_axis_val, op, zero) + compiled = register_jitable(gen_impl) + + def array_sum_impl_axis(arr, axis, dtype): + return compiled(arr, axis) 
# NOTE(review): this excerpt begins mid-function — the two lines below are the
# tail of a sum-axis lowering whose `def` lies above this chunk.
    res = context.compile_internal(builder, array_sum_impl_axis, sig, args)
    return impl_ret_new_ref(context, builder, sig.return_type, res)


@lower_builtin(np.sum, types.Array, types.DTypeSpec)
@lower_builtin("array.sum", types.Array, types.DTypeSpec)
def array_sum_dtype(context, builder, sig, args):
    # Lowering for sum with an explicit dtype: accumulate every element into
    # a scalar of the signature's return type (pinned via `locals=` below so
    # the accumulator does not widen beyond the requested dtype).
    zero = sig.return_type(0)

    def array_sum_impl(arr, dtype):
        c = zero
        for v in np.nditer(arr):
            c += v.item()
        return c

    res = context.compile_internal(builder, array_sum_impl, sig, args,
                                   locals=dict(c=sig.return_type))
    return impl_ret_borrowed(context, builder, sig.return_type, res)


@lower_builtin(np.sum, types.Array, types.np_intp)
@lower_builtin(np.sum, types.Array, types.IntegerLiteral)
@lower_builtin("array.sum", types.Array, types.np_intp)
@lower_builtin("array.sum", types.Array, types.IntegerLiteral)
def array_sum_axis(context, builder, sig, args):
    # Lowering for sum along an axis.  When the axis is a compile-time
    # literal, the argument/signature are rewritten so a specialized
    # implementation can be generated for that constant axis.
    retty = sig.return_type
    zero = getattr(retty, 'dtype', retty)(0)
    # if the return is scalar in type then "take" the 0th element of the
    # 0d array accumulator as the return value
    if getattr(retty, 'ndim', None) is None:
        op = np.take
    else:
        op = _array_sum_axis_nop
    [ty_array, ty_axis] = sig.args
    is_axis_const = False
    const_axis_val = 0
    if isinstance(ty_axis, types.Literal):
        # this special-cases for constant axis
        const_axis_val = ty_axis.literal_value
        # fix negative axis
        if const_axis_val < 0:
            const_axis_val = ty_array.ndim + const_axis_val
        if const_axis_val < 0 or const_axis_val > ty_array.ndim:
            msg = f"'axis' entry ({const_axis_val}) is out of bounds"
            raise NumbaValueError(msg)

        ty_axis = context.typing_context.resolve_value_type(const_axis_val)
        axis_val = context.get_constant(ty_axis, const_axis_val)
        # rewrite arguments
        args = args[0], axis_val
        # rewrite sig
        sig = sig.replace(args=[ty_array, ty_axis])
        is_axis_const = True

    gen_impl = gen_sum_axis_impl(is_axis_const, const_axis_val, op, zero)
    compiled = register_jitable(gen_impl)

    def array_sum_impl_axis(arr, axis):
        return compiled(arr, axis)

    res = context.compile_internal(builder, array_sum_impl_axis, sig, args)
    return impl_ret_new_ref(context, builder, sig.return_type, res)


def get_accumulator(dtype, value):
    # Build an initial accumulator scalar of NumPy `dtype`.  timedelta64 has
    # no direct scalar constructor taking an int, so go through an int64 view.
    if dtype.type == np.timedelta64:
        acc_init = np.int64(value).view(dtype)
    else:
        acc_init = dtype.type(value)
    return acc_init


@overload(np.prod)
@overload_method(types.Array, "prod")
def array_prod(a):
    # np.prod / arr.prod(): product of all elements in iteration order.
    if isinstance(a, types.Array):
        dtype = as_dtype(a.dtype)

        acc_init = get_accumulator(dtype, 1)

        def array_prod_impl(a):
            c = acc_init
            for v in np.nditer(a):
                c *= v.item()
            return c

        return array_prod_impl


@overload(np.cumsum)
@overload_method(types.Array, "cumsum")
def array_cumsum(a):
    # np.cumsum / arr.cumsum(): running sum over the flat order.  Narrow
    # signed ints and bools accumulate in intp, mirroring NumPy widening.
    # NOTE(review): only `signed_domain` is widened here — unsigned ints fall
    # through to their own dtype; confirm against NumPy's promotion rules.
    if isinstance(a, types.Array):
        is_integer = a.dtype in types.signed_domain
        is_bool = a.dtype == types.bool_
        if (is_integer and a.dtype.bitwidth < types.intp.bitwidth)\
                or is_bool:
            dtype = as_dtype(types.intp)
        else:
            dtype = as_dtype(a.dtype)

        acc_init = get_accumulator(dtype, 0)

        def array_cumsum_impl(a):
            out = np.empty(a.size, dtype)
            c = acc_init
            for idx, v in enumerate(a.flat):
                c += v
                out[idx] = c
            return out

        return array_cumsum_impl


@overload(np.cumprod)
@overload_method(types.Array, "cumprod")
def array_cumprod(a):
    # np.cumprod / arr.cumprod(): running product, same widening policy as
    # array_cumsum above.
    if isinstance(a, types.Array):
        is_integer = a.dtype in types.signed_domain
        is_bool = a.dtype == types.bool_
        if (is_integer and a.dtype.bitwidth < types.intp.bitwidth)\
                or is_bool:
            dtype = as_dtype(types.intp)
        else:
            dtype = as_dtype(a.dtype)

        acc_init = get_accumulator(dtype, 1)

        def array_cumprod_impl(a):
            out = np.empty(a.size, dtype)
            c = acc_init
            for idx, v in enumerate(a.flat):
                c *= v
                out[idx] = c
            return out

        return array_cumprod_impl


@overload(np.mean)
@overload_method(types.Array, "mean")
def array_mean(a):
    # np.mean / arr.mean(): integer/bool inputs accumulate in float64 to
    # avoid integer overflow of the intermediate sum.
    if isinstance(a, types.Array):
        is_number = a.dtype in types.integer_domain | \
            frozenset([types.bool_])
        if is_number:
            dtype = as_dtype(types.float64)
        else:
            dtype = as_dtype(a.dtype)

        acc_init = get_accumulator(dtype, 0)

        def array_mean_impl(a):
            # Can't use the naive `arr.sum() / arr.size`, as it would return
            # a wrong result on integer sum overflow.
            c = acc_init
            for v in np.nditer(a):
                c += v.item()
            return c / a.size

        return array_mean_impl


@overload(np.var)
@overload_method(types.Array, "var")
def array_var(a):
    # Population variance (ddof=0); np.real/np.conj make the squared-diff
    # term valid for complex dtypes as well.
    if isinstance(a, types.Array):
        def array_var_impl(a):
            # Compute the mean
            m = a.mean()

            # Compute the sum of square diffs
            ssd = 0
            for v in np.nditer(a):
                val = (v.item() - m)
                ssd += np.real(val * np.conj(val))
            return ssd / a.size

        return array_var_impl


@overload(np.std)
@overload_method(types.Array, "std")
def array_std(a):
    # Standard deviation as the square root of the variance above.
    if isinstance(a, types.Array):
        def array_std_impl(a):
            return a.var() ** 0.5

        return array_std_impl


@register_jitable
def min_comparator(a, min_val):
    # True when `a` should replace the current minimum.
    return a < min_val


@register_jitable
def max_comparator(a, min_val):
    # True when `a` should replace the current maximum.
    return a > min_val


@register_jitable
def return_false(a):
    # Trivial "never early-return" predicate for dtypes without NaN/NaT.
    return False


@overload(np.min)
@overload(np.amin)
@overload_method(types.Array, "min")
def npy_min(a):
    # np.min / arr.min(): NaN/NaT propagate (first one encountered is
    # returned); complex numbers compare lexicographically (real, then imag).
    if not isinstance(a, types.Array):
        return

    if isinstance(a.dtype, (types.NPDatetime, types.NPTimedelta)):
        pre_return_func = np.isnat
        comparator = min_comparator
    elif isinstance(a.dtype, types.Complex):
        pre_return_func = return_false

        def comp_func(a, min_val):
            if a.real < min_val.real:
                return True
            elif a.real == min_val.real:
                if a.imag < min_val.imag:
                    return True
            return False

        comparator = register_jitable(comp_func)
    elif isinstance(a.dtype, types.Float):
        pre_return_func = np.isnan
        comparator = min_comparator
    else:
        pre_return_func = return_false
        comparator = min_comparator

    def impl_min(a):
        if a.size == 0:
            raise ValueError("zero-size array to reduction operation "
                             "minimum which has no identity")

        it = np.nditer(a)
        min_value = next(it).take(0)
        if pre_return_func(min_value):
            return min_value

        for view in it:
            v = view.item()
            if pre_return_func(v):
                return v
            if comparator(v, min_value):
                min_value = v
        return min_value

    return impl_min


@overload(np.max)
@overload(np.amax)
@overload_method(types.Array, "max")
def npy_max(a):
    # Mirror image of npy_min above: NaN/NaT propagate, complex compares
    # lexicographically with `>` instead of `<`.
    if not isinstance(a, types.Array):
        return

    if isinstance(a.dtype, (types.NPDatetime, types.NPTimedelta)):
        pre_return_func = np.isnat
        comparator = max_comparator
    elif isinstance(a.dtype, types.Complex):
        pre_return_func = return_false

        def comp_func(a, max_val):
            if a.real > max_val.real:
                return True
            elif a.real == max_val.real:
                if a.imag > max_val.imag:
                    return True
            return False

        comparator = register_jitable(comp_func)
    elif isinstance(a.dtype, types.Float):
        pre_return_func = np.isnan
        comparator = max_comparator
    else:
        pre_return_func = return_false
        comparator = max_comparator

    def impl_max(a):
        if a.size == 0:
            raise ValueError("zero-size array to reduction operation "
                             "maximum which has no identity")

        it = np.nditer(a)
        max_value = next(it).take(0)
        if pre_return_func(max_value):
            return max_value

        for view in it:
            v = view.item()
            if pre_return_func(v):
                return v
            if comparator(v, max_value):
                max_value = v
        return max_value

    return impl_max


@register_jitable
def array_argmin_impl_datetime(arry):
    # argmin for datetime/timedelta: the first NaT wins immediately.
    if arry.size == 0:
        raise ValueError("attempt to get argmin of an empty sequence")
    it = np.nditer(arry)
    min_value = next(it).take(0)
    min_idx = 0
    if np.isnat(min_value):
        return min_idx

    idx = 1
    for view in it:
        v = view.item()
        if np.isnat(v):
            return idx
        if v < min_value:
            min_value = v
            min_idx = idx
        idx += 1
    return min_idx


@register_jitable
def array_argmin_impl_float(arry):
    # argmin for floats: the first NaN wins immediately (NumPy semantics).
    if arry.size == 0:
        raise ValueError("attempt to get argmin of an empty sequence")
    for v in arry.flat:
        min_value = v
        min_idx = 0
        break
    if np.isnan(min_value):
        return min_idx

    idx = 0
    for v in arry.flat:
        if np.isnan(v):
            return idx
        if v < min_value:
            min_value = v
            min_idx = idx
        idx += 1
    return min_idx


@register_jitable
def array_argmin_impl_generic(arry):
    # argmin for dtypes without a NaN-like sentinel: plain linear scan.
    if arry.size == 0:
        raise ValueError("attempt to get argmin of an empty sequence")
    for v in arry.flat:
        min_value = v
        min_idx = 0
        break
    else:
        raise RuntimeError('unreachable')

    idx = 0
    for v in arry.flat:
        if v < min_value:
            min_value = v
            min_idx = idx
        idx += 1
    return min_idx


@overload(np.argmin)
@overload_method(types.Array, "argmin")
def array_argmin(a, axis=None):
    # Dispatch to the dtype-appropriate flat implementation; with an axis,
    # wrap it via build_argmax_or_argmin_with_axis_impl below.
    if isinstance(a.dtype, (types.NPDatetime, types.NPTimedelta)):
        flatten_impl = array_argmin_impl_datetime
    elif isinstance(a.dtype, types.Float):
        flatten_impl = array_argmin_impl_float
    else:
        flatten_impl = array_argmin_impl_generic

    if is_nonelike(axis):
        def array_argmin_impl(a, axis=None):
            return flatten_impl(a)
    else:
        array_argmin_impl = build_argmax_or_argmin_with_axis_impl(
            a, axis, flatten_impl
        )
    return array_argmin_impl


@register_jitable
def array_argmax_impl_datetime(arry):
    # argmax for datetime/timedelta: the first NaT wins immediately.
    if arry.size == 0:
        raise ValueError("attempt to get argmax of an empty sequence")
    it = np.nditer(arry)
    max_value = next(it).take(0)
    max_idx = 0
    if np.isnat(max_value):
        return max_idx

    idx = 1
    for view in it:
        v = view.item()
        if np.isnat(v):
            return idx
        if v > max_value:
            max_value = v
            max_idx = idx
        idx += 1
    return max_idx


@register_jitable
def array_argmax_impl_float(arry):
    # argmax for floats: the first NaN wins immediately (NumPy semantics).
    if arry.size == 0:
        raise ValueError("attempt to get argmax of an empty sequence")
    for v in arry.flat:
        max_value = v
        max_idx = 0
        break
    if np.isnan(max_value):
        return max_idx

    idx = 0
    for v in arry.flat:
        if np.isnan(v):
            return idx
        if v > max_value:
            max_value = v
            max_idx = idx
        idx += 1
    return max_idx


@register_jitable
def array_argmax_impl_generic(arry):
    # argmax for dtypes without a NaN-like sentinel: plain linear scan.
    if arry.size == 0:
        raise ValueError("attempt to get argmax of an empty sequence")
    for v in arry.flat:
        max_value = v
        max_idx = 0
        break

    idx = 0
    for v in arry.flat:
        if v > max_value:
            max_value = v
            max_idx = idx
        idx += 1
    return max_idx


def build_argmax_or_argmin_with_axis_impl(a, axis, flatten_impl):
    """
    Given a function that implements the logic for handling a flattened
    array, return the implementation function.
    """
    check_is_integer(axis, "axis")
    retty = types.intp

    tuple_buffer = tuple(range(a.ndim))

    def impl(a, axis=None):
        if axis < 0:
            axis = a.ndim + axis

        if axis < 0 or axis >= a.ndim:
            raise ValueError("axis is out of bounds")

        # Short circuit 1-dimensional arrays:
        if a.ndim == 1:
            return flatten_impl(a)

        # Make chosen axis the last axis:
        tmp = tuple_buffer
        for i in range(axis, a.ndim - 1):
            tmp = tuple_setitem(tmp, i, i + 1)
        transpose_index = tuple_setitem(tmp, a.ndim - 1, axis)
        transposed_arr = a.transpose(transpose_index)

        # Flatten along that axis; since we've transposed, we can just get
        # batches off the overall flattened array.
        m = transposed_arr.shape[-1]
        raveled = transposed_arr.ravel()
        assert raveled.size == a.size
        assert transposed_arr.size % m == 0
        out = np.empty(transposed_arr.size // m, retty)
        for i in range(out.size):
            out[i] = flatten_impl(raveled[i * m:(i + 1) * m])

        # Reshape based on axis we didn't flatten over:
        return out.reshape(transposed_arr.shape[:-1])

    return impl


@overload(np.argmax)
@overload_method(types.Array, "argmax")
def array_argmax(a, axis=None):
    # Same dispatch structure as array_argmin above.
    if isinstance(a.dtype, (types.NPDatetime, types.NPTimedelta)):
        flatten_impl = array_argmax_impl_datetime
    elif isinstance(a.dtype, types.Float):
        flatten_impl = array_argmax_impl_float
    else:
        flatten_impl = array_argmax_impl_generic

    if is_nonelike(axis):
        def array_argmax_impl(a, axis=None):
            return flatten_impl(a)
    else:
        array_argmax_impl = build_argmax_or_argmin_with_axis_impl(
            a, axis, flatten_impl
        )
    return array_argmax_impl


@overload(np.all)
@overload_method(types.Array, "all")
def np_all(a):
    # True iff every element is truthy (short-circuits on the first falsy).
    def flat_all(a):
        for v in np.nditer(a):
            if not v.item():
                return False
        return True

    return flat_all


@register_jitable
def _allclose_scalars(a_v, b_v, rtol=1e-05, atol=1e-08, equal_nan=False):
    # Scalar core of np.allclose: NaN handling per `equal_nan`, exact
    # comparison for infinities, tolerance test otherwise.
    a_v_isnan = np.isnan(a_v)
    b_v_isnan = np.isnan(b_v)

    # only one of the values is NaN and the
    # other is not.
    if ( (not a_v_isnan and b_v_isnan) or
            (a_v_isnan and not b_v_isnan) ):
        return False

    # either both of the values are NaN
    # or both are numbers
    if a_v_isnan and b_v_isnan:
        if not equal_nan:
            return False
    else:
        if np.isinf(a_v) or np.isinf(b_v):
            return a_v == b_v

        if np.abs(a_v - b_v) > atol + rtol * np.abs(b_v * 1.0):
            return False

    return True


@overload(np.allclose)
@overload_method(types.Array, "allclose")
def np_allclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
    # NOTE(review): NumPy's ndarray has no `.allclose` method — confirm the
    # `overload_method` registration above is intentional API surface.

    if not type_can_asarray(a):
        raise TypingError('The first argument "a" must be array-like')

    if not type_can_asarray(b):
        raise TypingError('The second argument "b" must be array-like')

    if not isinstance(rtol, (float, types.Float)):
        raise TypingError('The third argument "rtol" must be a '
                          'floating point')

    if not isinstance(atol, (float, types.Float)):
        raise TypingError('The fourth argument "atol" must be a '
                          'floating point')

    if not isinstance(equal_nan, (bool, types.Boolean)):
        raise TypingError('The fifth argument "equal_nan" must be a '
                          'boolean')

    is_a_scalar = isinstance(a, types.Number)
    is_b_scalar = isinstance(b, types.Number)

    if is_a_scalar and is_b_scalar:
        def np_allclose_impl_scalar_scalar(a, b, rtol=1e-05, atol=1e-08,
                                           equal_nan=False):
            return _allclose_scalars(a, b, rtol=rtol, atol=atol,
                                     equal_nan=equal_nan)
        return np_allclose_impl_scalar_scalar
    elif is_a_scalar and not is_b_scalar:
        def np_allclose_impl_scalar_array(a, b, rtol=1e-05, atol=1e-08,
                                          equal_nan=False):
            b = np.asarray(b)
            for bv in np.nditer(b):
                if not _allclose_scalars(a, bv.item(), rtol=rtol, atol=atol,
                                         equal_nan=equal_nan):
                    return False
            return True
        return np_allclose_impl_scalar_array
    elif not is_a_scalar and is_b_scalar:
        def np_allclose_impl_array_scalar(a, b, rtol=1e-05, atol=1e-08,
                                          equal_nan=False):
            a = np.asarray(a)
            for av in np.nditer(a):
                if not _allclose_scalars(av.item(), b, rtol=rtol, atol=atol,
                                         equal_nan=equal_nan):
                    return False
            return True
        return np_allclose_impl_array_scalar
    elif not is_a_scalar and not is_b_scalar:
        def np_allclose_impl_array_array(a, b, rtol=1e-05, atol=1e-08,
                                         equal_nan=False):
            a = np.asarray(a)
            b = np.asarray(b)
            a_a, b_b = np.broadcast_arrays(a, b)

            for av, bv in np.nditer((a_a, b_b)):
                if not _allclose_scalars(av.item(), bv.item(), rtol=rtol,
                                         atol=atol, equal_nan=equal_nan):
                    return False

            return True

        return np_allclose_impl_array_array


@overload(np.any)
@overload_method(types.Array, "any")
def np_any(a):
    # True iff any element is truthy (short-circuits on the first truthy).
    def flat_any(a):
        for v in np.nditer(a):
            if v.item():
                return True
        return False

    return flat_any


@overload(np.average)
def np_average(a, axis=None, weights=None):
    # NOTE(review): when `weights` is None the `axis` argument is silently
    # ignored and a full mean is returned — presumably typing restricts
    # axis=None in that case; confirm against the typing layer.

    if weights is None or isinstance(weights, types.NoneType):
        def np_average_impl(a, axis=None, weights=None):
            arr = np.asarray(a)
            return np.mean(arr)
    else:
        if axis is None or isinstance(axis, types.NoneType):
            def np_average_impl(a, axis=None, weights=None):
                arr = np.asarray(a)
                weights = np.asarray(weights)

                if arr.shape != weights.shape:
                    if axis is None:
                        raise TypeError(
                            "Numba does not support average when shapes of "
                            "a and weights differ.")
                    if weights.ndim != 1:
                        raise TypeError(
                            "1D weights expected when shapes of "
                            "a and weights differ.")

                scl = np.sum(weights)
                if scl == 0.0:
                    raise ZeroDivisionError(
                        "Weights sum to zero, can't be normalized.")

                avg = np.sum(np.multiply(arr, weights)) / scl
                return avg
        else:
            def np_average_impl(a, axis=None, weights=None):
                raise TypeError("Numba does not support average with axis.")

    return np_average_impl


def get_isnan(dtype):
    """
    A generic isnan() function
    """
    # Non-float/complex dtypes get a constant-False predicate so callers can
    # use one code path for all dtypes.
    if isinstance(dtype, (types.Float, types.Complex)):
        return np.isnan
    else:
        @register_jitable
        def _trivial_isnan(x):
            return False
        return _trivial_isnan


@overload(np.iscomplex)
def np_iscomplex(x):
    # Elementwise "has a non-zero imaginary part".
    if type_can_asarray(x):
        # NumPy uses asanyarray here!
        return lambda x: np.asarray(x).imag != 0
    return None


@overload(np.isreal)
def np_isreal(x):
    # Elementwise "has a zero imaginary part".
    if type_can_asarray(x):
        # NumPy uses asanyarray here!
        return lambda x: np.asarray(x).imag == 0
    return None


@overload(np.iscomplexobj)
def iscomplexobj(x):
    # Compile-time check on the *dtype* (not the values); Optional unwraps
    # to its payload type and a None payload is reported as non-complex.
    # Implementation based on NumPy
    # https://github.com/numpy/numpy/blob/d9b1e32cb8ef90d6b4a47853241db2a28146a57d/numpy/lib/type_check.py#L282-L320
    dt = determine_dtype(x)
    if isinstance(x, types.Optional):
        dt = determine_dtype(x.type)
    iscmplx = np.issubdtype(dt, np.complexfloating)

    if isinstance(x, types.Optional):
        def impl(x):
            if x is None:
                return False
            return iscmplx
    else:
        def impl(x):
            return iscmplx
    return impl


@overload(np.isrealobj)
def isrealobj(x):
    # Return True if x is not a complex type.
    # Implementation based on NumPy
    # https://github.com/numpy/numpy/blob/ccfbcc1cd9a4035a467f2e982a565ab27de25b6b/numpy/lib/type_check.py#L290-L322
    def impl(x):
        return not np.iscomplexobj(x)
    return impl


@overload(np.isscalar)
def np_isscalar(element):
    # Resolved entirely at compile time from the argument's Numba type.
    res = type_is_scalar(element)

    def impl(element):
        return res
    return impl


def is_np_inf_impl(x, out, fn):
    # Shared builder for isneginf/isposinf; `fn` selects the sign test.

    # if/else branch should be unified after PR #5606 is merged
    if is_nonelike(out):
        def impl(x, out=None):
            return np.logical_and(np.isinf(x), fn(np.signbit(x)))
    else:
        def impl(x, out=None):
            return np.logical_and(np.isinf(x), fn(np.signbit(x)), out)

    return impl


@overload(np.isneginf)
def isneginf(x, out=None):
    # Negative infinity: signbit set.
    fn = register_jitable(lambda x: x)
    return is_np_inf_impl(x, out, fn)


@overload(np.isposinf)
def isposinf(x, out=None):
    # Positive infinity: signbit clear.
    fn = register_jitable(lambda x: ~x)
    return is_np_inf_impl(x, out, fn)


@register_jitable
def less_than(a, b):
    return a < b


@register_jitable
def greater_than(a, b):
    return a > b


@register_jitable
def check_array(a):
    # Guard shared by the nan-aware reductions below: reject empty input.
    if a.size == 0:
        raise ValueError('zero-size array to reduction operation '
not possible') + + +def nan_min_max_factory(comparison_op, is_complex_dtype): + if is_complex_dtype: + def impl(a): + arr = np.asarray(a) + check_array(arr) + it = np.nditer(arr) + return_val = next(it).take(0) + for view in it: + v = view.item() + if np.isnan(return_val.real) and not np.isnan(v.real): + return_val = v + else: + if comparison_op(v.real, return_val.real): + return_val = v + elif v.real == return_val.real: + if comparison_op(v.imag, return_val.imag): + return_val = v + return return_val + else: + def impl(a): + arr = np.asarray(a) + check_array(arr) + it = np.nditer(arr) + return_val = next(it).take(0) + for view in it: + v = view.item() + if not np.isnan(v): + if not comparison_op(return_val, v): + return_val = v + return return_val + + return impl + + +real_nanmin = register_jitable( + nan_min_max_factory(less_than, is_complex_dtype=False) +) +real_nanmax = register_jitable( + nan_min_max_factory(greater_than, is_complex_dtype=False) +) +complex_nanmin = register_jitable( + nan_min_max_factory(less_than, is_complex_dtype=True) +) +complex_nanmax = register_jitable( + nan_min_max_factory(greater_than, is_complex_dtype=True) +) + + +@register_jitable +def _isclose_item(x, y, rtol, atol, equal_nan): + if np.isnan(x) and np.isnan(y): + return equal_nan + elif np.isinf(x) and np.isinf(y): + return (x > 0) == (y > 0) + elif np.isinf(x) or np.isinf(y): + return False + else: + return abs(x - y) <= atol + rtol * abs(y) + + +@overload(np.isclose) +def isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False): + if not type_can_asarray(a): + raise TypingError('The first argument "a" must be array-like') + + if not type_can_asarray(b): + raise TypingError('The second argument "b" must be array-like') + + if not isinstance(rtol, (float, types.Float)): + raise TypingError('The third argument "rtol" must be a ' + 'floating point') + + if not isinstance(atol, (float, types.Float)): + raise TypingError('The fourth argument "atol" must be a ' + 'floating point') + + 
if not isinstance(equal_nan, (bool, types.Boolean)): + raise TypingError('The fifth argument "equal_nan" must be a ' + 'boolean') + + if isinstance(a, types.Array) and isinstance(b, types.Number): + def isclose_impl(a, b, rtol=1e-05, atol=1e-08, equal_nan=False): + x = a.reshape(-1) + y = b + out = np.zeros(len(x), np.bool_) + for i in range(len(out)): + out[i] = _isclose_item(x[i], y, rtol, atol, equal_nan) + return out.reshape(a.shape) + + elif isinstance(a, types.Number) and isinstance(b, types.Array): + def isclose_impl(a, b, rtol=1e-05, atol=1e-08, equal_nan=False): + x = a + y = b.reshape(-1) + out = np.zeros(len(y), np.bool_) + for i in range(len(out)): + out[i] = _isclose_item(x, y[i], rtol, atol, equal_nan) + return out.reshape(b.shape) + + elif isinstance(a, types.Array) and isinstance(b, types.Array): + def isclose_impl(a, b, rtol=1e-05, atol=1e-08, equal_nan=False): + shape = np.broadcast_shapes(a.shape, b.shape) + a_ = np.broadcast_to(a, shape) + b_ = np.broadcast_to(b, shape) + + out = np.zeros(len(a_), dtype=np.bool_) + for i, (av, bv) in enumerate(np.nditer((a_, b_))): + out[i] = _isclose_item(av.item(), bv.item(), rtol, atol, + equal_nan) + return np.broadcast_to(out, shape) + + else: + def isclose_impl(a, b, rtol=1e-05, atol=1e-08, equal_nan=False): + return _isclose_item(a, b, rtol, atol, equal_nan) + + return isclose_impl + + +@overload(np.nanmin) +def np_nanmin(a): + dt = determine_dtype(a) + if np.issubdtype(dt, np.complexfloating): + return complex_nanmin + else: + return real_nanmin + + +@overload(np.nanmax) +def np_nanmax(a): + dt = determine_dtype(a) + if np.issubdtype(dt, np.complexfloating): + return complex_nanmax + else: + return real_nanmax + + +@overload(np.nanmean) +def np_nanmean(a): + if not isinstance(a, types.Array): + return + isnan = get_isnan(a.dtype) + + def nanmean_impl(a): + c = 0.0 + count = 0 + for view in np.nditer(a): + v = view.item() + if not isnan(v): + c += v.item() + count += 1 + # np.divide() doesn't raise 
# (continuation of `np_nanmean` begun above — the comment and return below
# close out nanmean_impl)
        # ZeroDivisionError
        return np.divide(c, count)

    return nanmean_impl


@overload(np.nanvar)
def np_nanvar(a):
    # Variance over non-NaN elements; complex-safe via np.real/np.conj.
    if not isinstance(a, types.Array):
        return
    isnan = get_isnan(a.dtype)

    def nanvar_impl(a):
        # Compute the mean
        m = np.nanmean(a)

        # Compute the sum of square diffs
        ssd = 0.0
        count = 0
        for view in np.nditer(a):
            v = view.item()
            if not isnan(v):
                val = (v.item() - m)
                ssd += np.real(val * np.conj(val))
                count += 1
        # np.divide() doesn't raise ZeroDivisionError
        return np.divide(ssd, count)

    return nanvar_impl


@overload(np.nanstd)
def np_nanstd(a):
    # Standard deviation as the square root of nanvar.
    if not isinstance(a, types.Array):
        return

    def nanstd_impl(a):
        return np.nanvar(a) ** 0.5

    return nanstd_impl


@overload(np.nansum)
def np_nansum(a):
    # Sum over non-NaN elements; integer inputs accumulate in intp.
    if not isinstance(a, types.Array):
        return
    if isinstance(a.dtype, types.Integer):
        retty = types.intp
    else:
        retty = a.dtype
    zero = retty(0)
    isnan = get_isnan(a.dtype)

    def nansum_impl(a):
        c = zero
        for view in np.nditer(a):
            v = view.item()
            if not isnan(v):
                c += v
        return c

    return nansum_impl


@overload(np.nanprod)
def np_nanprod(a):
    # Product over non-NaN elements; integer inputs accumulate in intp.
    if not isinstance(a, types.Array):
        return
    if isinstance(a.dtype, types.Integer):
        retty = types.intp
    else:
        retty = a.dtype
    one = retty(1)
    isnan = get_isnan(a.dtype)

    def nanprod_impl(a):
        c = one
        for view in np.nditer(a):
            v = view.item()
            if not isnan(v):
                c *= v
        return c

    return nanprod_impl


@overload(np.nancumprod)
def np_nancumprod(a):
    # Cumulative product that carries the running value across NaNs.
    if not isinstance(a, types.Array):
        return

    if isinstance(a.dtype, (types.Boolean, types.Integer)):
        # dtype cannot possibly contain NaN
        return lambda a: np.cumprod(a)
    else:
        retty = a.dtype
        is_nan = get_isnan(retty)
        one = retty(1)

        def nancumprod_impl(a):
            out = np.empty(a.size, retty)
            c = one
            for idx, v in enumerate(a.flat):
                if ~is_nan(v):
                    c *= v
                out[idx] = c
            return out

        return nancumprod_impl


@overload(np.nancumsum)
def np_nancumsum(a):
    # Cumulative sum that carries the running value across NaNs.
    if not isinstance(a, types.Array):
        return

    if isinstance(a.dtype, (types.Boolean, types.Integer)):
        # dtype cannot possibly contain NaN
        return lambda a: np.cumsum(a)
    else:
        retty = a.dtype
        is_nan = get_isnan(retty)
        zero = retty(0)

        def nancumsum_impl(a):
            out = np.empty(a.size, retty)
            c = zero
            for idx, v in enumerate(a.flat):
                if ~is_nan(v):
                    c += v
                out[idx] = c
            return out

        return nancumsum_impl


@register_jitable
def prepare_ptp_input(a):
    # Coerce to an array and reject empty input before the ptp scan.
    arr = _asarray(a)
    if len(arr) == 0:
        raise ValueError('zero-size array reduction not possible')
    else:
        return arr


def _compute_current_val_impl_gen(op, current_val, val):
    # Build a "keep the better of two values" helper for np.ptp, with the
    # lexicographic ordering rule for complex values.
    if isinstance(current_val, types.Complex):
        # The sort order for complex numbers is lexicographic. If both the
        # real and imaginary parts are non-nan then the order is determined
        # by the real parts except when they are equal, in which case the
        # order is determined by the imaginary parts.
        # https://github.com/numpy/numpy/blob/577a86e/numpy/core/fromnumeric.py#L874-L877 # noqa: E501
        def impl(current_val, val):
            if op(val.real, current_val.real):
                return val
            elif (val.real == current_val.real
                    and op(val.imag, current_val.imag)):
                return val
            return current_val
    else:
        def impl(current_val, val):
            return val if op(val, current_val) else current_val
    return impl


def _compute_a_max(current_val, val):
    # Stub: concrete implementation supplied by the @overload below.
    pass


def _compute_a_min(current_val, val):
    # Stub: concrete implementation supplied by the @overload below.
    pass


@overload(_compute_a_max)
def _compute_a_max_impl(current_val, val):
    return _compute_current_val_impl_gen(operator.gt, current_val, val)


@overload(_compute_a_min)
def _compute_a_min_impl(current_val, val):
    return _compute_current_val_impl_gen(operator.lt, current_val, val)


def _early_return(val):
    # Stub: concrete implementation supplied by the @overload below.
    pass


@overload(_early_return)
def _early_return_impl(val):
    # Returns (should_return_now, value): the ptp scan aborts with a NaN
    # result as soon as it sees a NaN component.
    UNUSED = 0
    if isinstance(val, types.Complex):
        def impl(val):
            if np.isnan(val.real):
                if np.isnan(val.imag):
                    return True, np.nan + np.nan * 1j
                else:
                    return True, np.nan + 0j
            else:
                return False, UNUSED
    elif isinstance(val, types.Float):
        def impl(val):
            if np.isnan(val):
                return True, np.nan
            else:
                return False, UNUSED
    else:
        def impl(val):
            return False, UNUSED
    return impl


@overload(np.ptp)
def np_ptp(a):
    # Peak-to-peak (max - min) over the flattened array, NaN-propagating.

    if hasattr(a, 'dtype'):
        if isinstance(a.dtype, types.Boolean):
            raise TypingError("Boolean dtype is unsupported (as per NumPy)")
            # Numpy raises a TypeError

    def np_ptp_impl(a):
        arr = prepare_ptp_input(a)

        a_flat = arr.flat
        a_min = a_flat[0]
        a_max = a_flat[0]

        for i in range(arr.size):
            val = a_flat[i]
            take_branch, retval = _early_return(val)
            if take_branch:
                return retval
            a_max = _compute_a_max(a_max, val)
            a_min = _compute_a_min(a_min, val)

        return a_max - a_min

    return np_ptp_impl


# ndarray.ptp was removed in NumPy 2.0, so the method form is only
# registered for older NumPy.
if numpy_version < (2, 0):
    overload_method(types.Array, 'ptp')(np_ptp)

#----------------------------------------------------------------------------
# Median and partitioning


@register_jitable
def nan_aware_less_than(a, b):
    # `<` that sorts NaN after every other value (NaN is never "less").
    if np.isnan(a):
        return False
    else:
        if np.isnan(b):
            return True
        else:
            return a < b


def _partition_factory(pivotimpl, argpartition=False):
    # Hoare-style partition used by select/partition below; when
    # `argpartition` is set, the index array `I` mirrors every swap of `A`.
    def _partition(A, low, high, I=None):
        mid = (low + high) >> 1
        # NOTE: the pattern of swaps below for the pivot choice and the
        # partitioning gives good results (i.e. regular O(n log n))
        # on sorted, reverse-sorted, and uniform arrays.  Subtle changes
        # risk breaking this property.

        # Use median of three {low, middle, high} as the pivot
        if pivotimpl(A[mid], A[low]):
            A[low], A[mid] = A[mid], A[low]
            if argpartition:
                I[low], I[mid] = I[mid], I[low]
        if pivotimpl(A[high], A[mid]):
            A[high], A[mid] = A[mid], A[high]
            if argpartition:
                I[high], I[mid] = I[mid], I[high]
        if pivotimpl(A[mid], A[low]):
            A[low], A[mid] = A[mid], A[low]
            if argpartition:
                I[low], I[mid] = I[mid], I[low]
        pivot = A[mid]

        A[high], A[mid] = A[mid], A[high]
        if argpartition:
            I[high], I[mid] = I[mid], I[high]
        i = low
        j = high - 1
        while True:
            while i < high and pivotimpl(A[i], pivot):
                i += 1
            while j >= low and pivotimpl(pivot, A[j]):
                j -= 1
            if i >= j:
                break
            A[i], A[j] = A[j], A[i]
            if argpartition:
                I[i], I[j] = I[j], I[i]
            i += 1
            j -= 1
        # Put the pivot back in its final place (all items before `i`
        # are smaller than the pivot, all items at/after `i` are larger)
        A[i], A[high] = A[high], A[i]
        if argpartition:
            I[i], I[high] = I[high], I[i]
        return i
    return _partition


# Concrete partition variants: plain, NaN-aware, and index-tracking.
_partition = register_jitable(_partition_factory(less_than))
_partition_w_nan = register_jitable(_partition_factory(nan_aware_less_than))
_argpartition_w_nan = register_jitable(_partition_factory(
    nan_aware_less_than,
    argpartition=True)
)


def _select_factory(partitionimpl):
    # Quickselect built on the given partition routine.
    def _select(arry, k, low, high, idx=None):
        """
        Select the k'th smallest element in array[low:high + 1].
        """
        i = partitionimpl(arry, low, high, idx)
        while i != k:
            if i < k:
                low = i + 1
                i = partitionimpl(arry, low, high, idx)
            else:
                high = i - 1
                i = partitionimpl(arry, low, high, idx)
        return arry[k]
    return _select


# Concrete quickselect variants matching the partition variants above.
_select = register_jitable(_select_factory(_partition))
_select_w_nan = register_jitable(_select_factory(_partition_w_nan))
_arg_select_w_nan = register_jitable(_select_factory(_argpartition_w_nan))


@register_jitable
def _select_two(arry, k, low, high):
    """
    Select the k'th and k+1'th smallest elements in array[low:high + 1].

    This is significantly faster than doing two independent selections
    for k and k+1.
    """
    while True:
        assert high > low  # by construction
        i = _partition(arry, low, high)
        if i < k:
            low = i + 1
        elif i > k + 1:
            high = i - 1
        elif i == k:
            _select(arry, k + 1, i + 1, high)
            break
        else:  # i == k + 1
            _select(arry, k, low, i - 1)
            break

    return arry[k], arry[k + 1]


@register_jitable
def _median_inner(temp_arry, n):
    """
    The main logic of the median() call.  *temp_arry* must be disposable,
    as this function will mutate it.
    """
    low = 0
    high = n - 1
    half = n >> 1
    if n & 1 == 0:
        a, b = _select_two(temp_arry, half - 1, low, high)
        return (a + b) / 2
    else:
        return _select(temp_arry, half, low, high)


@overload(np.median)
def np_median(a):
    # Median of the flattened array via quickselect on a private copy.
    if not isinstance(a, types.Array):
        return

    def median_impl(a):
        # np.median() works on the flattened array, and we need a temporary
        # workspace anyway
        temp_arry = a.flatten()
        n = temp_arry.shape[0]
        return _median_inner(temp_arry, n)

    return median_impl


@register_jitable
def _collect_percentiles_inner(a, q):
    # Core percentile computation on a disposable 1-D array `a`, with `q`
    # already scaled to [0, 100].
    #TODO: This needs rewriting to be closer to NumPy, particularly the nan/inf
    # handling which is generally subject to algorithmic changes.
    n = len(a)

    if n == 1:
        # single element array; output same for all percentiles
        out = np.full(len(q), a[0], dtype=np.float64)
    else:
        out = np.empty(len(q), dtype=np.float64)
        for i in range(len(q)):
            percentile = q[i]

            # bypass pivoting where requested percentile is 100
            if percentile == 100:
                val = np.max(a)
                # heuristics to handle infinite values a la NumPy
                if ~np.all(np.isfinite(a)):
                    if ~np.isfinite(val):
                        val = np.nan

            # bypass pivoting where requested percentile is 0
            elif percentile == 0:
                val = np.min(a)
                # convoluted heuristics to handle infinite values a la NumPy
                if ~np.all(np.isfinite(a)):
                    num_pos_inf = np.sum(a == np.inf)
                    num_neg_inf = np.sum(a == -np.inf)
                    num_finite = n - (num_neg_inf + num_pos_inf)
                    if num_finite == 0:
                        val = np.nan
                    if num_pos_inf == 1 and n == 2:
                        val = np.nan
                    if num_neg_inf > 1:
                        val = np.nan
                    if num_finite == 1:
                        if num_pos_inf > 1:
                            if num_neg_inf != 1:
                                val = np.nan

            else:
                # linear interp between closest ranks
                rank = 1 + (n - 1) * np.true_divide(percentile, 100.0)
                f = math.floor(rank)
                m = rank - f
                lower, upper = _select_two(a, k=int(f - 1), low=0,
                                           high=(n - 1))
                val = lower * (1 - m) + upper * m
            out[i] = val

    return out


@register_jitable
def _can_collect_percentiles(a, nan_mask, skip_nan):
    # Decide whether percentiles are computable, given the NaN policy.
    if skip_nan:
        a = a[~nan_mask]
        if len(a) == 0:
            return False  # told to skip nan, but no elements remain
    else:
        if np.any(nan_mask):
            return False  # told *not* to skip nan, but nan encountered

    if len(a) == 1:  # single element array
        val = a[0]
        return np.isfinite(val)  # can collect percentiles if element is finite
    else:
        return True


@register_jitable
def check_valid(q, q_upper_bound):
    # Validate the requested quantiles/percentiles: within bounds, no NaNs.
    valid = True

    # avoid expensive reductions where possible
    if q.ndim == 1 and q.size < 10:
        for i in range(q.size):
            if q[i] < 0.0 or q[i] > q_upper_bound or np.isnan(q[i]):
                valid = False
                break
    else:
        if np.any(np.isnan(q)) or np.any(q < 0.0) or \
                np.any(q > q_upper_bound):
            valid = False

    return valid


@register_jitable
def percentile_is_valid(q):
    if not check_valid(q, q_upper_bound=100.0):
        raise ValueError('Percentiles must be in the range [0, 100]')


@register_jitable
def quantile_is_valid(q):
    if not check_valid(q, q_upper_bound=1.0):
        raise ValueError('Quantiles must be in the range [0, 1]')


@register_jitable
def _collect_percentiles(a, q, check_q, factor, skip_nan):
    # Normalize inputs, validate q, scale quantiles to percent via `factor`,
    # and fall back to NaN output when the NaN policy forbids a result.
    q = np.asarray(q, dtype=np.float64).flatten()
    check_q(q)
    q = q * factor

    temp_arry = np.asarray(a, dtype=np.float64).flatten()
    nan_mask = np.isnan(temp_arry)

    if _can_collect_percentiles(temp_arry, nan_mask, skip_nan):
        temp_arry = temp_arry[~nan_mask]
        out = _collect_percentiles_inner(temp_arry, q)
    else:
        out = np.full(len(q), np.nan)

    return out


def _percentile_quantile_inner(a, q, skip_nan, factor, check_q):
    """
    The underlying algorithm to find percentiles and quantiles
    is the same, hence we converge onto the same code paths
    in this inner function implementation
    """
    dt = determine_dtype(a)
    if np.issubdtype(dt, np.complexfloating):
        raise TypingError('Not supported for complex dtype')
        # this could be supported, but would require a
        # lexicographic comparison

    def np_percentile_q_scalar_impl(a, q):
        return _collect_percentiles(a, q, check_q, factor, skip_nan)[0]

    def np_percentile_impl(a, q):
        return _collect_percentiles(a, q, check_q, factor, skip_nan)

    # Scalar (or 0-d array) q yields a scalar result; array q an array.
    if isinstance(q, (types.Number, types.Boolean)):
        return np_percentile_q_scalar_impl
    elif isinstance(q, types.Array) and q.ndim == 0:
        return np_percentile_q_scalar_impl
    else:
        return np_percentile_impl


@overload(np.percentile)
def np_percentile(a, q):
    return _percentile_quantile_inner(
        a, q, skip_nan=False, factor=1.0, check_q=percentile_is_valid
    )


@overload(np.nanpercentile)
def np_nanpercentile(a, q):
    return _percentile_quantile_inner(
        a, q, skip_nan=True, factor=1.0, check_q=percentile_is_valid
    )


@overload(np.quantile)
def np_quantile(a, q):
    return _percentile_quantile_inner(
        a, q, skip_nan=False, factor=100.0, check_q=quantile_is_valid
    )


@overload(np.nanquantile)
def np_nanquantile(a, q):
    return _percentile_quantile_inner(
        a, q, skip_nan=True, factor=100.0, check_q=quantile_is_valid
    )


@overload(np.nanmedian)
def np_nanmedian(a):
    # Median over non-NaN values; all-NaN input yields NaN.
    if not isinstance(a, types.Array):
        return
    isnan = get_isnan(a.dtype)

    def nanmedian_impl(a):
        # Create a temporary workspace with only non-NaN values
        temp_arry = np.empty(a.size, a.dtype)
        n = 0
        for view in np.nditer(a):
            v = view.item()
            if not isnan(v):
                temp_arry[n] = v
                n += 1

        # all NaNs
        if n == 0:
            return np.nan

        return _median_inner(temp_arry, n)

    return nanmedian_impl


@register_jitable
def np_partition_impl_inner(a, kth_array):
    # Partition each last-axis lane of `a` around every index in the sorted
    # kth_array; returns a new array (original is untouched).

    # allocate and fill empty array rather than copy a and mutate in place
    # as the latter approach fails to preserve strides
    out = np.empty_like(a)

    idx = np.ndindex(a.shape[:-1])  # Numpy default partition axis is -1
    for s in idx:
        arry = a[s].copy()
        low = 0
        high = len(arry) - 1

        for kth in kth_array:
            _select_w_nan(arry, kth, low, high)
            low = kth  # narrow span of subsequent partition

        out[s] = arry
    return out


@register_jitable
def np_argpartition_impl_inner(a, kth_array):
    # As np_partition_impl_inner, but returns the index permutation.

    # allocate and fill empty array rather than copy a and mutate in place
    # as the latter approach fails to preserve strides
    out = np.empty_like(a, dtype=np.intp)

    idx = np.ndindex(a.shape[:-1])  # Numpy default partition axis is -1
    for s in idx:
        arry = a[s].copy()
        idx_arry = np.arange(len(arry))
        low = 0
        high = len(arry) - 1

        for kth in kth_array:
            _arg_select_w_nan(arry, kth, low, high, idx_arry)
            low = kth  # narrow span of subsequent partition

        out[s] = idx_arry
    return out


@register_jitable
def valid_kths(a, kth):
    """
    Returns a sorted, unique array of kth values which serve
    as indexers for partitioning the input array, a.

    If the absolute value of any of the provided values
    is greater than a.shape[-1] an exception is raised since
    we are partitioning along the last axis (per Numpy default
    behaviour).

    Values less than 0 are transformed to equivalent positive
    index values.
    """
    # cast boolean to int, where relevant
    kth_array = _asarray(kth).astype(np.int64)

    if kth_array.ndim != 1:
        raise ValueError('kth must be scalar or 1-D')
        # numpy raises ValueError: object too deep for desired array

    if np.any(np.abs(kth_array) >= a.shape[-1]):
        raise ValueError("kth out of bounds")

    out = np.empty_like(kth_array)

    for index, val in np.ndenumerate(kth_array):
        if val < 0:
            out[index] = val + a.shape[-1]  # equivalent positive index
        else:
            out[index] = val

    return np.unique(out)


@overload(np.partition)
def np_partition(a, kth):
    # np.partition: validate types at compile time, then delegate to the
    # jitable helpers above.

    if not isinstance(a, (types.Array, types.Sequence, types.Tuple)):
        raise NumbaTypeError('The first argument must be an array-like')

    if isinstance(a, types.Array) and a.ndim == 0:
        msg = 'The first argument must be at least 1-D (found 0-D)'
        raise NumbaTypeError(msg)

    kthdt = getattr(kth, 'dtype', kth)
    if not isinstance(kthdt, (types.Boolean, types.Integer)):
        # bool gets cast to int subsequently
        raise NumbaTypeError('Partition index must be integer')

    def np_partition_impl(a, kth):
        a_tmp = _asarray(a)
        if a_tmp.size == 0:
            return a_tmp.copy()
        else:
            kth_array = valid_kths(a_tmp, kth)
            return np_partition_impl_inner(a_tmp, kth_array)

    return np_partition_impl


@overload(np.argpartition)
def np_argpartition(a, kth):
    # np.argpartition: same validation as np_partition above.

    if not isinstance(a, (types.Array, types.Sequence, types.Tuple)):
        raise NumbaTypeError('The first argument must be an array-like')

    if isinstance(a, types.Array) and a.ndim == 0:
        msg = 'The first argument must be at least 1-D (found 0-D)'
        raise NumbaTypeError(msg)

    kthdt = getattr(kth, 'dtype', kth)
    # NOTE(review): excerpt truncated here — the remainder of
    # np_argpartition lies beyond this chunk.
if not isinstance(kthdt, (types.Boolean, types.Integer)): + # bool gets cast to int subsequently + raise NumbaTypeError('Partition index must be integer') + + def np_argpartition_impl(a, kth): + a_tmp = _asarray(a) + if a_tmp.size == 0: + return a_tmp.copy().astype('intp') + else: + kth_array = valid_kths(a_tmp, kth) + return np_argpartition_impl_inner(a_tmp, kth_array) + + return np_argpartition_impl + + +#---------------------------------------------------------------------------- +# Building matrices + +@register_jitable +def _tri_impl(N, M, k): + shape = max(0, N), max(0, M) # numpy floors each dimension at 0 + out = np.empty(shape, dtype=np.float64) # numpy default dtype + + for i in range(shape[0]): + m_max = min(max(0, i + k + 1), shape[1]) + out[i, :m_max] = 1 + out[i, m_max:] = 0 + + return out + + +@overload(np.tri) +def np_tri(N, M=None, k=0): + + # we require k to be integer, unlike numpy + check_is_integer(k, 'k') + + def tri_impl(N, M=None, k=0): + if M is None: + M = N + return _tri_impl(N, M, k) + + return tri_impl + + +@register_jitable +def _make_square(m): + """ + Takes a 1d array and tiles it to form a square matrix + - i.e. 
a facsimile of np.tile(m, (len(m), 1)) + """ + assert m.ndim == 1 + + len_m = len(m) + out = np.empty((len_m, len_m), dtype=m.dtype) + + for i in range(len_m): + out[i] = m + + return out + + +@register_jitable +def np_tril_impl_2d(m, k=0): + mask = np.tri(m.shape[-2], M=m.shape[-1], k=k).astype(np.uint) + return np.where(mask, m, np.zeros_like(m, dtype=m.dtype)) + + +@overload(np.tril) +def my_tril(m, k=0): + + # we require k to be integer, unlike numpy + check_is_integer(k, 'k') + + def np_tril_impl_1d(m, k=0): + m_2d = _make_square(m) + return np_tril_impl_2d(m_2d, k) + + def np_tril_impl_multi(m, k=0): + mask = np.tri(m.shape[-2], M=m.shape[-1], k=k).astype(np.uint) + idx = np.ndindex(m.shape[:-2]) + z = np.empty_like(m) + zero_opt = np.zeros_like(mask, dtype=m.dtype) + for sel in idx: + z[sel] = np.where(mask, m[sel], zero_opt) + return z + + if m.ndim == 1: + return np_tril_impl_1d + elif m.ndim == 2: + return np_tril_impl_2d + else: + return np_tril_impl_multi + + +@overload(np.tril_indices) +def np_tril_indices(n, k=0, m=None): + + # we require integer arguments, unlike numpy + check_is_integer(n, 'n') + check_is_integer(k, 'k') + if not is_nonelike(m): + check_is_integer(m, 'm') + + def np_tril_indices_impl(n, k=0, m=None): + return np.nonzero(np.tri(n, m, k=k)) + return np_tril_indices_impl + + +@overload(np.tril_indices_from) +def np_tril_indices_from(arr, k=0): + + # we require k to be integer, unlike numpy + check_is_integer(k, 'k') + + if arr.ndim != 2: + raise TypingError("input array must be 2-d") + + def np_tril_indices_from_impl(arr, k=0): + return np.tril_indices(arr.shape[0], k=k, m=arr.shape[1]) + return np_tril_indices_from_impl + + +@register_jitable +def np_triu_impl_2d(m, k=0): + mask = np.tri(m.shape[-2], M=m.shape[-1], k=k - 1).astype(np.uint) + return np.where(mask, np.zeros_like(m, dtype=m.dtype), m) + + +@overload(np.triu) +def my_triu(m, k=0): + # we require k to be integer, unlike numpy + check_is_integer(k, 'k') + + def 
np_triu_impl_1d(m, k=0): + m_2d = _make_square(m) + return np_triu_impl_2d(m_2d, k) + + def np_triu_impl_multi(m, k=0): + mask = np.tri(m.shape[-2], M=m.shape[-1], k=k - 1).astype(np.uint) + idx = np.ndindex(m.shape[:-2]) + z = np.empty_like(m) + zero_opt = np.zeros_like(mask, dtype=m.dtype) + for sel in idx: + z[sel] = np.where(mask, zero_opt, m[sel]) + return z + + if m.ndim == 1: + return np_triu_impl_1d + elif m.ndim == 2: + return np_triu_impl_2d + else: + return np_triu_impl_multi + + +@overload(np.triu_indices) +def np_triu_indices(n, k=0, m=None): + + # we require integer arguments, unlike numpy + check_is_integer(n, 'n') + check_is_integer(k, 'k') + if not is_nonelike(m): + check_is_integer(m, 'm') + + def np_triu_indices_impl(n, k=0, m=None): + return np.nonzero(1 - np.tri(n, m, k=k - 1)) + return np_triu_indices_impl + + +@overload(np.triu_indices_from) +def np_triu_indices_from(arr, k=0): + + # we require k to be integer, unlike numpy + check_is_integer(k, 'k') + + if arr.ndim != 2: + raise TypingError("input array must be 2-d") + + def np_triu_indices_from_impl(arr, k=0): + return np.triu_indices(arr.shape[0], k=k, m=arr.shape[1]) + return np_triu_indices_from_impl + + +def _prepare_array(arr): + pass + + +@overload(_prepare_array) +def _prepare_array_impl(arr): + if arr in (None, types.none): + return lambda arr: np.array(()) + else: + return lambda arr: _asarray(arr).ravel() + + +def _dtype_of_compound(inobj): + obj = inobj + while True: + if isinstance(obj, (types.Number, types.Boolean)): + return as_dtype(obj) + l = getattr(obj, '__len__', None) + if l is not None and l() == 0: # empty tuple or similar + return np.float64 + dt = getattr(obj, 'dtype', None) + if dt is None: + raise NumbaTypeError("type has no dtype attr") + if isinstance(obj, types.Sequence): + obj = obj.dtype + else: + return as_dtype(dt) + + +@overload(np.ediff1d) +def np_ediff1d(ary, to_end=None, to_begin=None): + + if isinstance(ary, types.Array): + if isinstance(ary.dtype, 
types.Boolean): + raise NumbaTypeError("Boolean dtype is unsupported (as per NumPy)") + # Numpy tries to do this: return ary[1:] - ary[:-1] which + # results in a TypeError exception being raised + + # Check that to_end and to_begin are compatible with ary + ary_dt = _dtype_of_compound(ary) + to_begin_dt = None + if not (is_nonelike(to_begin)): + to_begin_dt = _dtype_of_compound(to_begin) + to_end_dt = None + if not (is_nonelike(to_end)): + to_end_dt = _dtype_of_compound(to_end) + + if to_begin_dt is not None and not np.can_cast(to_begin_dt, ary_dt): + msg = "dtype of to_begin must be compatible with input ary" + raise NumbaTypeError(msg) + + if to_end_dt is not None and not np.can_cast(to_end_dt, ary_dt): + msg = "dtype of to_end must be compatible with input ary" + raise NumbaTypeError(msg) + + def np_ediff1d_impl(ary, to_end=None, to_begin=None): + # transform each input into an equivalent 1d array + start = _prepare_array(to_begin) + mid = _prepare_array(ary) + end = _prepare_array(to_end) + + out_dtype = mid.dtype + # output array dtype determined by ary dtype, per NumPy + # (for the most part); an exception to the rule is a zero length + # array-like, where NumPy falls back to np.float64; this behaviour + # is *not* replicated + + if len(mid) > 0: + out = np.empty((len(start) + len(mid) + len(end) - 1), + dtype=out_dtype) + start_idx = len(start) + mid_idx = len(start) + len(mid) - 1 + out[:start_idx] = start + out[start_idx:mid_idx] = np.diff(mid) + out[mid_idx:] = end + else: + out = np.empty((len(start) + len(end)), dtype=out_dtype) + start_idx = len(start) + out[:start_idx] = start + out[start_idx:] = end + return out + + return np_ediff1d_impl + + +def _select_element(arr): + pass + + +@overload(_select_element) +def _select_element_impl(arr): + zerod = getattr(arr, 'ndim', None) == 0 + if zerod: + def impl(arr): + x = np.array((1,), dtype=arr.dtype) + x[:] = arr + return x[0] + return impl + else: + def impl(arr): + return arr + return impl + + +def 
_get_d(dx, x): + pass + + +@overload(_get_d) +def get_d_impl(x, dx): + if is_nonelike(x): + def impl(x, dx): + return np.asarray(dx) + else: + def impl(x, dx): + return np.diff(np.asarray(x)) + return impl + + +@overload(np.trapz) +def np_trapz(y, x=None, dx=1.0): + + if isinstance(y, (types.Number, types.Boolean)): + raise TypingError('y cannot be a scalar') + elif isinstance(y, types.Array) and y.ndim == 0: + raise TypingError('y cannot be 0D') + # NumPy raises IndexError: list assignment index out of range + + # inspired by: + # https://github.com/numpy/numpy/blob/7ee52003/numpy/lib/function_base.py#L4040-L4065 # noqa: E501 + def impl(y, x=None, dx=1.0): + yarr = np.asarray(y) + d = _get_d(x, dx) + y_ave = (yarr[..., slice(1, None)] + yarr[..., slice(None, -1)]) / 2.0 + ret = np.sum(d * y_ave, -1) + processed = _select_element(ret) + return processed + + return impl + + +# numpy 2.0 rename np.trapz to np.trapezoid +if numpy_version >= (2, 0): + overload(np.trapezoid)(np_trapz) + + +@register_jitable +def _np_vander(x, N, increasing, out): + """ + Generate an N-column Vandermonde matrix from a supplied 1-dimensional + array, x. Store results in an output matrix, out, which is assumed to + be of the required dtype. + + Values are accumulated using np.multiply to match the floating point + precision behaviour of numpy.vander. 
+ """ + m, n = out.shape + assert m == len(x) + assert n == N + + if increasing: + for i in range(N): + if i == 0: + out[:, i] = 1 + else: + out[:, i] = np.multiply(x, out[:, (i - 1)]) + else: + for i in range(N - 1, -1, -1): + if i == N - 1: + out[:, i] = 1 + else: + out[:, i] = np.multiply(x, out[:, (i + 1)]) + + +@register_jitable +def _check_vander_params(x, N): + if x.ndim > 1: + raise ValueError('x must be a one-dimensional array or sequence.') + if N < 0: + raise ValueError('Negative dimensions are not allowed') + + +@overload(np.vander) +def np_vander(x, N=None, increasing=False): + if N not in (None, types.none): + if not isinstance(N, types.Integer): + raise TypingError('Second argument N must be None or an integer') + + def np_vander_impl(x, N=None, increasing=False): + if N is None: + N = len(x) + + _check_vander_params(x, N) + + # allocate output matrix using dtype determined in closure + out = np.empty((len(x), int(N)), dtype=dtype) + + _np_vander(x, N, increasing, out) + return out + + def np_vander_seq_impl(x, N=None, increasing=False): + if N is None: + N = len(x) + + x_arr = np.array(x) + _check_vander_params(x_arr, N) + + # allocate output matrix using dtype inferred when x_arr was created + out = np.empty((len(x), int(N)), dtype=x_arr.dtype) + + _np_vander(x_arr, N, increasing, out) + return out + + if isinstance(x, types.Array): + x_dt = as_dtype(x.dtype) + # replicate numpy behaviour w.r.t.type promotion + dtype = np.promote_types(x_dt, int) + return np_vander_impl + elif isinstance(x, (types.Tuple, types.Sequence)): + return np_vander_seq_impl + + +@overload(np.roll) +def np_roll(a, shift): + if not isinstance(shift, (types.Integer, types.Boolean)): + raise TypingError('shift must be an integer') + + def np_roll_impl(a, shift): + arr = np.asarray(a) + out = np.empty(arr.shape, dtype=arr.dtype) + # empty_like might result in different contiguity vs NumPy + + arr_flat = arr.flat + for i in range(arr.size): + idx = (i + shift) % arr.size + 
out.flat[idx] = arr_flat[i] + + return out + + if isinstance(a, (types.Number, types.Boolean)): + return lambda a, shift: np.asarray(a) + else: + return np_roll_impl + + +#---------------------------------------------------------------------------- +# Mathematical functions + +LIKELY_IN_CACHE_SIZE = 8 + + +@register_jitable +def binary_search_with_guess(key, arr, length, guess): + # NOTE: Do not refactor... see note in np_interp function impl below + # this is a facsimile of binary_search_with_guess prior to 1.15: + # https://github.com/numpy/numpy/blob/maintenance/1.15.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501 + # Permanent reference: + # https://github.com/numpy/numpy/blob/3430d78c01a3b9a19adad75f1acb5ae18286da73/numpy/core/src/multiarray/compiled_base.c#L447 # noqa: E501 + imin = 0 + imax = length + + # Handle keys outside of the arr range first + if key > arr[length - 1]: + return length + elif key < arr[0]: + return -1 + + # If len <= 4 use linear search. + # From above we know key >= arr[0] when we start. 
+ if length <= 4: + i = 1 + while i < length and key >= arr[i]: + i += 1 + return i - 1 + + if guess > length - 3: + guess = length - 3 + + if guess < 1: + guess = 1 + + # check most likely values: guess - 1, guess, guess + 1 + if key < arr[guess]: + if key < arr[guess - 1]: + imax = guess - 1 + + # last attempt to restrict search to items in cache + if guess > LIKELY_IN_CACHE_SIZE and \ + key >= arr[guess - LIKELY_IN_CACHE_SIZE]: + imin = guess - LIKELY_IN_CACHE_SIZE + else: + # key >= arr[guess - 1] + return guess - 1 + else: + # key >= arr[guess] + if key < arr[guess + 1]: + return guess + else: + # key >= arr[guess + 1] + if key < arr[guess + 2]: + return guess + 1 + else: + # key >= arr[guess + 2] + imin = guess + 2 + # last attempt to restrict search to items in cache + if (guess < (length - LIKELY_IN_CACHE_SIZE - 1)) and \ + (key < arr[guess + LIKELY_IN_CACHE_SIZE]): + imax = guess + LIKELY_IN_CACHE_SIZE + + # finally, find index by bisection + while imin < imax: + imid = imin + ((imax - imin) >> 1) + if key >= arr[imid]: + imin = imid + 1 + else: + imax = imid + + return imin - 1 + + +@register_jitable +def np_interp_impl_complex_inner(x, xp, fp, dtype): + # NOTE: Do not refactor... see note in np_interp function impl below + # this is a facsimile of arr_interp_complex post 1.16 with added + # branching to support np1.17 style NaN handling. 
+ # https://github.com/numpy/numpy/blob/maintenance/1.16.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501 + # Permanent reference: + # https://github.com/numpy/numpy/blob/971e2e89d08deeae0139d3011d15646fdac13c92/numpy/core/src/multiarray/compiled_base.c#L628 # noqa: E501 + dz = np.asarray(x) + dx = np.asarray(xp) + dy = np.asarray(fp) + + if len(dx) == 0: + raise ValueError('array of sample points is empty') + + if len(dx) != len(dy): + raise ValueError('fp and xp are not of the same size.') + + if dx.size == 1: + return np.full(dz.shape, fill_value=dy[0], dtype=dtype) + + dres = np.empty(dz.shape, dtype=dtype) + + lenx = dz.size + lenxp = len(dx) + lval = dy[0] + rval = dy[lenxp - 1] + + if lenxp == 1: + xp_val = dx[0] + fp_val = dy[0] + + for i in range(lenx): + x_val = dz.flat[i] + if x_val < xp_val: + dres.flat[i] = lval + elif x_val > xp_val: + dres.flat[i] = rval + else: + dres.flat[i] = fp_val + + else: + j = 0 + + # only pre-calculate slopes if there are relatively few of them. 
+ if lenxp <= lenx: + slopes = np.empty((lenxp - 1), dtype=dtype) + else: + slopes = np.empty(0, dtype=dtype) + + if slopes.size: + for i in range(lenxp - 1): + inv_dx = 1 / (dx[i + 1] - dx[i]) + real = (dy[i + 1].real - dy[i].real) * inv_dx + imag = (dy[i + 1].imag - dy[i].imag) * inv_dx + slopes[i] = real + 1j * imag + + for i in range(lenx): + x_val = dz.flat[i] + + if np.isnan(x_val): + real = x_val + imag = 0.0 + dres.flat[i] = real + 1j * imag + continue + + j = binary_search_with_guess(x_val, dx, lenxp, j) + + if j == -1: + dres.flat[i] = lval + elif j == lenxp: + dres.flat[i] = rval + elif j == lenxp - 1: + dres.flat[i] = dy[j] + elif dx[j] == x_val: + # Avoid potential non-finite interpolation + dres.flat[i] = dy[j] + else: + if slopes.size: + slope = slopes[j] + else: + inv_dx = 1 / (dx[j + 1] - dx[j]) + real = (dy[j + 1].real - dy[j].real) * inv_dx + imag = (dy[j + 1].imag - dy[j].imag) * inv_dx + slope = real + 1j * imag + + # NumPy 1.17 handles NaN correctly - this is a copy of + # innermost part of arr_interp_complex post 1.17: + # https://github.com/numpy/numpy/blob/maintenance/1.17.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501 + # Permanent reference: + # https://github.com/numpy/numpy/blob/91fbe4dde246559fa5b085ebf4bc268e2b89eea8/numpy/core/src/multiarray/compiled_base.c#L798-L812 # noqa: E501 + + # If we get NaN in one direction, try the other + real = slope.real * (x_val - dx[j]) + dy[j].real + if np.isnan(real): + real = slope.real * (x_val - dx[j + 1]) + dy[j + 1].real + if np.isnan(real) and dy[j].real == dy[j + 1].real: + real = dy[j].real + + imag = slope.imag * (x_val - dx[j]) + dy[j].imag + if np.isnan(imag): + imag = slope.imag * (x_val - dx[j + 1]) + dy[j + 1].imag + if np.isnan(imag) and dy[j].imag == dy[j + 1].imag: + imag = dy[j].imag + + dres.flat[i] = real + 1j * imag + + return dres + + +@register_jitable +def np_interp_impl_inner(x, xp, fp, dtype): + # NOTE: Do not refactor... 
see note in np_interp function impl below + # this is a facsimile of arr_interp post 1.16: + # https://github.com/numpy/numpy/blob/maintenance/1.16.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501 + # Permanent reference: + # https://github.com/numpy/numpy/blob/971e2e89d08deeae0139d3011d15646fdac13c92/numpy/core/src/multiarray/compiled_base.c#L473 # noqa: E501 + dz = np.asarray(x, dtype=np.float64) + dx = np.asarray(xp, dtype=np.float64) + dy = np.asarray(fp, dtype=np.float64) + + if len(dx) == 0: + raise ValueError('array of sample points is empty') + + if len(dx) != len(dy): + raise ValueError('fp and xp are not of the same size.') + + if dx.size == 1: + return np.full(dz.shape, fill_value=dy[0], dtype=dtype) + + dres = np.empty(dz.shape, dtype=dtype) + + lenx = dz.size + lenxp = len(dx) + lval = dy[0] + rval = dy[lenxp - 1] + + if lenxp == 1: + xp_val = dx[0] + fp_val = dy[0] + + for i in range(lenx): + x_val = dz.flat[i] + if x_val < xp_val: + dres.flat[i] = lval + elif x_val > xp_val: + dres.flat[i] = rval + else: + dres.flat[i] = fp_val + + else: + j = 0 + + # only pre-calculate slopes if there are relatively few of them. 
+ if lenxp <= lenx: + slopes = (dy[1:] - dy[:-1]) / (dx[1:] - dx[:-1]) + else: + slopes = np.empty(0, dtype=dtype) + + for i in range(lenx): + x_val = dz.flat[i] + + if np.isnan(x_val): + dres.flat[i] = x_val + continue + + j = binary_search_with_guess(x_val, dx, lenxp, j) + + if j == -1: + dres.flat[i] = lval + elif j == lenxp: + dres.flat[i] = rval + elif j == lenxp - 1: + dres.flat[i] = dy[j] + elif dx[j] == x_val: + # Avoid potential non-finite interpolation + dres.flat[i] = dy[j] + else: + if slopes.size: + slope = slopes[j] + else: + slope = (dy[j + 1] - dy[j]) / (dx[j + 1] - dx[j]) + + dres.flat[i] = slope * (x_val - dx[j]) + dy[j] + + # NOTE: this is in np1.17 + # https://github.com/numpy/numpy/blob/maintenance/1.17.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501 + # Permanent reference: + # https://github.com/numpy/numpy/blob/91fbe4dde246559fa5b085ebf4bc268e2b89eea8/numpy/core/src/multiarray/compiled_base.c#L610-L616 # noqa: E501 + # + # If we get nan in one direction, try the other + if np.isnan(dres.flat[i]): + dres.flat[i] = slope * (x_val - dx[j + 1]) + dy[j + 1] # noqa: E501 + if np.isnan(dres.flat[i]) and dy[j] == dy[j + 1]: + dres.flat[i] = dy[j] + + return dres + + +@overload(np.interp) +def np_interp(x, xp, fp): + # Replicating basic interp is relatively simple, but matching the behaviour + # of NumPy for edge cases is really quite hard. After a couple of attempts + # to avoid translation of the C source it was deemed necessary. 
+ + if hasattr(xp, 'ndim') and xp.ndim > 1: + raise TypingError('xp must be 1D') + if hasattr(fp, 'ndim') and fp.ndim > 1: + raise TypingError('fp must be 1D') + + complex_dtype_msg = ( + "Cannot cast array data from complex dtype to float64 dtype" + ) + + xp_dt = determine_dtype(xp) + if np.issubdtype(xp_dt, np.complexfloating): + raise TypingError(complex_dtype_msg) + + fp_dt = determine_dtype(fp) + dtype = np.result_type(fp_dt, np.float64) + + if np.issubdtype(dtype, np.complexfloating): + inner = np_interp_impl_complex_inner + else: + inner = np_interp_impl_inner + + def np_interp_impl(x, xp, fp): + return inner(x, xp, fp, dtype) + + def np_interp_scalar_impl(x, xp, fp): + return inner(x, xp, fp, dtype).flat[0] + + if isinstance(x, types.Number): + if isinstance(x, types.Complex): + raise TypingError(complex_dtype_msg) + return np_interp_scalar_impl + + return np_interp_impl + + +#---------------------------------------------------------------------------- +# Statistics + +@register_jitable +def row_wise_average(a): + assert a.ndim == 2 + + m, n = a.shape + out = np.empty((m, 1), dtype=a.dtype) + + for i in range(m): + out[i, 0] = np.sum(a[i, :]) / n + + return out + + +@register_jitable +def np_cov_impl_inner(X, bias, ddof): + + # determine degrees of freedom + if ddof is None: + if bias: + ddof = 0 + else: + ddof = 1 + + # determine the normalization factor + fact = X.shape[1] - ddof + + # numpy warns if less than 0 and floors at 0 + fact = max(fact, 0.0) + + # de-mean + X -= row_wise_average(X) + + # calculate result - requires blas + c = np.dot(X, np.conj(X.T)) + c *= np.true_divide(1, fact) + return c + + +def _prepare_cov_input_inner(): + pass + + +@overload(_prepare_cov_input_inner) +def _prepare_cov_input_impl(m, y, rowvar, dtype): + if y in (None, types.none): + def _prepare_cov_input_inner(m, y, rowvar, dtype): + m_arr = np.atleast_2d(_asarray(m)) + + if not rowvar: + m_arr = m_arr.T + + return m_arr + else: + def _prepare_cov_input_inner(m, y, 
rowvar, dtype): + m_arr = np.atleast_2d(_asarray(m)) + y_arr = np.atleast_2d(_asarray(y)) + + # transpose if asked to and not a (1, n) vector - this looks + # wrong as you might end up transposing one and not the other, + # but it's what numpy does + if not rowvar: + if m_arr.shape[0] != 1: + m_arr = m_arr.T + if y_arr.shape[0] != 1: + y_arr = y_arr.T + + m_rows, m_cols = m_arr.shape + y_rows, y_cols = y_arr.shape + + if m_cols != y_cols: + raise ValueError("m and y have incompatible dimensions") + + # allocate and fill output array + out = np.empty((m_rows + y_rows, m_cols), dtype=dtype) + out[:m_rows, :] = m_arr + out[-y_rows:, :] = y_arr + + return out + + return _prepare_cov_input_inner + + +@register_jitable +def _handle_m_dim_change(m): + if m.ndim == 2 and m.shape[0] == 1: + msg = ("2D array containing a single row is unsupported due to " + "ambiguity in type inference. To use numpy.cov in this case " + "simply pass the row as a 1D array, i.e. m[0].") + raise RuntimeError(msg) + + +_handle_m_dim_nop = register_jitable(lambda x: x) + + +def determine_dtype(array_like): + array_like_dt = np.float64 + if isinstance(array_like, types.Array): + array_like_dt = as_dtype(array_like.dtype) + elif isinstance(array_like, (types.Number, types.Boolean)): + array_like_dt = as_dtype(array_like) + elif isinstance(array_like, (types.UniTuple, types.Tuple)): + coltypes = set() + for val in array_like: + if hasattr(val, 'count'): + [coltypes.add(v) for v in val] + else: + coltypes.add(val) + if len(coltypes) > 1: + array_like_dt = np.promote_types(*[as_dtype(ty) for ty in coltypes]) + elif len(coltypes) == 1: + array_like_dt = as_dtype(coltypes.pop()) + + return array_like_dt + + +def check_dimensions(array_like, name): + if isinstance(array_like, types.Array): + if array_like.ndim > 2: + raise NumbaTypeError("{0} has more than 2 dimensions".format(name)) + elif isinstance(array_like, types.Sequence): + if isinstance(array_like.key[0], types.Sequence): + if 
isinstance(array_like.key[0].key[0], types.Sequence): + msg = "{0} has more than 2 dimensions".format(name) + raise NumbaTypeError(msg) + + +@register_jitable +def _handle_ddof(ddof): + if not np.isfinite(ddof): + raise ValueError('Cannot convert non-finite ddof to integer') + if ddof - int(ddof) != 0: + raise ValueError('ddof must be integral value') + + +_handle_ddof_nop = register_jitable(lambda x: x) + + +@register_jitable +def _prepare_cov_input(m, y, rowvar, dtype, ddof, _DDOF_HANDLER, + _M_DIM_HANDLER): + _M_DIM_HANDLER(m) + _DDOF_HANDLER(ddof) + return _prepare_cov_input_inner(m, y, rowvar, dtype) + + +def scalar_result_expected(mandatory_input, optional_input): + opt_is_none = optional_input in (None, types.none) + + if isinstance(mandatory_input, types.Array) and mandatory_input.ndim == 1: + return opt_is_none + + if isinstance(mandatory_input, types.BaseTuple): + if all(isinstance(x, (types.Number, types.Boolean)) + for x in mandatory_input.types): + return opt_is_none + else: + if (len(mandatory_input.types) == 1 and + isinstance(mandatory_input.types[0], types.BaseTuple)): + return opt_is_none + + if isinstance(mandatory_input, (types.Number, types.Boolean)): + return opt_is_none + + if isinstance(mandatory_input, types.Sequence): + if (not isinstance(mandatory_input.key[0], types.Sequence) and + opt_is_none): + return True + + return False + + +@register_jitable +def _clip_corr(x): + return np.where(np.fabs(x) > 1, np.sign(x), x) + + +@register_jitable +def _clip_complex(x): + real = _clip_corr(x.real) + imag = _clip_corr(x.imag) + return real + 1j * imag + + +@overload(np.cov) +def np_cov(m, y=None, rowvar=True, bias=False, ddof=None): + + # reject problem if m and / or y are more than 2D + check_dimensions(m, 'm') + check_dimensions(y, 'y') + + # reject problem if ddof invalid (either upfront if type is + # obviously invalid, or later if value found to be non-integral) + if ddof in (None, types.none): + _DDOF_HANDLER = _handle_ddof_nop + else: + if 
isinstance(ddof, (types.Integer, types.Boolean)): + _DDOF_HANDLER = _handle_ddof_nop + elif isinstance(ddof, types.Float): + _DDOF_HANDLER = _handle_ddof + else: + raise TypingError('ddof must be a real numerical scalar type') + + # special case for 2D array input with 1 row of data - select + # handler function which we'll call later when we have access + # to the shape of the input array + _M_DIM_HANDLER = _handle_m_dim_nop + if isinstance(m, types.Array): + _M_DIM_HANDLER = _handle_m_dim_change + + # infer result dtype + m_dt = determine_dtype(m) + y_dt = determine_dtype(y) + dtype = np.result_type(m_dt, y_dt, np.float64) + + def np_cov_impl(m, y=None, rowvar=True, bias=False, ddof=None): + X = _prepare_cov_input(m, y, rowvar, dtype, ddof, _DDOF_HANDLER, + _M_DIM_HANDLER).astype(dtype) + + if np.any(np.array(X.shape) == 0): + return np.full((X.shape[0], X.shape[0]), fill_value=np.nan, + dtype=dtype) + else: + return np_cov_impl_inner(X, bias, ddof) + + def np_cov_impl_single_variable(m, y=None, rowvar=True, bias=False, + ddof=None): + X = _prepare_cov_input(m, y, rowvar, ddof, dtype, _DDOF_HANDLER, + _M_DIM_HANDLER).astype(dtype) + + if np.any(np.array(X.shape) == 0): + variance = np.nan + else: + variance = np_cov_impl_inner(X, bias, ddof).flat[0] + + return np.array(variance) + + if scalar_result_expected(m, y): + return np_cov_impl_single_variable + else: + return np_cov_impl + + +@overload(np.corrcoef) +def np_corrcoef(x, y=None, rowvar=True): + + x_dt = determine_dtype(x) + y_dt = determine_dtype(y) + dtype = np.result_type(x_dt, y_dt, np.float64) + + if dtype == np.complex128: + clip_fn = _clip_complex + else: + clip_fn = _clip_corr + + def np_corrcoef_impl(x, y=None, rowvar=True): + c = np.cov(x, y, rowvar) + d = np.diag(c) + stddev = np.sqrt(d.real) + + for i in range(c.shape[0]): + c[i, :] /= stddev + c[:, i] /= stddev + + return clip_fn(c) + + def np_corrcoef_impl_single_variable(x, y=None, rowvar=True): + c = np.cov(x, y, rowvar) + return c / c + + if 
scalar_result_expected(x, y): + return np_corrcoef_impl_single_variable + else: + return np_corrcoef_impl + + +#---------------------------------------------------------------------------- +# Element-wise computations + + +@overload(np.argwhere) +def np_argwhere(a): + # needs to be much more array-like for the array impl to work, Numba bug + # in one of the underlying function calls? + + use_scalar = isinstance(a, (types.Number, types.Boolean)) + if type_can_asarray(a) and not use_scalar: + def impl(a): + arr = np.asarray(a) + if arr.shape == (): + return np.zeros((0, 1), dtype=types.intp) + return np.transpose(np.vstack(np.nonzero(arr))) + else: + falseish = (0, 0) + trueish = (1, 0) + + def impl(a): + if a is not None and bool(a): + return np.zeros(trueish, dtype=types.intp) + else: + return np.zeros(falseish, dtype=types.intp) + + return impl + + +@overload(np.flatnonzero) +def np_flatnonzero(a): + + if type_can_asarray(a): + def impl(a): + arr = np.asarray(a) + return np.nonzero(np.ravel(arr))[0] + else: + def impl(a): + if a is not None and bool(a): + data = [0] + else: + data = [x for x in range(0)] + return np.array(data, dtype=types.intp) + + return impl + + +@register_jitable +def _fill_diagonal_params(a, wrap): + if a.ndim == 2: + m = a.shape[0] + n = a.shape[1] + step = 1 + n + if wrap: + end = n * m + else: + end = n * min(m, n) + else: + shape = np.array(a.shape) + + if not np.all(np.diff(shape) == 0): + raise ValueError("All dimensions of input must be of equal length") + + step = 1 + (np.cumprod(shape[:-1])).sum() + end = shape.prod() + + return end, step + + +@register_jitable +def _fill_diagonal_scalar(a, val, wrap): + end, step = _fill_diagonal_params(a, wrap) + + for i in range(0, end, step): + a.flat[i] = val + + +@register_jitable +def _fill_diagonal(a, val, wrap): + end, step = _fill_diagonal_params(a, wrap) + ctr = 0 + v_len = len(val) + + for i in range(0, end, step): + a.flat[i] = val[ctr] + ctr += 1 + ctr = ctr % v_len + + 
@overload(np.fill_diagonal)
def np_fill_diagonal(a, val, wrap=False):
    """Implementation of np.fill_diagonal.

    ``a`` must be at least 2-D; ``val`` may be a scalar or an array-like,
    which is flattened and range-checked against ``a.dtype`` before the
    diagonal is written.  For ``val`` types outside the handled ones this
    function falls through and returns None, i.e. no overload is
    registered for that signature.
    """

    if a.ndim > 1:
        # the following can be simplified after #3088; until then, employ
        # a basic mechanism for catching cases where val is of a type/value
        # which cannot safely be cast to a.dtype
        if isinstance(a.dtype, types.Integer):
            checker = _check_val_int
        elif isinstance(a.dtype, types.Float):
            checker = _check_val_float
        else:
            checker = _check_nop

        def scalar_impl(a, val, wrap=False):
            # validate the (flattened) value first, then write the scalar
            tmpval = _asarray(val).flatten()
            checker(a, tmpval)
            _fill_diagonal_scalar(a, val, wrap)

        def non_scalar_impl(a, val, wrap=False):
            # non-scalar values are written from their flattened form
            tmpval = _asarray(val).flatten()
            checker(a, tmpval)
            _fill_diagonal(a, tmpval, wrap)

        if isinstance(val, (types.Float, types.Integer, types.Boolean)):
            return scalar_impl
        elif isinstance(val, (types.Tuple, types.Sequence, types.Array)):
            return non_scalar_impl
    else:
        msg = "The first argument must be at least 2-D (found %s-D)" % a.ndim
        raise TypingError(msg)
@overload(np.around)
@overload(np.round)
def impl_np_round(a, decimals=0, out=None):
    """Implementation of np.round / np.around.

    Dispatches on the Numba type of ``a``:
      * scalar Float/Integer/Complex with ``out`` None -> scalar result
      * scalar with an ``out`` array -> stores into ``out[0]``, returns out
      * array with ``out`` None -> allocates a result and delegates
      * array with ``out`` -> element-wise rounding into ``out``
    """
    if not type_can_asarray(a):
        raise TypingError('The argument "a" must be array-like')

    if not (isinstance(out, types.Array) or is_nonelike(out)):
        msg = 'The argument "out" must be an array if it is provided'
        raise TypingError(msg)

    if isinstance(a, (types.Float, types.Integer, types.Complex)):
        if is_nonelike(out):
            if isinstance(a, types.Float):
                def impl(a, decimals=0, out=None):
                    # decimals == 0 takes the llvm.rint fast path
                    if decimals == 0:
                        return _np_round_float(a)
                    else:
                        return round_ndigits(a, decimals)
                return impl
            elif isinstance(a, types.Integer):
                def impl(a, decimals=0, out=None):
                    if decimals == 0:
                        # rounding an integer to 0 decimals is the identity
                        return a
                    else:
                        return int(round_ndigits(a, decimals))
                return impl
            elif isinstance(a, types.Complex):
                def impl(a, decimals=0, out=None):
                    # round real and imaginary parts independently, as
                    # NumPy does
                    if decimals == 0:
                        real = _np_round_float(a.real)
                        imag = _np_round_float(a.imag)
                    else:
                        real = round_ndigits(a.real, decimals)
                        imag = round_ndigits(a.imag, decimals)
                    return complex(real, imag)
                return impl
        else:
            def impl(a, decimals=0, out=None):
                # scalar input with an output array: result goes in out[0]
                out[0] = np.round(a, decimals)
                return out
            return impl
    elif isinstance(a, types.Array):
        if is_nonelike(out):
            def impl(a, decimals=0, out=None):
                # allocate and reuse the out-array implementation below
                out = np.empty_like(a)
                return np.round(a, decimals, out)
            return impl
        else:
            def impl(a, decimals=0, out=None):
                if a.shape != out.shape:
                    raise ValueError("invalid output shape")
                for index, val in np.ndenumerate(a):
                    out[index] = np.round(val, decimals)
                return out
            return impl


# np.round_ is a deprecated alias that was removed in NumPy 2.0
if numpy_version < (2, 0):
    overload(np.round_)(impl_np_round)
@overload(np.angle)
def ov_np_angle(z, deg=False):
    """Implementation of np.angle: the angle of a complex value (or of each
    element of an array) measured from the positive real axis, in radians
    or, when ``deg`` is true, in degrees.
    """
    # degree conversion factor, frozen at overload-compile time
    deg_mult = float(180 / np.pi)

    # non-complex scalar values are accepted as well
    if isinstance(z, types.Number):
        def impl(z, deg=False):
            if deg:
                return np.arctan2(z.imag, z.real) * deg_mult
            else:
                return np.arctan2(z.imag, z.real)
        return impl
    elif isinstance(z, types.Array):
        dtype = z.dtype

        if isinstance(dtype, types.Complex):
            # the result is real-valued: use the matching float precision
            ret_dtype = dtype.underlying_float
        elif isinstance(dtype, types.Float):
            ret_dtype = dtype
        else:
            # other dtypes (e.g. integer arrays) are unsupported here;
            # returning None registers no overload for them
            return

        def impl(z, deg=False):
            out = np.zeros_like(z, dtype=ret_dtype)
            for index, val in np.ndenumerate(z):
                out[index] = np.angle(val, deg)
            return out
        return impl
    else:
        raise NumbaTypeError('Argument "z" must be a complex '
                             f'or Array[complex]. Got {z}')
store_item(context, builder, outaryty, indices[i], ptr) + builder.store(builder.add(cur, one), index) + + tup = context.make_tuple(builder, sig.return_type, outs) + return impl_ret_new_ref(context, builder, sig.return_type, tup) + + +def _where_zero_size_array_impl(dtype): + def impl(condition, x, y): + x_ = np.asarray(x).astype(dtype) + y_ = np.asarray(y).astype(dtype) + return x_ if condition else y_ + return impl + + +@register_jitable +def _where_generic_inner_impl(cond, x, y, res): + for idx, c in np.ndenumerate(cond): + res[idx] = x[idx] if c else y[idx] + return res + + +@register_jitable +def _where_fast_inner_impl(cond, x, y, res): + cf = cond.flat + xf = x.flat + yf = y.flat + rf = res.flat + for i in range(cond.size): + rf[i] = xf[i] if cf[i] else yf[i] + return res + + +def _where_generic_impl(dtype, layout): + use_faster_impl = layout in [{'C'}, {'F'}] + + def impl(condition, x, y): + cond1, x1, y1 = np.asarray(condition), np.asarray(x), np.asarray(y) + shape = np.broadcast_shapes(cond1.shape, x1.shape, y1.shape) + cond_ = np.broadcast_to(cond1, shape) + x_ = np.broadcast_to(x1, shape) + y_ = np.broadcast_to(y1, shape) + + if layout == 'F': + res = np.empty(shape[::-1], dtype=dtype).T + else: + res = np.empty(shape, dtype=dtype) + + if use_faster_impl: + return _where_fast_inner_impl(cond_, x_, y_, res) + else: + return _where_generic_inner_impl(cond_, x_, y_, res) + + return impl + + +@overload(np.where) +def ov_np_where(condition): + if not type_can_asarray(condition): + msg = 'The argument "condition" must be array-like' + raise NumbaTypeError(msg) + + def where_cond_none_none(condition): + return np.asarray(condition).nonzero() + return where_cond_none_none + + +@overload(np.where) +def ov_np_where_x_y(condition, x, y): + if not type_can_asarray(condition): + msg = 'The argument "condition" must be array-like' + raise NumbaTypeError(msg) + + # corner case: None is a valid value for np.where: + # >>> np.where([0, 1], None, 2) + # array([None, 2]) + 
# + # >>> np.where([0, 1], 2, None) + # array([2, None]) + # + # >>> np.where([0, 1], None, None) + # array([None, None]) + if is_nonelike(x) or is_nonelike(y): + # skip it for now as np.asarray(None) is not supported + raise NumbaTypeError('Argument "x" or "y" cannot be None') + + for arg, name in zip((x, y), ('x', 'y')): + if not type_can_asarray(arg): + msg = 'The argument "{}" must be array-like if provided' + raise NumbaTypeError(msg.format(name)) + + cond_arr = isinstance(condition, types.Array) + x_arr = isinstance(x, types.Array) + y_arr = isinstance(y, types.Array) + + if cond_arr: + x_dt = determine_dtype(x) + y_dt = determine_dtype(y) + dtype = np.promote_types(x_dt, y_dt) + + # corner case - 0 dim values + def check_0_dim(arg): + return isinstance(arg, types.Number) or ( + isinstance(arg, types.Array) and arg.ndim == 0) + special_0_case = all([check_0_dim(a) for a in (condition, x, y)]) + if special_0_case: + return _where_zero_size_array_impl(dtype) + + layout = condition.layout + if x_arr and y_arr: + if x.layout == y.layout == condition.layout: + layout = x.layout + else: + layout = 'A' + return _where_generic_impl(dtype, layout) + else: + def impl(condition, x, y): + return np.where(np.asarray(condition), np.asarray(x), np.asarray(y)) + return impl + + +@overload(np.real) +def np_real(val): + def np_real_impl(val): + return val.real + + return np_real_impl + + +@overload(np.imag) +def np_imag(val): + def np_imag_impl(val): + return val.imag + + return np_imag_impl + + +#---------------------------------------------------------------------------- +# Misc functions + +@overload(operator.contains) +def np_contains(arr, key): + if not isinstance(arr, types.Array): + return + + def np_contains_impl(arr, key): + for x in np.nditer(arr): + if x == key: + return True + return False + + return np_contains_impl + + +@overload(np.count_nonzero) +def np_count_nonzero(a, axis=None): + if not type_can_asarray(a): + raise TypingError("The argument to 
@overload(np.delete)
def np_delete(arr, obj):
    """Implementation of np.delete.

    No ``axis`` support: like NumPy with axis=None, the input is flattened
    and elements selected by ``obj`` (a slice, an integer array/sequence,
    or a single integer) are removed.
    """
    # Implementation based on numpy
    # https://github.com/numpy/numpy/blob/af66e487a57bfd4850f4306e3b85d1dac3c70412/numpy/lib/function_base.py#L4065-L4267 # noqa: E501

    if not isinstance(arr, (types.Array, types.Sequence)):
        raise TypingError("arr must be either an Array or a Sequence")

    if isinstance(obj, (types.Array, types.Sequence, types.SliceType)):
        if isinstance(obj, (types.SliceType)):
            # a slice can index the boolean keep-mask directly
            handler = np_delete_handler_isslice
        else:
            if not isinstance(obj.dtype, types.Integer):
                raise TypingError('obj should be of Integer dtype')
            # sequences/arrays are conformed to an array first
            handler = np_delete_handler_isarray

        def np_delete_impl(arr, obj):
            arr = np.ravel(np.asarray(arr))
            N = arr.size

            # keep everything, then mask out the requested entries
            keep = np.ones(N, dtype=np.bool_)
            obj = handler(obj)
            keep[obj] = False
            return arr[keep]
        return np_delete_impl

    else:  # scalar value
        if not isinstance(obj, types.Integer):
            raise TypingError('obj should be of Integer dtype')

        def np_delete_scalar_impl(arr, obj):
            arr = np.ravel(np.asarray(arr))
            N = arr.size
            pos = obj

            if (pos < -N or pos >= N):
                raise IndexError('obj must be less than the len(arr)')
            # NumPy raises IndexError: index 'i' is out of
            # bounds for axis 'x' with size 'n'

            if (pos < 0):
                # negative indices count from the end
                pos += N

            return np.concatenate((arr[:pos], arr[pos + 1:]))
        return np_delete_scalar_impl
@overload(np.array_equal)
def np_array_equal(a1, a2):
    """Implementation of np.array_equal: True when both operands have the
    same shape and all elements compare equal.
    """
    if not (type_can_asarray(a1) and type_can_asarray(a2)):
        raise TypingError('Both arguments to "array_equals" must be array-like')

    scalar_types = (types.Boolean, types.Number)
    both_scalar = isinstance(a1, scalar_types) and isinstance(a2, scalar_types)

    if both_scalar:
        # scalar fast path: a direct comparison, no array conversion
        def impl(a1, a2):
            return a1 == a2
        return impl

    def impl(a1, a2):
        lhs = np.asarray(a1)
        rhs = np.asarray(a2)
        # shape mismatch means "not equal" rather than an error
        if lhs.shape != rhs.shape:
            return False
        return np.all(lhs == rhs)

    return impl
+ ar2 = ar2.ravel() + + aux = np.concatenate((ar1, ar2)) + aux.sort() + mask = aux[1:] == aux[:-1] + int1d = aux[:-1][mask] + return int1d + return np_intersects1d_impl + + +def validate_1d_array_like(func_name, seq): + if isinstance(seq, types.Array): + if seq.ndim != 1: + raise NumbaTypeError("{0}(): input should have dimension 1" + .format(func_name)) + elif not isinstance(seq, types.Sequence): + raise NumbaTypeError("{0}(): input should be an array or sequence" + .format(func_name)) + + +@overload(np.bincount) +def np_bincount(a, weights=None, minlength=0): + validate_1d_array_like("bincount", a) + + if not isinstance(a.dtype, types.Integer): + return + + check_is_integer(minlength, 'minlength') + + if weights not in (None, types.none): + validate_1d_array_like("bincount", weights) + # weights is promoted to double in C impl + # https://github.com/numpy/numpy/blob/maintenance/1.16.x/numpy/core/src/multiarray/compiled_base.c#L93-L95 # noqa: E501 + out_dtype = np.float64 + + @register_jitable + def validate_inputs(a, weights, minlength): + if len(a) != len(weights): + raise ValueError("bincount(): weights and list don't have " + "the same length") + + @register_jitable + def count_item(out, idx, val, weights): + out[val] += weights[idx] + + else: + out_dtype = types.intp + + @register_jitable + def validate_inputs(a, weights, minlength): + pass + + @register_jitable + def count_item(out, idx, val, weights): + out[val] += 1 + + def bincount_impl(a, weights=None, minlength=0): + validate_inputs(a, weights, minlength) + if minlength < 0: + raise ValueError("'minlength' must not be negative") + + n = len(a) + a_max = a[0] if n > 0 else -1 + for i in range(1, n): + if a[i] < 0: + raise ValueError("bincount(): first argument must be " + "non-negative") + a_max = max(a_max, a[i]) + + out_length = max(a_max + 1, minlength) + out = np.zeros(out_length, out_dtype) + for i in range(n): + count_item(out, i, a[i], weights) + return out + + return bincount_impl + + 
+less_than_float = register_jitable(lt_floats) +less_than_complex = register_jitable(lt_complex) + + +@register_jitable +def less_than_or_equal_complex(a, b): + if np.isnan(a.real): + if np.isnan(b.real): + if np.isnan(a.imag): + return np.isnan(b.imag) + else: + if np.isnan(b.imag): + return True + else: + return a.imag <= b.imag + else: + return False + + else: + if np.isnan(b.real): + return True + else: + if np.isnan(a.imag): + if np.isnan(b.imag): + return a.real <= b.real + else: + return False + else: + if np.isnan(b.imag): + return True + else: + if a.real < b.real: + return True + elif a.real == b.real: + return a.imag <= b.imag + return False + + +@register_jitable +def _less_than_or_equal(a, b): + if isinstance(a, complex) or isinstance(b, complex): + return less_than_or_equal_complex(a, b) + + elif isinstance(b, float): + if np.isnan(b): + return True + + return a <= b + + +@register_jitable +def _less_than(a, b): + if isinstance(a, complex) or isinstance(b, complex): + return less_than_complex(a, b) + + elif isinstance(b, float): + return less_than_float(a, b) + + return a < b + + +@register_jitable +def _less_then_datetime64(a, b): + # Original numpy code is at: + # https://github.com/numpy/numpy/blob/3dad50936a8dc534a81a545365f69ee9ab162ffe/numpy/_core/src/npysort/npysort_common.h#L334-L346 + if np.isnat(a): + return 0 + + if np.isnat(b): + return 1 + + return a < b + + +@register_jitable +def _less_then_or_equal_datetime64(a, b): + return not _less_then_datetime64(b, a) + + +def _searchsorted(cmp): + # a facsimile of: + # https://github.com/numpy/numpy/blob/4f84d719657eb455a35fcdf9e75b83eb1f97024a/numpy/core/src/npysort/binsearch.cpp#L61 # noqa: E501 + + def impl(a, key_val, min_idx, max_idx): + while min_idx < max_idx: + # to avoid overflow + mid_idx = min_idx + ((max_idx - min_idx) >> 1) + mid_val = a[mid_idx] + if cmp(mid_val, key_val): + min_idx = mid_idx + 1 + else: + max_idx = mid_idx + return min_idx, max_idx + + return impl + + 
+VALID_SEARCHSORTED_SIDES = frozenset({'left', 'right'}) + + +def make_searchsorted_implementation(np_dtype, side): + assert side in VALID_SEARCHSORTED_SIDES + + if np_dtype.char in 'mM': + # is datetime + lt = _less_then_datetime64 + le = _less_then_or_equal_datetime64 + else: + lt = _less_than + le = _less_than_or_equal + + if side == 'left': + _impl = _searchsorted(lt) + _cmp = lt + else: + _impl = _searchsorted(le) + _cmp = le + + return register_jitable(_impl), register_jitable(_cmp) + + +@overload(np.searchsorted) +def searchsorted(a, v, side='left'): + side_val = getattr(side, 'literal_value', side) + + if side_val not in VALID_SEARCHSORTED_SIDES: + # could change this so that side doesn't need to be + # a compile-time constant + raise NumbaValueError(f"Invalid value given for 'side': {side_val}") + + if isinstance(v, (types.Array, types.Sequence)): + v_dt = as_dtype(v.dtype) + else: + v_dt = as_dtype(v) + + np_dt = np.promote_types(as_dtype(a.dtype), v_dt) + _impl, _cmp = make_searchsorted_implementation(np_dt, side_val) + + if isinstance(v, types.Array): + def impl(a, v, side='left'): + out = np.empty(v.size, dtype=np.intp) + last_key_val = v.flat[0] + min_idx = 0 + max_idx = len(a) + + for i in range(v.size): + key_val = v.flat[i] + + if _cmp(last_key_val, key_val): + max_idx = len(a) + else: + min_idx = 0 + if max_idx < len(a): + max_idx += 1 + else: + max_idx = len(a) + + last_key_val = key_val + min_idx, max_idx = _impl(a, key_val, min_idx, max_idx) + out[i] = min_idx + + return out.reshape(v.shape) + elif isinstance(v, types.Sequence): + def impl(a, v, side='left'): + v = np.asarray(v) + return np.searchsorted(a, v, side=side) + else: # presumably `v` is scalar + def impl(a, v, side='left'): + r, _ = _impl(a, v, 0, len(a)) + return r + return impl + + +@overload(np.digitize) +def np_digitize(x, bins, right=False): + + if isinstance(x, types.Array) and x.dtype in types.complex_domain: + raise TypingError('x may not be complex') + + @register_jitable + 
def _monotonicity(bins): + + # all bin edges hold the same value + if len(bins) == 0: + return 1 + + # Skip repeated values at the beginning of the array + last_value = bins[0] + i = 1 + while i < len(bins) and bins[i] == last_value: + i += 1 + + # all bin edges hold the same value + if i == len(bins): + return 1 + + next_value = bins[i] + + if last_value < next_value: + # Possibly monotonic increasing + for i in range(i + 1, len(bins)): + last_value = next_value + next_value = bins[i] + if last_value > next_value: + return 0 + return 1 + + else: + # last > next, possibly monotonic decreasing + for i in range(i + 1, len(bins)): + last_value = next_value + next_value = bins[i] + if last_value < next_value: + return 0 + return -1 + + def digitize_impl(x, bins, right=False): + + mono = _monotonicity(bins) + + if mono == 0: + raise ValueError( + "bins must be monotonically increasing or decreasing" + ) + + # this is backwards because the arguments below are swapped + if right: + if mono == -1: + # reverse the bins, and invert the results + return len(bins) - np.searchsorted(bins[::-1], x, side='left') + else: + return np.searchsorted(bins, x, side='left') + else: + if mono == -1: + # reverse the bins, and invert the results + return len(bins) - np.searchsorted(bins[::-1], x, side='right') + else: + return np.searchsorted(bins, x, side='right') + + return digitize_impl + + +_range = range + + +@overload(np.histogram) +def np_histogram(a, bins=10, range=None): + if isinstance(bins, (int, types.Integer)): + # With a uniform distribution of bins, use a fast algorithm + # independent of the number of bins + + if range in (None, types.none): + inf = float('inf') + + def histogram_impl(a, bins=10, range=None): + bin_min = inf + bin_max = -inf + for view in np.nditer(a): + v = view.item() + if bin_min > v: + bin_min = v + if bin_max < v: + bin_max = v + return np.histogram(a, bins, (bin_min, bin_max)) + + else: + def histogram_impl(a, bins=10, range=None): + if bins <= 0: + 
raise ValueError("histogram(): `bins` should be a " + "positive integer") + bin_min, bin_max = range + if not bin_min <= bin_max: + raise ValueError("histogram(): max must be larger than " + "min in range parameter") + + hist = np.zeros(bins, np.intp) + if bin_max > bin_min: + bin_ratio = bins / (bin_max - bin_min) + for view in np.nditer(a): + v = view.item() + b = math.floor((v - bin_min) * bin_ratio) + if 0 <= b < bins: + hist[int(b)] += 1 + elif v == bin_max: + hist[bins - 1] += 1 + + bins_array = np.linspace(bin_min, bin_max, bins + 1) + return hist, bins_array + + else: + # With a custom bins array, use a bisection search + + def histogram_impl(a, bins=10, range=None): + nbins = len(bins) - 1 + for i in _range(nbins): + # Note this also catches NaNs + if not bins[i] <= bins[i + 1]: + raise ValueError("histogram(): bins must increase " + "monotonically") + + bin_min = bins[0] + bin_max = bins[nbins] + hist = np.zeros(nbins, np.intp) + + if nbins > 0: + for view in np.nditer(a): + v = view.item() + if not bin_min <= v <= bin_max: + # Value is out of bounds, ignore (also catches NaNs) + continue + # Bisect in bins[:-1] + lo = 0 + hi = nbins - 1 + while lo < hi: + # Note the `+ 1` is necessary to avoid an infinite + # loop where mid = lo => lo = mid + mid = (lo + hi + 1) >> 1 + if v < bins[mid]: + hi = mid - 1 + else: + lo = mid + hist[lo] += 1 + + return hist, bins + + return histogram_impl + + +# Create np.finfo, np.iinfo and np.MachAr +# machar +_mach_ar_supported = ('ibeta', 'it', 'machep', 'eps', 'negep', 'epsneg', + 'iexp', 'minexp', 'xmin', 'maxexp', 'xmax', 'irnd', + 'ngrd', 'epsilon', 'tiny', 'huge', 'precision', + 'resolution',) +MachAr = namedtuple('MachAr', _mach_ar_supported) + +# Do not support MachAr field +# finfo +_finfo_supported = ('eps', 'epsneg', 'iexp', 'machep', 'max', 'maxexp', 'min', + 'minexp', 'negep', 'nexp', 'nmant', 'precision', + 'resolution', 'tiny', 'bits',) + + +finfo = namedtuple('finfo', _finfo_supported) + +# iinfo 
def generate_xinfo_body(arg, np_func, container, attr):
    """Build a jitable implementation returning ``container`` (a namedtuple
    mirroring np.finfo / np.iinfo) populated from ``np_func(dtype)``.

    Returns None when NumPy rejects the dtype so the caller can raise a
    clear typing error.
    """
    nbty = getattr(arg, 'dtype', arg)
    np_dtype = as_dtype(nbty)
    try:
        f = np_func(np_dtype)
    except ValueError:  # This exception instance comes from NumPy
        # The np function might not support the dtype
        return None
    # capture the attribute values at compile time; they are constants for
    # a given dtype
    data = tuple([getattr(f, x) for x in attr])

    @register_jitable
    def impl(arg):
        return container(*data)
    return impl


@overload(np.finfo)
def ol_np_finfo(dtype):
    """Implementation of np.finfo returning the ``finfo`` namedtuple."""
    fn = generate_xinfo_body(dtype, np.finfo, finfo, _finfo_supported)
    if fn is None:
        # Previously an unsupported dtype left fn as None and the returned
        # impl tried to call it, surfacing an opaque error; fail early with
        # an explicit typing error instead.
        raise TypingError(f"np.finfo() is unsupported for type {dtype}")

    def impl(dtype):
        return fn(dtype)
    return impl


@overload(np.iinfo)
def ol_np_iinfo(int_type):
    """Implementation of np.iinfo returning the ``iinfo`` namedtuple."""
    fn = generate_xinfo_body(int_type, np.iinfo, iinfo, _iinfo_supported)
    if fn is None:
        # same early failure as ol_np_finfo for non-integer dtypes
        raise TypingError(f"np.iinfo() is unsupported for type {int_type}")

    def impl(int_type):
        return fn(int_type)
    return impl
use local function regardless + if not _HAVE_BLAS: + return _innerprod + + flty = types.real_domain | types.complex_domain + floats = dta in flty and dtb in flty + if not floats: + return _innerprod + else: + a_dt = as_dtype(dta) + b_dt = as_dtype(dtb) + dt = np.promote_types(a_dt, b_dt) + + @register_jitable + def _dot_wrap(a, b): + return np.dot(a.astype(dt), b.astype(dt)) + return _dot_wrap + + +def _assert_1d(a, func_name): + if isinstance(a, types.Array): + if not a.ndim <= 1: + raise TypingError("%s() only supported on 1D arrays " % func_name) + + +def _np_correlate_core(ap1, ap2, mode, direction): + pass + + +@overload(_np_correlate_core) +def _np_correlate_core_impl(ap1, ap2, mode, direction): + a_dt = as_dtype(ap1.dtype) + b_dt = as_dtype(ap2.dtype) + dt = np.promote_types(a_dt, b_dt) + innerprod = _get_inner_prod(ap1.dtype, ap2.dtype) + + def impl(ap1, ap2, mode, direction): + # Implementation loosely based on `_pyarray_correlate` from + # https://github.com/numpy/numpy/blob/3bce2be74f228684ca2895ad02b63953f37e2a9d/numpy/core/src/multiarray/multiarraymodule.c#L1191 # noqa: E501 + # For "mode": + # Convolve uses 'full' by default. + # Correlate uses 'valid' by default. + # For "direction", +1 to write the return values out in order 0->N + # -1 to write them out N->0. + + n1 = len(ap1) + n2 = len(ap2) + + if n1 < n2: + # This should never occur when called by np.convolve because + # _np_correlate.impl swaps arguments based on length. + # The same applies for np.correlate. 
+ raise ValueError("'len(ap1)' must greater than 'len(ap2)'") + + length = n1 + n = n2 + if mode == "valid": + length = length - n + 1 + n_left = 0 + n_right = 0 + elif mode == "full": + n_right = n - 1 + n_left = n - 1 + length = length + n - 1 + elif mode == "same": + n_left = n // 2 + n_right = n - n_left - 1 + else: + raise ValueError( + "Invalid 'mode', " + "valid are 'full', 'same', 'valid'" + ) + + ret = np.zeros(length, dt) + + if direction == 1: + idx = 0 + inc = 1 + elif direction == -1: + idx = length - 1 + inc = -1 + else: + raise ValueError("Invalid direction") + + for i in range(n_left): + k = i + n - n_left + ret[idx] = innerprod(ap1[:k], ap2[-k:]) + idx = idx + inc + + for i in range(n1 - n2 + 1): + ret[idx] = innerprod(ap1[i : i + n2], ap2) + idx = idx + inc + + for i in range(n_right): + k = n - i - 1 + ret[idx] = innerprod(ap1[-k:], ap2[:k]) + idx = idx + inc + + return ret + + return impl + + +@overload(np.correlate) +def _np_correlate(a, v, mode="valid"): + _assert_1d(a, 'np.correlate') + _assert_1d(v, 'np.correlate') + + @register_jitable + def op_conj(x): + return np.conj(x) + + @register_jitable + def op_nop(x): + return x + + if a.dtype in types.complex_domain: + if v.dtype in types.complex_domain: + a_op = op_nop + b_op = op_conj + else: + a_op = op_nop + b_op = op_nop + else: + if v.dtype in types.complex_domain: + a_op = op_nop + b_op = op_conj + else: + a_op = op_conj + b_op = op_nop + + def impl(a, v, mode="valid"): + la = len(a) + lv = len(v) + + if la == 0: + raise ValueError("'a' cannot be empty") + if lv == 0: + raise ValueError("'v' cannot be empty") + + if la < lv: + return _np_correlate_core(b_op(v), a_op(a), mode, -1) + else: + return _np_correlate_core(a_op(a), b_op(v), mode, 1) + + return impl + + +@overload(np.convolve) +def np_convolve(a, v, mode="full"): + _assert_1d(a, 'np.convolve') + _assert_1d(v, 'np.convolve') + + def impl(a, v, mode="full"): + la = len(a) + lv = len(v) + + if la == 0: + raise ValueError("'a' cannot 
be empty") + if lv == 0: + raise ValueError("'v' cannot be empty") + + if la < lv: + return _np_correlate_core(v, a[::-1], mode, 1) + else: + return _np_correlate_core(a, v[::-1], mode, 1) + + return impl + + +@overload(np.asarray) +def np_asarray(a, dtype=None): + + # developer note... keep this function (type_can_asarray) in sync with the + # accepted types implementations below! + if not type_can_asarray(a): + return None + + if isinstance(a, types.Array): + if is_nonelike(dtype) or a.dtype == dtype.dtype: + def impl(a, dtype=None): + return a + else: + def impl(a, dtype=None): + return a.astype(dtype) + elif isinstance(a, (types.Sequence, types.Tuple)): + # Nested lists cannot be unpacked, therefore only single lists are + # permitted and these conform to Sequence and can be unpacked along on + # the same path as Tuple. + if is_nonelike(dtype): + def impl(a, dtype=None): + return np.array(a) + else: + def impl(a, dtype=None): + return np.array(a, dtype) + elif isinstance(a, (types.Number, types.Boolean)): + dt_conv = a if is_nonelike(dtype) else dtype + ty = as_dtype(dt_conv) + + def impl(a, dtype=None): + return np.array(a, ty) + elif isinstance(a, types.containers.ListType): + if not isinstance(a.dtype, (types.Number, types.Boolean)): + raise TypingError( + "asarray support for List is limited " + "to Boolean and Number types") + + target_dtype = a.dtype if is_nonelike(dtype) else dtype + + def impl(a, dtype=None): + l = len(a) + ret = np.empty(l, dtype=target_dtype) + for i, v in enumerate(a): + ret[i] = v + return ret + elif isinstance(a, types.StringLiteral): + arr = np.asarray(a.literal_value) + + def impl(a, dtype=None): + return arr.copy() + else: + impl = None + + return impl + + +if numpy_version < (2, 0): + @overload(np.asfarray) + def np_asfarray(a, dtype=np.float64): + # convert numba dtype types into NumPy dtype + if isinstance(dtype, types.Type): + dtype = as_dtype(dtype) + if not np.issubdtype(dtype, np.inexact): + dx = types.float64 + else: + 
@overload(np.extract)
def np_extract(condition, arr):
    """Implementation of np.extract: return the elements of ``arr`` (read
    in flat order) at positions where the flattened ``condition`` is true.
    """

    def np_extract_impl(condition, arr):
        mask = np.asarray(condition).flatten()
        data = np.asarray(arr)

        if data.size == 0:
            raise ValueError('Cannot extract from an empty array')

        # Replicate NumPy's quirky shape check: a condition longer than the
        # data is only an error when one of its trailing entries is set.
        # https://github.com/numpy/numpy/issues/12859
        if np.any(mask[data.size:]) and mask.size > data.size:
            raise ValueError('condition shape inconsistent with arr shape')
        # NumPy raises IndexError: index 'm' is out of
        # bounds for size 'n'

        n = min(data.size, mask.size)
        selected = [data.flat[i] for i in range(n) if mask[i]]

        return np.array(selected)

    return np_extract_impl
to + # accept tuples + if not isinstance(condlist[0], types.Array): + raise NumbaTypeError('items of condlist must be arrays') + if not isinstance(choicelist[0], types.Array): + raise NumbaTypeError('items of choicelist must be arrays') + # the types of the parameters and their contents have been checked, + # now we test the dtypes of the content of parameters + if isinstance(condlist[0], types.Array): + if not isinstance(condlist[0].dtype, types.Boolean): + raise NumbaTypeError('condlist arrays must contain booleans') + if isinstance(condlist[0], types.UniTuple): + if not (isinstance(condlist[0], types.UniTuple) + and isinstance(condlist[0][0], types.Boolean)): + raise NumbaTypeError('condlist tuples must only contain booleans') + # the input types are correct, now we perform checks on the dimensions + if (isinstance(condlist[0], types.Array) and + condlist[0].ndim != choicelist[0].ndim): + raise NumbaTypeError('condlist and choicelist elements must have the ' + 'same number of dimensions') + if isinstance(condlist[0], types.Array) and condlist[0].ndim < 1: + raise NumbaTypeError('condlist arrays must be of at least dimension 1') + + return np_select_arr_impl + + +@overload(np.union1d) +def np_union1d(ar1, ar2): + if not type_can_asarray(ar1) or not type_can_asarray(ar2): + raise TypingError("The arguments to np.union1d must be array-like") + if (('unichr' in ar1.dtype.name or 'unichr' in ar2.dtype.name) and + ar1.dtype.name != ar2.dtype.name): + raise TypingError("For Unicode arrays, arrays must have same dtype") + + def union_impl(ar1, ar2): + a = np.ravel(np.asarray(ar1)) + b = np.ravel(np.asarray(ar2)) + return np.unique(np.concatenate((a, b))) + + return union_impl + + +@overload(np.asarray_chkfinite) +def np_asarray_chkfinite(a, dtype=None): + + msg = "The argument to np.asarray_chkfinite must be array-like" + if not isinstance(a, (types.Array, types.Sequence, types.Tuple)): + raise TypingError(msg) + + if is_nonelike(dtype): + dt = a.dtype + else: + try: + 
dt = as_dtype(dtype) + except NumbaNotImplementedError: + raise TypingError('dtype must be a valid Numpy dtype') + + def impl(a, dtype=None): + a = np.asarray(a, dtype=dt) + for i in np.nditer(a): + if not np.isfinite(i): + raise ValueError("array must not contain infs or NaNs") + return a + + return impl + + +@overload(np.unwrap) +def numpy_unwrap(p, discont=None, axis=-1, period=6.283185307179586): + if not isinstance(axis, (int, types.Integer)): + msg = 'The argument "axis" must be an integer' + raise TypingError(msg) + + if not type_can_asarray(p): + msg = 'The argument "p" must be array-like' + raise TypingError(msg) + + if (not isinstance(discont, (types.Integer, types.Float)) + and not cgutils.is_nonelike(discont)): + msg = 'The argument "discont" must be a scalar' + raise TypingError(msg) + + if not isinstance(period, (float, types.Number)): + msg = 'The argument "period" must be a scalar' + raise TypingError(msg) + + slice1 = (slice(1, None, None),) + if isinstance(period, types.Number): + dtype = np.result_type(as_dtype(p.dtype), as_dtype(period)) + else: + dtype = np.result_type(as_dtype(p.dtype), np.float64) + + integer_input = np.issubdtype(dtype, np.integer) + + def impl(p, discont=None, axis=-1, period=6.283185307179586): + if axis != -1: + msg = 'Value for argument "axis" is not supported' + raise ValueError(msg) + # Flatten to a 2D array, keeping axis -1 + p_init = np.asarray(p).astype(dtype) + init_shape = p_init.shape + last_axis = init_shape[-1] + p_new = p_init.reshape((p_init.size // last_axis, last_axis)) + # Manipulate discont and period + if discont is None: + discont = period / 2 + if integer_input: + interval_high, rem = divmod(period, 2) + boundary_ambiguous = rem == 0 + else: + interval_high = period / 2 + boundary_ambiguous = True + interval_low = -interval_high + + # Work on each row separately + for i in range(p_init.size // last_axis): + row = p_new[i] + dd = np.diff(row) + ddmod = np.mod(dd - interval_low, period) + interval_low + 
if boundary_ambiguous: + ddmod = np.where((ddmod == interval_low) & (dd > 0), + interval_high, ddmod) + ph_correct = ddmod - dd + + ph_correct = np.where(np.array([abs(x) for x in dd]) < discont, 0, + ph_correct) + ph_ravel = np.where(np.array([abs(x) for x in dd]) < discont, 0, + ph_correct) + ph_correct = np.reshape(ph_ravel, ph_correct.shape) + up = np.copy(row) + up[slice1] = row[slice1] + ph_correct.cumsum() + p_new[i] = up + + return p_new.reshape(init_shape) + + return impl + +#---------------------------------------------------------------------------- +# Windowing functions +# - translated from the numpy implementations found in: +# https://github.com/numpy/numpy/blob/v1.16.1/numpy/lib/function_base.py#L2543-L3233 # noqa: E501 +# at commit: f1c4c758e1c24881560dd8ab1e64ae750 +# - and also, for NumPy >= 1.20, translated from implementations in +# https://github.com/numpy/numpy/blob/156cd054e007b05d4ac4829e10a369d19dd2b0b1/numpy/lib/function_base.py#L2655-L3065 # noqa: E501 + + +@register_jitable +def np_bartlett_impl(M): + n = np.arange(1. - M, M, 2) + return np.where(np.less_equal(n, 0), 1 + n / (M - 1), 1 - n / (M - 1)) + + +@register_jitable +def np_blackman_impl(M): + n = np.arange(1. 
- M, M, 2) + return (0.42 + 0.5 * np.cos(np.pi * n / (M - 1)) + + 0.08 * np.cos(2.0 * np.pi * n / (M - 1))) + + +@register_jitable +def np_hamming_impl(M): + n = np.arange(1 - M, M, 2) + return 0.54 + 0.46 * np.cos(np.pi * n / (M - 1)) + + +@register_jitable +def np_hanning_impl(M): + n = np.arange(1 - M, M, 2) + return 0.5 + 0.5 * np.cos(np.pi * n / (M - 1)) + + +def window_generator(func): + def window_overload(M): + if not isinstance(M, types.Integer): + raise TypingError('M must be an integer') + + def window_impl(M): + + if M < 1: + return np.array((), dtype=np.float64) + if M == 1: + return np.ones(1, dtype=np.float64) + return func(M) + + return window_impl + return window_overload + + +overload(np.bartlett)(window_generator(np_bartlett_impl)) +overload(np.blackman)(window_generator(np_blackman_impl)) +overload(np.hamming)(window_generator(np_hamming_impl)) +overload(np.hanning)(window_generator(np_hanning_impl)) + + +_i0A = np.array([ + -4.41534164647933937950E-18, + 3.33079451882223809783E-17, + -2.43127984654795469359E-16, + 1.71539128555513303061E-15, + -1.16853328779934516808E-14, + 7.67618549860493561688E-14, + -4.85644678311192946090E-13, + 2.95505266312963983461E-12, + -1.72682629144155570723E-11, + 9.67580903537323691224E-11, + -5.18979560163526290666E-10, + 2.65982372468238665035E-9, + -1.30002500998624804212E-8, + 6.04699502254191894932E-8, + -2.67079385394061173391E-7, + 1.11738753912010371815E-6, + -4.41673835845875056359E-6, + 1.64484480707288970893E-5, + -5.75419501008210370398E-5, + 1.88502885095841655729E-4, + -5.76375574538582365885E-4, + 1.63947561694133579842E-3, + -4.32430999505057594430E-3, + 1.05464603945949983183E-2, + -2.37374148058994688156E-2, + 4.93052842396707084878E-2, + -9.49010970480476444210E-2, + 1.71620901522208775349E-1, + -3.04682672343198398683E-1, + 6.76795274409476084995E-1, +]) + +_i0B = np.array([ + -7.23318048787475395456E-18, + -4.83050448594418207126E-18, + 4.46562142029675999901E-17, + 3.46122286769746109310E-17, 
+ -2.82762398051658348494E-16, + -3.42548561967721913462E-16, + 1.77256013305652638360E-15, + 3.81168066935262242075E-15, + -9.55484669882830764870E-15, + -4.15056934728722208663E-14, + 1.54008621752140982691E-14, + 3.85277838274214270114E-13, + 7.18012445138366623367E-13, + -1.79417853150680611778E-12, + -1.32158118404477131188E-11, + -3.14991652796324136454E-11, + 1.18891471078464383424E-11, + 4.94060238822496958910E-10, + 3.39623202570838634515E-9, + 2.26666899049817806459E-8, + 2.04891858946906374183E-7, + 2.89137052083475648297E-6, + 6.88975834691682398426E-5, + 3.36911647825569408990E-3, + 8.04490411014108831608E-1, +]) + + +@register_jitable +def _chbevl(x, vals): + b0 = vals[0] + b1 = 0.0 + + for i in range(1, len(vals)): + b2 = b1 + b1 = b0 + b0 = x * b1 - b2 + vals[i] + + return 0.5 * (b0 - b2) + + +@register_jitable +def _i0(x): + if x < 0: + x = -x + if x <= 8.0: + y = (0.5 * x) - 2.0 + return np.exp(x) * _chbevl(y, _i0A) + + return np.exp(x) * _chbevl(32.0 / x - 2.0, _i0B) / np.sqrt(x) + + +@register_jitable +def _i0n(n, alpha, beta): + y = np.empty_like(n, dtype=np.float64) + t = _i0(np.float64(beta)) + for i in range(len(y)): + y[i] = _i0(beta * np.sqrt(1 - ((n[i] - alpha) / alpha)**2.0)) / t + + return y + + +@overload(np.kaiser) +def np_kaiser(M, beta): + if not isinstance(M, types.Integer): + raise TypingError('M must be an integer') + + if not isinstance(beta, (types.Integer, types.Float)): + raise TypingError('beta must be an integer or float') + + def np_kaiser_impl(M, beta): + if M < 1: + return np.array((), dtype=np.float64) + if M == 1: + return np.ones(1, dtype=np.float64) + + n = np.arange(0, M) + alpha = (M - 1) / 2.0 + + return _i0n(n, alpha, beta) + + return np_kaiser_impl + + +@register_jitable +def _cross_operation(a, b, out): + + def _cross_preprocessing(x): + x0 = x[..., 0] + x1 = x[..., 1] + if x.shape[-1] == 3: + x2 = x[..., 2] + else: + x2 = np.multiply(x.dtype.type(0), x0) + return x0, x1, x2 + + a0, a1, a2 = 
_cross_preprocessing(a) + b0, b1, b2 = _cross_preprocessing(b) + + cp0 = np.multiply(a1, b2) - np.multiply(a2, b1) + cp1 = np.multiply(a2, b0) - np.multiply(a0, b2) + cp2 = np.multiply(a0, b1) - np.multiply(a1, b0) + + out[..., 0] = cp0 + out[..., 1] = cp1 + out[..., 2] = cp2 + + +def _cross(a, b): + pass + + +@overload(_cross) +def _cross_impl(a, b): + dtype = np.promote_types(as_dtype(a.dtype), as_dtype(b.dtype)) + if a.ndim == 1 and b.ndim == 1: + def impl(a, b): + cp = np.empty((3,), dtype) + _cross_operation(a, b, cp) + return cp + else: + def impl(a, b): + shape = np.add(a[..., 0], b[..., 0]).shape + cp = np.empty(shape + (3,), dtype) + _cross_operation(a, b, cp) + return cp + return impl + + +@overload(np.cross) +def np_cross(a, b): + if not type_can_asarray(a) or not type_can_asarray(b): + raise TypingError("Inputs must be array-like.") + + def impl(a, b): + a_ = np.asarray(a) + b_ = np.asarray(b) + if a_.shape[-1] not in (2, 3) or b_.shape[-1] not in (2, 3): + raise ValueError(( + "Incompatible dimensions for cross product\n" + "(dimension must be 2 or 3)" + )) + + if a_.shape[-1] == 3 or b_.shape[-1] == 3: + return _cross(a_, b_) + else: + raise ValueError(( + "Dimensions for both inputs is 2.\n" + "Please replace your numpy.cross(a, b) call with " + "a call to `cross2d(a, b)` from `numba.np.extensions`." + )) + return impl + + +@register_jitable +def _cross2d_operation(a, b): + + def _cross_preprocessing(x): + x0 = x[..., 0] + x1 = x[..., 1] + return x0, x1 + + a0, a1 = _cross_preprocessing(a) + b0, b1 = _cross_preprocessing(b) + + cp = np.multiply(a0, b1) - np.multiply(a1, b0) + # If ndim of a and b is 1, cp is a scalar. + # In this case np.cross returns a 0-D array, containing the scalar. + # np.asarray is used to reconcile this case, without introducing + # overhead in the case where cp is an actual N-D array. 
+ # (recall that np.asarray does not copy existing arrays) + return np.asarray(cp) + + +def cross2d(a, b): + pass + + +@overload(cross2d) +def cross2d_impl(a, b): + if not type_can_asarray(a) or not type_can_asarray(b): + raise TypingError("Inputs must be array-like.") + + def impl(a, b): + a_ = np.asarray(a) + b_ = np.asarray(b) + if a_.shape[-1] != 2 or b_.shape[-1] != 2: + raise ValueError(( + "Incompatible dimensions for 2D cross product\n" + "(dimension must be 2 for both inputs)" + )) + return _cross2d_operation(a_, b_) + + return impl + + +@overload(np.trim_zeros) +def np_trim_zeros(filt, trim='fb'): + if not isinstance(filt, types.Array): + raise NumbaTypeError('The first argument must be an array') + + if filt.ndim > 1: + raise NumbaTypeError('array must be 1D') + + if not isinstance(trim, (str, types.UnicodeType)): + raise NumbaTypeError('The second argument must be a string') + + def impl(filt, trim='fb'): + a_ = np.asarray(filt) + first = 0 + trim = trim.lower() + if 'f' in trim: + for i in a_: + if i != 0: + break + else: + first = first + 1 + last = len(filt) + if 'b' in trim: + for i in a_[::-1]: + if i != 0: + break + else: + last = last - 1 + return a_[first:last] + + return impl + + +@overload(np.setxor1d) +def jit_np_setxor1d(ar1, ar2, assume_unique=False): + if not (type_can_asarray(ar1) or type_can_asarray(ar2)): + raise TypingError('setxor1d: first two args must be array-like') + if not (isinstance(assume_unique, (types.Boolean, bool))): + raise TypingError('setxor1d: Argument "assume_unique" must be boolean') + + # https://github.com/numpy/numpy/blob/03b62604eead0f7d279a5a4c094743eb29647368/numpy/lib/arraysetops.py#L477 # noqa: E501 + def np_setxor1d_impl(ar1, ar2, assume_unique=False): + a = np.asarray(ar1) + b = np.asarray(ar2) + + if not assume_unique: + a = np.unique(a) + b = np.unique(b) + else: + a = a.ravel() + b = b.ravel() + + # Implementation very similar to np_intersect1d_impl: + # We want union minus the intersect + aux = 
np.concatenate((a, b)) + aux.sort() + + flag = np.empty(aux.shape[0] + 1, dtype=np.bool_) + flag[0] = True + flag[-1] = True + flag[1:-1] = aux[1:] != aux[:-1] + return aux[flag[1:] & flag[:-1]] + + return np_setxor1d_impl + + +@overload(np.setdiff1d) +def jit_np_setdiff1d(ar1, ar2, assume_unique=False): + if not (type_can_asarray(ar1) or type_can_asarray(ar2)): + raise TypingError('setdiff1d: first two args must be array-like') + if not (isinstance(assume_unique, (types.Boolean, bool))): + raise TypingError('setdiff1d: Argument "assume_unique" must be boolean') + + # https://github.com/numpy/numpy/blob/03b62604eead0f7d279a5a4c094743eb29647368/numpy/lib/arraysetops.py#L940 # noqa: E501 + def np_setdiff1d_impl(ar1, ar2, assume_unique=False): + ar1 = np.asarray(ar1) + ar2 = np.asarray(ar2) + if assume_unique: + ar1 = ar1.ravel() + ar2 = ar2.ravel() + else: + ar1 = np.unique(ar1) + ar2 = np.unique(ar2) + return ar1[np.in1d(ar1, ar2, assume_unique=True, invert=True)] + + return np_setdiff1d_impl + + +@overload(np.in1d) +def jit_np_in1d(ar1, ar2, assume_unique=False, invert=False): + if not (type_can_asarray(ar1) or type_can_asarray(ar2)): + raise TypingError('in1d: first two args must be array-like') + if not isinstance(assume_unique, (types.Boolean, bool)): + raise TypingError('in1d: Argument "assume_unique" must be boolean') + if not isinstance(invert, (types.Boolean, bool)): + raise TypingError('in1d: Argument "invert" must be boolean') + + def np_in1d_impl(ar1, ar2, assume_unique=False, invert=False): + # https://github.com/numpy/numpy/blob/03b62604eead0f7d279a5a4c094743eb29647368/numpy/lib/arraysetops.py#L525 # noqa: E501 + + # Ravel both arrays, behavior for the first array could be different + ar1 = np.asarray(ar1).ravel() + ar2 = np.asarray(ar2).ravel() + + # This code is run when it would make the code significantly faster + # Sorting is also not guaranteed to work on objects but numba does + # not support object arrays. 
+ if len(ar2) < 10 * len(ar1) ** 0.145: + if invert: + mask = np.ones(len(ar1), dtype=np.bool_) + for a in ar2: + mask &= (ar1 != a) + else: + mask = np.zeros(len(ar1), dtype=np.bool_) + for a in ar2: + mask |= (ar1 == a) + return mask + + # Otherwise use sorting + if not assume_unique: + # Equivalent to ar1, inv_idx = np.unique(ar1, return_inverse=True) + # https://github.com/numpy/numpy/blob/03b62604eead0f7d279a5a4c094743eb29647368/numpy/lib/arraysetops.py#L358C8-L358C8 # noqa: E501 + order1 = np.argsort(ar1) + aux = ar1[order1] + mask = np.empty(aux.shape, dtype=np.bool_) + mask[:1] = True + mask[1:] = aux[1:] != aux[:-1] + ar1 = aux[mask] + imask = np.cumsum(mask) - 1 + inv_idx = np.empty(mask.shape, dtype=np.intp) + inv_idx[order1] = imask + ar2 = np.unique(ar2) + + ar = np.concatenate((ar1, ar2)) + # We need this to be a stable sort, so always use 'mergesort' + # here. The values from the first array should always come before + # the values from the second array. + order = ar.argsort(kind='mergesort') + sar = ar[order] + flag = np.empty(sar.size, np.bool_) + if invert: + flag[:-1] = (sar[1:] != sar[:-1]) + else: + flag[:-1] = (sar[1:] == sar[:-1]) + flag[-1:] = invert + ret = np.empty(ar.shape, dtype=np.bool_) + ret[order] = flag + + # return ret[:len(ar1)] + if assume_unique: + return ret[:len(ar1)] + else: + return ret[inv_idx] + + return np_in1d_impl + + +@overload(np.isin) +def jit_np_isin(element, test_elements, assume_unique=False, invert=False): + if not (type_can_asarray(element) or type_can_asarray(test_elements)): + raise TypingError('isin: first two args must be array-like') + if not (isinstance(assume_unique, (types.Boolean, bool))): + raise TypingError('isin: Argument "assume_unique" must be boolean') + if not (isinstance(invert, (types.Boolean, bool))): + raise TypingError('isin: Argument "invert" must be boolean') + + # https://github.com/numpy/numpy/blob/03b62604eead0f7d279a5a4c094743eb29647368/numpy/lib/arraysetops.py#L889 # noqa: E501 + def 
np_isin_impl(element, test_elements, assume_unique=False, invert=False): + + element = np.asarray(element) + return np.in1d(element, test_elements, assume_unique=assume_unique, + invert=invert).reshape(element.shape) + + return np_isin_impl diff --git a/venv/lib/python3.10/site-packages/numba/np/npdatetime.py b/venv/lib/python3.10/site-packages/numba/np/npdatetime.py new file mode 100644 index 0000000000000000000000000000000000000000..ba29542e92064297ca1ed8860248fa0e7ca918db --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/npdatetime.py @@ -0,0 +1,840 @@ +""" +Implementation of operations on numpy timedelta64. +""" + +import numpy as np +import operator + +import llvmlite.ir +from llvmlite.ir import Constant + +from numba.core import types, cgutils +from numba.core.cgutils import create_constant_array +from numba.core.imputils import (lower_builtin, lower_constant, + impl_ret_untracked, lower_cast) +from numba.np import npdatetime_helpers, numpy_support, npyfuncs +from numba.extending import overload_method +from numba.core.config import IS_32BITS +from numba.core.errors import LoweringError + +# datetime64 and timedelta64 use the same internal representation +DATETIME64 = TIMEDELTA64 = llvmlite.ir.IntType(64) +NAT = Constant(TIMEDELTA64, npdatetime_helpers.NAT) + +TIMEDELTA_BINOP_SIG = (types.NPTimedelta,) * 2 + + +def scale_by_constant(builder, val, factor): + """ + Multiply *val* by the constant *factor*. + """ + return builder.mul(val, Constant(TIMEDELTA64, factor)) + + +def unscale_by_constant(builder, val, factor): + """ + Divide *val* by the constant *factor*. + """ + return builder.sdiv(val, Constant(TIMEDELTA64, factor)) + + +def add_constant(builder, val, const): + """ + Add constant *const* to *val*. 
+ """ + return builder.add(val, Constant(TIMEDELTA64, const)) + + +def scale_timedelta(context, builder, val, srcty, destty): + """ + Scale the timedelta64 *val* from *srcty* to *destty* + (both numba.types.NPTimedelta instances) + """ + factor = npdatetime_helpers.get_timedelta_conversion_factor( + srcty.unit, destty.unit) + if factor is None: + # This can happen when using explicit output in a ufunc. + msg = f"cannot convert timedelta64 from {srcty.unit} to {destty.unit}" + raise LoweringError(msg) + return scale_by_constant(builder, val, factor) + + +def normalize_timedeltas(context, builder, left, right, leftty, rightty): + """ + Scale either *left* or *right* to the other's unit, in order to have + homogeneous units. + """ + factor = npdatetime_helpers.get_timedelta_conversion_factor( + leftty.unit, rightty.unit) + if factor is not None: + return scale_by_constant(builder, left, factor), right + factor = npdatetime_helpers.get_timedelta_conversion_factor( + rightty.unit, leftty.unit) + if factor is not None: + return left, scale_by_constant(builder, right, factor) + # Typing should not let this happen, except on == and != operators + raise RuntimeError("cannot normalize %r and %r" % (leftty, rightty)) + + +def alloc_timedelta_result(builder, name='ret'): + """ + Allocate a NaT-initialized datetime64 (or timedelta64) result slot. + """ + ret = cgutils.alloca_once(builder, TIMEDELTA64, name=name) + builder.store(NAT, ret) + return ret + + +def alloc_boolean_result(builder, name='ret'): + """ + Allocate an uninitialized boolean result slot. + """ + ret = cgutils.alloca_once(builder, llvmlite.ir.IntType(1), name=name) + return ret + + +def is_not_nat(builder, val): + """ + Return a predicate which is true if *val* is not NaT. + """ + return builder.icmp_unsigned('!=', val, NAT) + + +def are_not_nat(builder, vals): + """ + Return a predicate which is true if all of *vals* are not NaT. 
+ """ + assert len(vals) >= 1 + pred = is_not_nat(builder, vals[0]) + for val in vals[1:]: + pred = builder.and_(pred, is_not_nat(builder, val)) + return pred + + +normal_year_months = create_constant_array( + TIMEDELTA64, + [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]) +leap_year_months = create_constant_array( + TIMEDELTA64, + [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]) +normal_year_months_acc = create_constant_array( + TIMEDELTA64, + [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]) +leap_year_months_acc = create_constant_array( + TIMEDELTA64, + [0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335]) + + +@lower_constant(types.NPDatetime) +@lower_constant(types.NPTimedelta) +def datetime_constant(context, builder, ty, pyval): + return DATETIME64(pyval.astype(np.int64)) + + +# Arithmetic operators on timedelta64 + +@lower_builtin(operator.pos, types.NPTimedelta) +def timedelta_pos_impl(context, builder, sig, args): + res = args[0] + return impl_ret_untracked(context, builder, sig.return_type, res) + + +@lower_builtin(operator.neg, types.NPTimedelta) +def timedelta_neg_impl(context, builder, sig, args): + res = builder.neg(args[0]) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +@lower_builtin(abs, types.NPTimedelta) +def timedelta_abs_impl(context, builder, sig, args): + val, = args + ret = alloc_timedelta_result(builder) + with builder.if_else(cgutils.is_scalar_neg(builder, val)) as (then, otherwise): + with then: + builder.store(builder.neg(val), ret) + with otherwise: + builder.store(val, ret) + res = builder.load(ret) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def timedelta_sign_impl(context, builder, sig, args): + """ + np.sign(timedelta64) + """ + val, = args + ret = alloc_timedelta_result(builder) + zero = Constant(TIMEDELTA64, 0) + with builder.if_else(builder.icmp_signed('>', val, zero) + ) as (gt_zero, le_zero): + with gt_zero: + builder.store(Constant(TIMEDELTA64, 1), ret) + with 
le_zero: + with builder.if_else(builder.icmp_unsigned('==', val, zero) + ) as (eq_zero, lt_zero): + with eq_zero: + builder.store(Constant(TIMEDELTA64, 0), ret) + with lt_zero: + builder.store(Constant(TIMEDELTA64, -1), ret) + res = builder.load(ret) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +@lower_builtin(operator.add, *TIMEDELTA_BINOP_SIG) +@lower_builtin(operator.iadd, *TIMEDELTA_BINOP_SIG) +def timedelta_add_impl(context, builder, sig, args): + [va, vb] = args + [ta, tb] = sig.args + ret = alloc_timedelta_result(builder) + with cgutils.if_likely(builder, are_not_nat(builder, [va, vb])): + va = scale_timedelta(context, builder, va, ta, sig.return_type) + vb = scale_timedelta(context, builder, vb, tb, sig.return_type) + builder.store(builder.add(va, vb), ret) + res = builder.load(ret) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +@lower_builtin(operator.sub, *TIMEDELTA_BINOP_SIG) +@lower_builtin(operator.isub, *TIMEDELTA_BINOP_SIG) +def timedelta_sub_impl(context, builder, sig, args): + [va, vb] = args + [ta, tb] = sig.args + ret = alloc_timedelta_result(builder) + with cgutils.if_likely(builder, are_not_nat(builder, [va, vb])): + va = scale_timedelta(context, builder, va, ta, sig.return_type) + vb = scale_timedelta(context, builder, vb, tb, sig.return_type) + builder.store(builder.sub(va, vb), ret) + res = builder.load(ret) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +def _timedelta_times_number(context, builder, td_arg, td_type, + number_arg, number_type, return_type): + ret = alloc_timedelta_result(builder) + with cgutils.if_likely(builder, is_not_nat(builder, td_arg)): + if isinstance(number_type, types.Float): + val = builder.sitofp(td_arg, number_arg.type) + val = builder.fmul(val, number_arg) + val = _cast_to_timedelta(context, builder, val) + else: + val = builder.mul(td_arg, number_arg) + # The scaling is required for ufunc np.multiply() with an explicit + # output in a 
different unit. + val = scale_timedelta(context, builder, val, td_type, return_type) + builder.store(val, ret) + return builder.load(ret) + + +@lower_builtin(operator.mul, types.NPTimedelta, types.Integer) +@lower_builtin(operator.imul, types.NPTimedelta, types.Integer) +@lower_builtin(operator.mul, types.NPTimedelta, types.Float) +@lower_builtin(operator.imul, types.NPTimedelta, types.Float) +def timedelta_times_number(context, builder, sig, args): + res = _timedelta_times_number(context, builder, + args[0], sig.args[0], args[1], sig.args[1], + sig.return_type) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +@lower_builtin(operator.mul, types.Integer, types.NPTimedelta) +@lower_builtin(operator.imul, types.Integer, types.NPTimedelta) +@lower_builtin(operator.mul, types.Float, types.NPTimedelta) +@lower_builtin(operator.imul, types.Float, types.NPTimedelta) +def number_times_timedelta(context, builder, sig, args): + res = _timedelta_times_number(context, builder, + args[1], sig.args[1], args[0], sig.args[0], + sig.return_type) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +@lower_builtin(operator.truediv, types.NPTimedelta, types.Integer) +@lower_builtin(operator.itruediv, types.NPTimedelta, types.Integer) +@lower_builtin(operator.floordiv, types.NPTimedelta, types.Integer) +@lower_builtin(operator.ifloordiv, types.NPTimedelta, types.Integer) +@lower_builtin(operator.truediv, types.NPTimedelta, types.Float) +@lower_builtin(operator.itruediv, types.NPTimedelta, types.Float) +@lower_builtin(operator.floordiv, types.NPTimedelta, types.Float) +@lower_builtin(operator.ifloordiv, types.NPTimedelta, types.Float) +def timedelta_over_number(context, builder, sig, args): + td_arg, number_arg = args + number_type = sig.args[1] + ret = alloc_timedelta_result(builder) + ok = builder.and_(is_not_nat(builder, td_arg), + builder.not_(cgutils.is_scalar_zero_or_nan(builder, number_arg))) + with cgutils.if_likely(builder, ok): + # 
Denominator is non-zero, non-NaN + if isinstance(number_type, types.Float): + val = builder.sitofp(td_arg, number_arg.type) + val = builder.fdiv(val, number_arg) + val = _cast_to_timedelta(context, builder, val) + else: + val = builder.sdiv(td_arg, number_arg) + # The scaling is required for ufuncs np.*divide() with an explicit + # output in a different unit. + val = scale_timedelta(context, builder, val, + sig.args[0], sig.return_type) + builder.store(val, ret) + res = builder.load(ret) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +@lower_builtin(operator.truediv, *TIMEDELTA_BINOP_SIG) +@lower_builtin(operator.itruediv, *TIMEDELTA_BINOP_SIG) +def timedelta_over_timedelta(context, builder, sig, args): + [va, vb] = args + [ta, tb] = sig.args + not_nan = are_not_nat(builder, [va, vb]) + ll_ret_type = context.get_value_type(sig.return_type) + ret = cgutils.alloca_once(builder, ll_ret_type, name='ret') + builder.store(Constant(ll_ret_type, float('nan')), ret) + with cgutils.if_likely(builder, not_nan): + va, vb = normalize_timedeltas(context, builder, va, vb, ta, tb) + va = builder.sitofp(va, ll_ret_type) + vb = builder.sitofp(vb, ll_ret_type) + builder.store(builder.fdiv(va, vb), ret) + res = builder.load(ret) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +@lower_builtin(operator.floordiv, *TIMEDELTA_BINOP_SIG) +def timedelta_floor_div_timedelta(context, builder, sig, args): + [va, vb] = args + [ta, tb] = sig.args + ll_ret_type = context.get_value_type(sig.return_type) + not_nan = are_not_nat(builder, [va, vb]) + ret = cgutils.alloca_once(builder, ll_ret_type, name='ret') + zero = Constant(ll_ret_type, 0) + one = Constant(ll_ret_type, 1) + builder.store(zero, ret) + with cgutils.if_likely(builder, not_nan): + va, vb = normalize_timedeltas(context, builder, va, vb, ta, tb) + # is the denominator zero or NaT? 
+ denom_ok = builder.not_(builder.icmp_signed('==', vb, zero)) + with cgutils.if_likely(builder, denom_ok): + # is either arg negative? + vaneg = builder.icmp_signed('<', va, zero) + neg = builder.or_(vaneg, builder.icmp_signed('<', vb, zero)) + with builder.if_else(neg) as (then, otherwise): + with then: # one or more value negative + with builder.if_else(vaneg) as (negthen, negotherwise): + with negthen: + top = builder.sub(va, one) + div = builder.sdiv(top, vb) + builder.store(div, ret) + with negotherwise: + top = builder.add(va, one) + div = builder.sdiv(top, vb) + builder.store(div, ret) + with otherwise: + div = builder.sdiv(va, vb) + builder.store(div, ret) + res = builder.load(ret) + return impl_ret_untracked(context, builder, sig.return_type, res) + +def timedelta_mod_timedelta(context, builder, sig, args): + # inspired by https://github.com/numpy/numpy/blob/fe8072a12d65e43bd2e0b0f9ad67ab0108cc54b3/numpy/core/src/umath/loops.c.src#L1424 + # alg is basically as `a % b`: + # if a or b is NaT return NaT + # elseif b is 0 return NaT + # else pretend a and b are int and do pythonic int modulus + + [va, vb] = args + [ta, tb] = sig.args + not_nan = are_not_nat(builder, [va, vb]) + ll_ret_type = context.get_value_type(sig.return_type) + ret = alloc_timedelta_result(builder) + builder.store(NAT, ret) + zero = Constant(ll_ret_type, 0) + with cgutils.if_likely(builder, not_nan): + va, vb = normalize_timedeltas(context, builder, va, vb, ta, tb) + # is the denominator zero or NaT? + denom_ok = builder.not_(builder.icmp_signed('==', vb, zero)) + with cgutils.if_likely(builder, denom_ok): + # is either arg negative? 
+ vapos = builder.icmp_signed('>', va, zero) + vbpos = builder.icmp_signed('>', vb, zero) + rem = builder.srem(va, vb) + cond = builder.or_(builder.and_(vapos, vbpos), + builder.icmp_signed('==', rem, zero)) + with builder.if_else(cond) as (then, otherwise): + with then: + builder.store(rem, ret) + with otherwise: + builder.store(builder.add(rem, vb), ret) + + res = builder.load(ret) + return impl_ret_untracked(context, builder, sig.return_type, res) + +# Comparison operators on timedelta64 + + +def _create_timedelta_comparison_impl(ll_op, default_value): + def impl(context, builder, sig, args): + [va, vb] = args + [ta, tb] = sig.args + ret = alloc_boolean_result(builder) + with builder.if_else(are_not_nat(builder, [va, vb])) as (then, otherwise): + with then: + try: + norm_a, norm_b = normalize_timedeltas( + context, builder, va, vb, ta, tb) + except RuntimeError: + # Cannot normalize units => the values are unequal (except if NaT) + builder.store(default_value, ret) + else: + builder.store(builder.icmp_unsigned(ll_op, norm_a, norm_b), ret) + with otherwise: + # NaT ==/>=/>/ is True + if ll_op == '!=': + builder.store(cgutils.true_bit, ret) + else: + builder.store(cgutils.false_bit, ret) + res = builder.load(ret) + return impl_ret_untracked(context, builder, sig.return_type, res) + + return impl + + +def _create_timedelta_ordering_impl(ll_op): + def impl(context, builder, sig, args): + [va, vb] = args + [ta, tb] = sig.args + ret = alloc_boolean_result(builder) + with builder.if_else(are_not_nat(builder, [va, vb])) as (then, otherwise): + with then: + norm_a, norm_b = normalize_timedeltas( + context, builder, va, vb, ta, tb) + builder.store(builder.icmp_signed(ll_op, norm_a, norm_b), ret) + with otherwise: + # NaT >=/>/') +timedelta_ge_timedelta_impl = _create_timedelta_ordering_impl('>=') + +for op_, func in [(operator.eq, timedelta_eq_timedelta_impl), + (operator.ne, timedelta_ne_timedelta_impl), + (operator.lt, timedelta_lt_timedelta_impl), + (operator.le, 
timedelta_le_timedelta_impl), + (operator.gt, timedelta_gt_timedelta_impl), + (operator.ge, timedelta_ge_timedelta_impl)]: + lower_builtin(op_, *TIMEDELTA_BINOP_SIG)(func) + + +# Arithmetic on datetime64 + +def is_leap_year(builder, year_val): + """ + Return a predicate indicating whether *year_val* (offset by 1970) is a + leap year. + """ + actual_year = builder.add(year_val, Constant(DATETIME64, 1970)) + multiple_of_4 = cgutils.is_null( + builder, builder.and_(actual_year, Constant(DATETIME64, 3))) + not_multiple_of_100 = cgutils.is_not_null( + builder, builder.srem(actual_year, Constant(DATETIME64, 100))) + multiple_of_400 = cgutils.is_null( + builder, builder.srem(actual_year, Constant(DATETIME64, 400))) + return builder.and_(multiple_of_4, + builder.or_(not_multiple_of_100, multiple_of_400)) + + +def year_to_days(builder, year_val): + """ + Given a year *year_val* (offset to 1970), return the number of days + since the 1970 epoch. + """ + # The algorithm below is copied from Numpy's get_datetimestruct_days() + # (src/multiarray/datetime.c) + ret = cgutils.alloca_once(builder, TIMEDELTA64) + # First approximation + days = scale_by_constant(builder, year_val, 365) + # Adjust for leap years + with builder.if_else(cgutils.is_neg_int(builder, year_val)) \ + as (if_neg, if_pos): + with if_pos: + # At or after 1970: + # 1968 is the closest leap year before 1970. + # Exclude the current year, so add 1. 
            from_1968 = add_constant(builder, year_val, 1)
            # Add one day for each 4 years
            p_days = builder.add(days,
                                 unscale_by_constant(builder, from_1968, 4))
            # 1900 is the closest previous year divisible by 100
            from_1900 = add_constant(builder, from_1968, 68)
            # Subtract one day for each 100 years
            p_days = builder.sub(p_days,
                                 unscale_by_constant(builder, from_1900, 100))
            # 1600 is the closest previous year divisible by 400
            from_1600 = add_constant(builder, from_1900, 300)
            # Add one day for each 400 years
            p_days = builder.add(p_days,
                                 unscale_by_constant(builder, from_1600, 400))
            builder.store(p_days, ret)
        with if_neg:
            # Before 1970:
            # NOTE `year_val` is negative, and so will be `from_1972` and `from_2000`.
            # 1972 is the closest later year after 1970.
            # Include the current year, so subtract 2.
            from_1972 = add_constant(builder, year_val, -2)
            # Subtract one day for each 4 years (`from_1972` is negative)
            n_days = builder.add(days,
                                 unscale_by_constant(builder, from_1972, 4))
            # 2000 is the closest later year divisible by 100
            from_2000 = add_constant(builder, from_1972, -28)
            # Add one day for each 100 years
            n_days = builder.sub(n_days,
                                 unscale_by_constant(builder, from_2000, 100))
            # 2000 is also the closest later year divisible by 400
            # Subtract one day for each 400 years
            n_days = builder.add(n_days,
                                 unscale_by_constant(builder, from_2000, 400))
            builder.store(n_days, ret)
    return builder.load(ret)


def reduce_datetime_for_unit(builder, dt_val, src_unit, dest_unit):
    """
    Partially convert *dt_val* from *src_unit* towards *dest_unit*: values
    expressed in years or months are reduced to a day (or week) ordinal.
    Returns a ``(new_value, new_unit_string)`` pair; any remaining
    conversion (e.g. days -> seconds) is left to the caller, which can do
    it with a constant multiplier.
    """
    dest_unit_code = npdatetime_helpers.DATETIME_UNITS[dest_unit]
    src_unit_code = npdatetime_helpers.DATETIME_UNITS[src_unit]
    # Unit codes: 0=Y, 1=M, 2=W, 4=D (see npdatetime_helpers.DATETIME_UNITS).
    # Nothing to do if the destination is year/month-based or the source is
    # already week-or-finer grained.
    if dest_unit_code < 2 or src_unit_code >= 2:
        return dt_val, src_unit
    # Need to compute the day ordinal for *dt_val*
    if src_unit_code == 0:
        # Years to days
        year_val = dt_val
        days_val = year_to_days(builder, year_val)

    else:
        # Months to days
        # Accumulated day counts per month, one table for leap years and
        # one for normal years, emitted as LLVM global constant arrays.
        leap_array = cgutils.global_constant(builder,
                                             "leap_year_months_acc",
                                             leap_year_months_acc)
        normal_array = cgutils.global_constant(builder, "normal_year_months_acc",
                                               normal_year_months_acc)

        days = cgutils.alloca_once(builder, TIMEDELTA64)

        # First compute year number and month number
        year, month = cgutils.divmod_by_constant(builder, dt_val, 12)

        # Then deduce the number of days
        with builder.if_else(is_leap_year(builder, year)) as (then, otherwise):
            with then:
                addend = builder.load(cgutils.gep(builder, leap_array,
                                                  0, month, inbounds=True))
                builder.store(addend, days)
            with otherwise:
                addend = builder.load(cgutils.gep(builder, normal_array,
                                                  0, month, inbounds=True))
                builder.store(addend, days)

        days_val = year_to_days(builder, year)
        days_val = builder.add(days_val, builder.load(days))

    if dest_unit_code == 2:
        # Need to scale back to weeks
        weeks, _ = cgutils.divmod_by_constant(builder, days_val, 7)
        return weeks, 'W'
    else:
        return days_val, 'D'


def convert_datetime_for_arith(builder, dt_val, src_unit, dest_unit):
    """
    Convert datetime *dt_val* from *src_unit* to *dest_unit*.
    """
    # First partial conversion to days or weeks, if necessary.
    dt_val, dt_unit = reduce_datetime_for_unit(
        builder, dt_val, src_unit, dest_unit)
    # Then multiply by the remaining constant factor.
    dt_factor = npdatetime_helpers.get_timedelta_conversion_factor(dt_unit, dest_unit)
    if dt_factor is None:
        # This can happen when using explicit output in a ufunc.
        raise LoweringError("cannot convert datetime64 from %r to %r"
                            % (src_unit, dest_unit))
    return scale_by_constant(builder, dt_val, dt_factor)


def _datetime_timedelta_arith(ll_op_name):
    """Build a helper performing datetime64 <op> timedelta64 where
    *ll_op_name* is the IRBuilder method name ('add' or 'sub').
    Returns NaT if either operand is NaT (the result slot defaults to NaT
    and is only overwritten on the not-NaT path)."""
    def impl(context, builder, dt_arg, dt_unit,
             td_arg, td_unit, ret_unit):
        ret = alloc_timedelta_result(builder)
        with cgutils.if_likely(builder, are_not_nat(builder, [dt_arg, td_arg])):
            # Bring both operands to the common result unit before the op.
            dt_arg = convert_datetime_for_arith(builder, dt_arg,
                                                dt_unit, ret_unit)
            td_factor = npdatetime_helpers.get_timedelta_conversion_factor(
                td_unit, ret_unit)
            td_arg = scale_by_constant(builder, td_arg, td_factor)
            ret_val = getattr(builder, ll_op_name)(dt_arg, td_arg)
            builder.store(ret_val, ret)
        return builder.load(ret)
    return impl


_datetime_plus_timedelta = _datetime_timedelta_arith('add')
_datetime_minus_timedelta = _datetime_timedelta_arith('sub')

# datetime64 + timedelta64


@lower_builtin(operator.add, types.NPDatetime, types.NPTimedelta)
@lower_builtin(operator.iadd, types.NPDatetime, types.NPTimedelta)
def datetime_plus_timedelta(context, builder, sig, args):
    # dt + td: forwards to the shared add helper.
    dt_arg, td_arg = args
    dt_type, td_type = sig.args
    res = _datetime_plus_timedelta(context, builder,
                                   dt_arg, dt_type.unit,
                                   td_arg, td_type.unit,
                                   sig.return_type.unit)
    return impl_ret_untracked(context, builder, sig.return_type, res)


@lower_builtin(operator.add, types.NPTimedelta, types.NPDatetime)
@lower_builtin(operator.iadd, types.NPTimedelta, types.NPDatetime)
def timedelta_plus_datetime(context, builder, sig, args):
    # td + dt: addition is commutative, same helper with swapped unpacking.
    td_arg, dt_arg = args
    td_type, dt_type = sig.args
    res = _datetime_plus_timedelta(context, builder,
                                   dt_arg, dt_type.unit,
                                   td_arg, td_type.unit,
                                   sig.return_type.unit)
    return impl_ret_untracked(context, builder, sig.return_type, res)

# datetime64 - timedelta64


@lower_builtin(operator.sub, types.NPDatetime, types.NPTimedelta)
@lower_builtin(operator.isub, types.NPDatetime, types.NPTimedelta)
def datetime_minus_timedelta(context, builder, sig,
                             args):
    dt_arg, td_arg = args
    dt_type, td_type = sig.args
    res = _datetime_minus_timedelta(context, builder,
                                    dt_arg, dt_type.unit,
                                    td_arg, td_type.unit,
                                    sig.return_type.unit)
    return impl_ret_untracked(context, builder, sig.return_type, res)

# datetime64 - datetime64


@lower_builtin(operator.sub, types.NPDatetime, types.NPDatetime)
def datetime_minus_datetime(context, builder, sig, args):
    # dt - dt yields a timedelta64; NaT in either operand yields NaT
    # (the result slot defaults to NaT).
    va, vb = args
    ta, tb = sig.args
    unit_a = ta.unit
    unit_b = tb.unit
    ret_unit = sig.return_type.unit
    ret = alloc_timedelta_result(builder)
    with cgutils.if_likely(builder, are_not_nat(builder, [va, vb])):
        va = convert_datetime_for_arith(builder, va, unit_a, ret_unit)
        vb = convert_datetime_for_arith(builder, vb, unit_b, ret_unit)
        ret_val = builder.sub(va, vb)
        builder.store(ret_val, ret)
    res = builder.load(ret)
    return impl_ret_untracked(context, builder, sig.return_type, res)

# datetime64 comparisons


def _create_datetime_comparison_impl(ll_op):
    """Build a lowering for a datetime64 comparison; operands are first
    normalized to the best (finest) common unit."""
    def impl(context, builder, sig, args):
        va, vb = args
        ta, tb = sig.args
        unit_a = ta.unit
        unit_b = tb.unit
        ret_unit = npdatetime_helpers.get_best_unit(unit_a, unit_b)
        ret = alloc_boolean_result(builder)
        with builder.if_else(are_not_nat(builder, [va, vb])) as (then, otherwise):
            with then:
                norm_a = convert_datetime_for_arith(
                    builder, va, unit_a, ret_unit)
                norm_b = convert_datetime_for_arith(
                    builder, vb, unit_b, ret_unit)
                ret_val = builder.icmp_signed(ll_op, norm_a, norm_b)
                builder.store(ret_val, ret)
            with otherwise:
                # NaT compares unequal to everything, so only `!=` is True.
                if ll_op == '!=':
                    ret_val = cgutils.true_bit
                else:
                    ret_val = cgutils.false_bit
                builder.store(ret_val, ret)
        res = builder.load(ret)
        return impl_ret_untracked(context, builder, sig.return_type, res)

    return impl


datetime_eq_datetime_impl = _create_datetime_comparison_impl('==')
datetime_ne_datetime_impl = _create_datetime_comparison_impl('!=')
datetime_lt_datetime_impl = _create_datetime_comparison_impl('<')
datetime_le_datetime_impl = _create_datetime_comparison_impl('<=')
datetime_gt_datetime_impl = _create_datetime_comparison_impl('>')
datetime_ge_datetime_impl = _create_datetime_comparison_impl('>=')

for op, func in [(operator.eq, datetime_eq_datetime_impl),
                 (operator.ne, datetime_ne_datetime_impl),
                 (operator.lt, datetime_lt_datetime_impl),
                 (operator.le, datetime_le_datetime_impl),
                 (operator.gt, datetime_gt_datetime_impl),
                 (operator.ge, datetime_ge_datetime_impl)]:
    lower_builtin(op, *[types.NPDatetime]*2)(func)


########################################################################
# datetime/timedelta fmax/fmin maximum/minimum support

def _gen_datetime_max_impl(NAT_DOMINATES):
    # NAT_DOMINATES=True -> np.maximum semantics (NaT wins, like NaN);
    # False -> np.fmax semantics (NaT is ignored when the other side is valid).
    def datetime_max_impl(context, builder, sig, args):
        # note this could be optimizing relying on the actual value of NAT
        # but as NumPy doesn't rely on this, this seems more resilient
        in1, in2 = args
        in1_not_nat = is_not_nat(builder, in1)
        in2_not_nat = is_not_nat(builder, in2)
        in1_ge_in2 = builder.icmp_signed('>=', in1, in2)
        res = builder.select(in1_ge_in2, in1, in2)
        if NAT_DOMINATES:
            # NaT now dominates, like NaN
            # (swapping makes the selects below return the NaT operand).
            in1, in2 = in2, in1
        res = builder.select(in1_not_nat, res, in2)
        res = builder.select(in2_not_nat, res, in1)

        return impl_ret_untracked(context, builder, sig.return_type, res)
    return datetime_max_impl

datetime_maximum_impl = _gen_datetime_max_impl(True)
datetime_fmax_impl = _gen_datetime_max_impl(False)

def _gen_datetime_min_impl(NAT_DOMINATES):
    # Mirror of _gen_datetime_max_impl with '<=' selection.
    def datetime_min_impl(context, builder, sig, args):
        # note this could be optimizing relying on the actual value of NAT
        # but as NumPy doesn't rely on this, this seems more resilient
        in1, in2 = args
        in1_not_nat = is_not_nat(builder, in1)
        in2_not_nat = is_not_nat(builder, in2)
        in1_le_in2 = builder.icmp_signed('<=', in1, in2)
        res = builder.select(in1_le_in2, in1, in2)
        if NAT_DOMINATES:
            # NaT now dominates, like NaN
            in1, in2 = in2, in1
        res = builder.select(in1_not_nat, res, in2)
        res = builder.select(in2_not_nat, res, in1)

        return impl_ret_untracked(context, builder, sig.return_type, res)
    return datetime_min_impl

datetime_minimum_impl = _gen_datetime_min_impl(True)
datetime_fmin_impl = _gen_datetime_min_impl(False)

def _gen_timedelta_max_impl(NAT_DOMINATES):
    # Same scheme as the datetime variant, for timedelta64 operands.
    def timedelta_max_impl(context, builder, sig, args):
        # note this could be optimizing relying on the actual value of NAT
        # but as NumPy doesn't rely on this, this seems more resilient
        in1, in2 = args
        in1_not_nat = is_not_nat(builder, in1)
        in2_not_nat = is_not_nat(builder, in2)
        in1_ge_in2 = builder.icmp_signed('>=', in1, in2)
        res = builder.select(in1_ge_in2, in1, in2)
        if NAT_DOMINATES:
            # NaT now dominates, like NaN
            in1, in2 = in2, in1
        res = builder.select(in1_not_nat, res, in2)
        res = builder.select(in2_not_nat, res, in1)

        return impl_ret_untracked(context, builder, sig.return_type, res)
    return timedelta_max_impl

timedelta_maximum_impl = _gen_timedelta_max_impl(True)
timedelta_fmax_impl = _gen_timedelta_max_impl(False)

def _gen_timedelta_min_impl(NAT_DOMINATES):
    def timedelta_min_impl(context, builder, sig, args):
        # note this could be optimizing relying on the actual value of NAT
        # but as NumPy doesn't rely on this, this seems more resilient
        in1, in2 = args
        in1_not_nat = is_not_nat(builder, in1)
        in2_not_nat = is_not_nat(builder, in2)
        in1_le_in2 = builder.icmp_signed('<=', in1, in2)
        res = builder.select(in1_le_in2, in1, in2)
        if NAT_DOMINATES:
            # NaT now dominates, like NaN
            in1, in2 = in2, in1
        res = builder.select(in1_not_nat, res, in2)
        res = builder.select(in2_not_nat, res, in1)

        return impl_ret_untracked(context, builder, sig.return_type, res)
    return timedelta_min_impl

timedelta_minimum_impl = _gen_timedelta_min_impl(True)
timedelta_fmin_impl = _gen_timedelta_min_impl(False)

def _cast_to_timedelta(context, builder, val):
    # Cast a float value to a timedelta64 integer, mapping NaN to NaT.
    temp = builder.alloca(TIMEDELTA64)
    val_is_nan = builder.fcmp_unordered('uno', val, val)
    with builder.if_else(val_is_nan) as (
            then, els):
        with then:
            # NaN does not guarantee to cast to NAT.
            # We should store NAT explicitly.
            builder.store(NAT, temp)
        with els:
            builder.store(builder.fptosi(val, TIMEDELTA64), temp)
    return builder.load(temp)


@lower_builtin(np.isnat, types.NPDatetime)
@lower_builtin(np.isnat, types.NPTimedelta)
def _np_isnat_impl(context, builder, sig, args):
    # Delegates to the shared ufunc kernel in npyfuncs.
    return npyfuncs.np_datetime_isnat_impl(context, builder, sig, args)


@lower_cast(types.NPDatetime, types.Integer)
@lower_cast(types.NPTimedelta, types.Integer)
def _cast_npdatetime_int64(context, builder, fromty, toty, val):
    # datetime64/timedelta64 are represented as i64, so the cast is a no-op;
    # anything narrower than 64 bits is rejected.
    if toty.bitwidth != 64:  # all date time types are 64 bit
        msg = f"Cannot cast {fromty} to {toty} as {toty} is not 64 bits wide."
        raise ValueError(msg)
    return val


@overload_method(types.NPTimedelta, '__hash__')
@overload_method(types.NPDatetime, '__hash__')
def ol_hash_npdatetime(x):
    # Hash the underlying 64-bit value, mimicking CPython's integer hashing
    # conventions (a result of -1 is reserved for errors and remapped to -2).
    if numpy_support.numpy_version >= (2, 2)\
            and isinstance(x, types.NPTimedelta) and not x.unit:
        raise ValueError("Can't hash generic timedelta64")

    if IS_32BITS:
        def impl(x):
            x = np.int64(x)
            if x < 2**31 - 1:  # x < LONG_MAX
                y = np.int32(x)
            else:
                # Fold the high word into the low word, as CPython does for
                # longs wider than the native word.
                hi = (np.int64(x) & 0xffffffff00000000) >> 32
                lo = (np.int64(x) & 0x00000000ffffffff)
                y = np.int32(lo + (1000003) * hi)
            if y == -1:
                y = np.int32(-2)
            return y
    else:
        def impl(x):
            if np.int64(x) == -1:
                return np.int64(-2)
            return np.int64(x)
    return impl


# Register the minimum/maximum stubs from npdatetime_helpers against the
# concrete implementations generated above.
lower_builtin(npdatetime_helpers.datetime_minimum, types.NPDatetime, types.NPDatetime)(datetime_minimum_impl)
lower_builtin(npdatetime_helpers.datetime_minimum, types.NPTimedelta, types.NPTimedelta)(datetime_minimum_impl)
lower_builtin(npdatetime_helpers.datetime_maximum, types.NPDatetime, types.NPDatetime)(datetime_maximum_impl)
lower_builtin(npdatetime_helpers.datetime_maximum, types.NPTimedelta, types.NPTimedelta)(datetime_maximum_impl)
diff --git
a/venv/lib/python3.10/site-packages/numba/np/npdatetime_helpers.py b/venv/lib/python3.10/site-packages/numba/np/npdatetime_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..4abb671b91ddea0c32fc1571dc2f9dc88a5335b5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/npdatetime_helpers.py @@ -0,0 +1,212 @@ +""" +Helper functions for np.timedelta64 and np.datetime64. +For now, multiples-of-units (for example timedeltas expressed in tens +of seconds) are not supported. +""" + + +import numpy as np + + +DATETIME_UNITS = { + 'Y': 0, # Years + 'M': 1, # Months + 'W': 2, # Weeks + # Yes, there's a gap here + 'D': 4, # Days + 'h': 5, # Hours + 'm': 6, # Minutes + 's': 7, # Seconds + 'ms': 8, # Milliseconds + 'us': 9, # Microseconds + 'ns': 10, # Nanoseconds + 'ps': 11, # Picoseconds + 'fs': 12, # Femtoseconds + 'as': 13, # Attoseconds + '': 14, # "generic", i.e. unit-less +} + +NAT = np.timedelta64('nat').astype(np.int64) + +# NOTE: numpy has several inconsistent functions for timedelta casting: +# - can_cast_timedelta64_{metadata,units}() disallows "safe" casting +# to and from generic units +# - cast_timedelta_to_timedelta() allows casting from (but not to) +# generic units +# - compute_datetime_metadata_greatest_common_divisor() allows casting from +# generic units (used for promotion) + + +def same_kind(src, dest): + """ + Whether the *src* and *dest* units are of the same kind. + """ + return (DATETIME_UNITS[src] < 5) == (DATETIME_UNITS[dest] < 5) + + +def can_cast_timedelta_units(src, dest): + # Mimic NumPy's "safe" casting and promotion + # `dest` must be more precise than `src` and they must be compatible + # for conversion. + # XXX should we switch to enforcing "same-kind" for Numpy 1.10+ ? 
+ src = DATETIME_UNITS[src] + dest = DATETIME_UNITS[dest] + if src == dest: + return True + if src == 14: + return True + if src > dest: + return False + if dest == 14: + # unit-less timedelta64 is not compatible with anything else + return False + if src <= 1 and dest > 1: + # Cannot convert between months or years and other units + return False + return True + + +# Exact conversion factors from one unit to the immediately more precise one +_factors = { + 0: (1, 12), # Years -> Months + 2: (4, 7), # Weeks -> Days + 4: (5, 24), # Days -> Hours + 5: (6, 60), # Hours -> Minutes + 6: (7, 60), # Minutes -> Seconds + 7: (8, 1000), + 8: (9, 1000), + 9: (10, 1000), + 10: (11, 1000), + 11: (12, 1000), + 12: (13, 1000), +} + + +def _get_conversion_multiplier(big_unit_code, small_unit_code): + """ + Return an integer multiplier allowing to convert from *big_unit_code* + to *small_unit_code*. + None is returned if the conversion is not possible through a + simple integer multiplication. + """ + # Mimics get_datetime_units_factor() in NumPy's datetime.c, + # with a twist to allow no-op conversion from generic units. + if big_unit_code == 14: + return 1 + c = big_unit_code + factor = 1 + while c < small_unit_code: + try: + c, mult = _factors[c] + except KeyError: + # No possible conversion + return None + factor *= mult + if c == small_unit_code: + return factor + else: + return None + + +def get_timedelta_conversion_factor(src_unit, dest_unit): + """ + Return an integer multiplier allowing to convert from timedeltas + of *src_unit* to *dest_unit*. + """ + return _get_conversion_multiplier(DATETIME_UNITS[src_unit], + DATETIME_UNITS[dest_unit]) + + +def get_datetime_timedelta_conversion(datetime_unit, timedelta_unit): + """ + Compute a possible conversion for combining *datetime_unit* and + *timedelta_unit* (presumably for adding or subtracting). + Return (result unit, integer datetime multiplier, integer timedelta + multiplier). 
RuntimeError is raised if the combination is impossible. + """ + # XXX now unused (I don't know where / how Numpy uses this) + dt_unit_code = DATETIME_UNITS[datetime_unit] + td_unit_code = DATETIME_UNITS[timedelta_unit] + if td_unit_code == 14 or dt_unit_code == 14: + return datetime_unit, 1, 1 + if td_unit_code < 2 and dt_unit_code >= 2: + # Cannot combine Y or M timedelta64 with a finer-grained datetime64 + raise RuntimeError("cannot combine datetime64(%r) and timedelta64(%r)" + % (datetime_unit, timedelta_unit)) + dt_factor, td_factor = 1, 1 + + # If years or months, the datetime unit is first scaled to weeks or days, + # then conversion continues below. This is the same algorithm as used + # in Numpy's get_datetime_conversion_factor() (src/multiarray/datetime.c): + # """Conversions between years/months and other units use + # the factor averaged over the 400 year leap year cycle.""" + if dt_unit_code == 0: + if td_unit_code >= 4: + dt_factor = 97 + 400 * 365 + td_factor = 400 + dt_unit_code = 4 + elif td_unit_code == 2: + dt_factor = 97 + 400 * 365 + td_factor = 400 * 7 + dt_unit_code = 2 + elif dt_unit_code == 1: + if td_unit_code >= 4: + dt_factor = 97 + 400 * 365 + td_factor = 400 * 12 + dt_unit_code = 4 + elif td_unit_code == 2: + dt_factor = 97 + 400 * 365 + td_factor = 400 * 12 * 7 + dt_unit_code = 2 + + if td_unit_code >= dt_unit_code: + factor = _get_conversion_multiplier(dt_unit_code, td_unit_code) + assert factor is not None, (dt_unit_code, td_unit_code) + return timedelta_unit, dt_factor * factor, td_factor + else: + factor = _get_conversion_multiplier(td_unit_code, dt_unit_code) + assert factor is not None, (dt_unit_code, td_unit_code) + return datetime_unit, dt_factor, td_factor * factor + + +def combine_datetime_timedelta_units(datetime_unit, timedelta_unit): + """ + Return the unit result of combining *datetime_unit* with *timedelta_unit* + (e.g. by adding or subtracting). None is returned if combining + those units is forbidden. 
+ """ + dt_unit_code = DATETIME_UNITS[datetime_unit] + td_unit_code = DATETIME_UNITS[timedelta_unit] + if dt_unit_code == 14: + return timedelta_unit + elif td_unit_code == 14: + return datetime_unit + if td_unit_code < 2 and dt_unit_code >= 2: + return None + if dt_unit_code > td_unit_code: + return datetime_unit + else: + return timedelta_unit + + +def get_best_unit(unit_a, unit_b): + """ + Get the best (i.e. finer-grained) of two units. + """ + a = DATETIME_UNITS[unit_a] + b = DATETIME_UNITS[unit_b] + if a == 14: + return unit_b + if b == 14: + return unit_a + if b > a: + return unit_b + return unit_a + + +def datetime_minimum(a, b): + pass + + +def datetime_maximum(a, b): + pass diff --git a/venv/lib/python3.10/site-packages/numba/np/npyfuncs.py b/venv/lib/python3.10/site-packages/numba/np/npyfuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..de50317a1a32346c0eddbde76e5cccf26cffc1b0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/npyfuncs.py @@ -0,0 +1,1707 @@ +"""Codegen for functions used as kernels in NumPy functions + +Typically, the kernels of several ufuncs that can't map directly to +Python builtins +""" + + +import math + +import llvmlite.ir +import numpy as np + +from numba.core.extending import overload +from numba.core.imputils import impl_ret_untracked +from numba.core import typing, types, errors, lowering, cgutils, config +from numba.core.extending import register_jitable +from numba.np import npdatetime +from numba.np.math import cmathimpl, mathimpl, numbers +from numba.np.numpy_support import numpy_version + +# some NumPy constants. 
# Note that we could generate some of them using
# the math library, but having the values copied from npy_math seems to
# yield more accurate results
_NPY_LOG2E = 1.442695040888963407359924681001892137  # math.log(math.e, 2)
_NPY_LOG10E = 0.434294481903251827651128918916605082  # math.log(math.e, 10)
_NPY_LOGE2 = 0.693147180559945309417232121458176568  # math.log(2)


def _check_arity_and_homogeneity(sig, args, arity, return_type = None):
    """checks that the following are true:
    - args and sig.args have arg_count elements
    - all input types are homogeneous
    - return type is 'return_type' if provided, otherwise it must be
      homogeneous with the input types.
    """
    assert len(args) == arity
    assert len(sig.args) == arity
    ty = sig.args[0]
    if return_type is None:
        return_type = ty
    # must have homogeneous args
    if not( all(arg==ty for arg in sig.args) and sig.return_type == return_type):
        import inspect
        # Report the *caller's* name in the failure message.
        fname = inspect.currentframe().f_back.f_code.co_name
        msg = '{0} called with invalid types: {1}'.format(fname, sig)
        assert False, msg

# Select the float64 flavour matching the active type system.
if config.USE_LEGACY_TYPE_SYSTEM:
    cast_arg_ty = types.float64
else:
    cast_arg_ty = types.np_float64

def _call_func_by_name_with_cast(context, builder, sig, args,
                                 func_name, ty=cast_arg_ty):
    # it is quite common in NumPy to have loops implemented as a call
    # to the double version of the function, wrapped in casts. This
    # helper function facilitates that.
    mod = builder.module
    lty = context.get_argument_type(ty)
    fnty = llvmlite.ir.FunctionType(lty, [lty]*len(sig.args))
    fn = cgutils.insert_pure_function(mod, fnty, name=func_name)
    cast_args = [context.cast(builder, arg, argty, ty)
                 for arg, argty in zip(args, sig.args) ]

    result = builder.call(fn, cast_args)
    # NOTE(review): the result is cast from types.float64 regardless of the
    # `ty` parameter — looks inconsistent with the parameterized input cast;
    # confirm whether `ty` was intended here.
    return context.cast(builder, result, types.float64, sig.return_type)


def _dispatch_func_by_name_type(context, builder, sig, args, table, user_name):
    # for most cases the functions are homogeneous on all their types.
    # this code dispatches on the first argument type as it is the most useful
    # for our uses (all cases but ldexp are homogeneous in all types, and
    # dispatching on the first argument type works of ldexp as well)
    #
    # assumes that the function pointed by func_name has the type
    # signature sig (but needs translation to llvm types).

    ty = sig.args[0]
    try:
        func_name = table[ty]
    except KeyError as e:
        msg = "No {0} function for real type {1}".format(user_name, str(e))
        raise errors.LoweringError(msg)

    mod = builder.module
    if ty in types.complex_domain:
        # In numba struct types are always passed by pointer. So the call has to
        # be transformed from "result = func(ops...)" to "func(&result, ops...).
        # note that the result value pointer as first argument is the convention
        # used by numba.

        # First, prepare the return value
        out = context.make_complex(builder, ty)
        ptrargs = [cgutils.alloca_once_value(builder, arg)
                   for arg in args]
        call_args = [out._getpointer()] + ptrargs
        # get_value_as_argument for struct types like complex allocate stack space
        # and initialize with the value, the return value is the pointer to that
        # allocated space (ie: pointer to a copy of the value in the stack).
        # get_argument_type returns a pointer to the struct type in consonance.
        call_argtys = [ty] + list(sig.args)
        call_argltys = [context.get_value_type(ty).as_pointer()
                        for ty in call_argtys]
        fnty = llvmlite.ir.FunctionType(llvmlite.ir.VoidType(), call_argltys)
        # Note: the function isn't pure here (it writes to its pointer args)
        fn = cgutils.get_or_insert_function(mod, fnty, func_name)
        builder.call(fn, call_args)
        # The result was written through the first pointer argument.
        retval = builder.load(call_args[0])
    else:
        # Scalar case: ordinary by-value call to a pure external function.
        argtypes = [context.get_argument_type(aty) for aty in sig.args]
        restype = context.get_argument_type(sig.return_type)
        fnty = llvmlite.ir.FunctionType(restype, argtypes)
        fn = cgutils.insert_pure_function(mod, fnty, name=func_name)
        retval = context.call_external_function(builder, fn, sig.args, args)
    return retval



########################################################################
# Division kernels inspired by NumPy loops.c.src code
#
# The builtins are not applicable as they rely on a test for zero in the
# denominator. If it is zero the appropriate exception is raised.
# In NumPy, a division by zero does not raise an exception, but instead
# generated a known value. Note that a division by zero in any of the
# operations of a vector may raise an exception or issue a warning
# depending on the np.seterr configuration.
# This is not supported
# right now (and in any case, it won't be handled by these functions
# either)

def np_int_sdiv_impl(context, builder, sig, args):
    # based on the actual code in NumPy loops.c.src for signed integer types
    _check_arity_and_homogeneity(sig, args, 2)

    num, den = args
    ty = sig.args[0]  # any arg type will do, homogeneous

    ZERO = context.get_constant(ty, 0)
    MINUS_ONE = context.get_constant(ty, -1)
    # Most negative representable value: INT_MIN / -1 would overflow (SIGFPE
    # in native code), so it is forced to zero like division by zero.
    MIN_INT = context.get_constant(ty, 1 << (den.type.width-1))
    den_is_zero = builder.icmp_unsigned('==', ZERO, den)
    den_is_minus_one = builder.icmp_unsigned('==', MINUS_ONE, den)
    num_is_min_int = builder.icmp_unsigned('==', MIN_INT, num)
    could_cause_sigfpe = builder.and_(den_is_minus_one, num_is_min_int)
    force_zero = builder.or_(den_is_zero, could_cause_sigfpe)
    with builder.if_else(force_zero, likely=False) as (then, otherwise):
        with then:
            bb_then = builder.basic_block
        with otherwise:
            # Truncating division, then correct toward floor division when
            # the signs differ and there is a non-zero remainder.
            bb_otherwise = builder.basic_block
            div = builder.sdiv(num, den)
            mod = builder.srem(num, den)
            num_gt_zero = builder.icmp_signed('>', num, ZERO)
            den_gt_zero = builder.icmp_signed('>', den, ZERO)
            not_same_sign = builder.xor(num_gt_zero, den_gt_zero)
            mod_not_zero = builder.icmp_unsigned('!=', mod, ZERO)
            needs_fixing = builder.and_(not_same_sign, mod_not_zero)
            fix_value = builder.select(needs_fixing, MINUS_ONE, ZERO)
            result_otherwise = builder.add(div, fix_value)

    # Merge: zero on the exceptional path, corrected quotient otherwise.
    result = builder.phi(ZERO.type)
    result.add_incoming(ZERO, bb_then)
    result.add_incoming(result_otherwise, bb_otherwise)

    return result


def np_int_srem_impl(context, builder, sig, args):
    # based on the actual code in NumPy loops.c.src for signed integers
    _check_arity_and_homogeneity(sig, args, 2)

    num, den = args
    ty = sig.args[0]  # any arg type will do, homogeneous

    ZERO = context.get_constant(ty, 0)
    den_not_zero = builder.icmp_unsigned('!=', ZERO, den)
    bb_no_if = builder.basic_block
    # NOTE(review): the branch hint marks the non-zero-denominator path as
    # unlikely although it is the common case — confirm whether if_likely
    # was intended (semantics are unaffected either way).
    with cgutils.if_unlikely(builder, den_not_zero):
        bb_if = builder.basic_block
        # Truncated remainder, then shift by the denominator when the signs
        # differ so the result takes the denominator's sign (Python-style).
        mod = builder.srem(num,den)
        num_gt_zero = builder.icmp_signed('>', num, ZERO)
        den_gt_zero = builder.icmp_signed('>', den, ZERO)
        not_same_sign = builder.xor(num_gt_zero, den_gt_zero)
        mod_not_zero = builder.icmp_unsigned('!=', mod, ZERO)
        needs_fixing = builder.and_(not_same_sign, mod_not_zero)
        fix_value = builder.select(needs_fixing, den, ZERO)
        final_mod = builder.add(fix_value, mod)

    # Zero denominator yields zero (NumPy semantics, no exception).
    result = builder.phi(ZERO.type)
    result.add_incoming(ZERO, bb_no_if)
    result.add_incoming(final_mod, bb_if)

    return result


def np_int_sdivrem_impl(context, builder, sig, args):
    # np.divmod for signed integers: reuse the div and rem kernels.
    div = np_int_sdiv_impl(context, builder, sig.return_type[0](*sig.args), args)
    rem = np_int_srem_impl(context, builder, sig.return_type[1](*sig.args), args)
    return context.make_tuple(builder, sig.return_type, [div, rem])


def np_int_udiv_impl(context, builder, sig, args):
    _check_arity_and_homogeneity(sig, args, 2)

    num, den = args
    ty = sig.args[0]  # any arg type will do, homogeneous

    ZERO = context.get_constant(ty, 0)
    div_by_zero = builder.icmp_unsigned('==', ZERO, den)
    with builder.if_else(div_by_zero, likely=False) as (then, otherwise):
        with then:
            # division by zero
            bb_then = builder.basic_block
        with otherwise:
            # divide!
            div = builder.udiv(num, den)
            bb_otherwise = builder.basic_block

    result = builder.phi(ZERO.type)
    result.add_incoming(ZERO, bb_then)
    result.add_incoming(div, bb_otherwise)
    return result


def np_int_urem_impl(context, builder, sig, args):
    # based on the actual code in NumPy loops.c.src for signed integers
    _check_arity_and_homogeneity(sig, args, 2)

    num, den = args
    ty = sig.args[0]  # any arg type will do, homogeneous

    ZERO = context.get_constant(ty, 0)
    den_not_zero = builder.icmp_unsigned('!=', ZERO, den)
    bb_no_if = builder.basic_block
    with cgutils.if_unlikely(builder, den_not_zero):
        bb_if = builder.basic_block
        mod = builder.urem(num,den)

    # Zero denominator yields zero, mirroring the signed variant.
    result = builder.phi(ZERO.type)
    result.add_incoming(ZERO, bb_no_if)
    result.add_incoming(mod, bb_if)

    return result


def np_int_udivrem_impl(context, builder, sig, args):
    # np.divmod for unsigned integers.
    div = np_int_udiv_impl(context, builder, sig.return_type[0](*sig.args), args)
    rem = np_int_urem_impl(context, builder, sig.return_type[1](*sig.args), args)
    return context.make_tuple(builder, sig.return_type, [div, rem])


# implementation of int_fmod is in fact the same as the unsigned remainder,
# that is: srem with a special case returning 0 when the denominator is 0.
np_int_fmod_impl = np_int_urem_impl


def np_real_div_impl(context, builder, sig, args):
    # in NumPy real div has the same semantics as an fdiv for generating
    # NANs, INF and NINF
    _check_arity_and_homogeneity(sig, args, 2)
    return builder.fdiv(*args)


def np_real_mod_impl(context, builder, sig, args):
    # note: this maps to NumPy remainder, which has the same semantics as Python
    # based on code in loops.c.src
    _check_arity_and_homogeneity(sig, args, 2)
    in1, in2 = args
    ty = sig.args[0]

    ZERO = context.get_constant(ty, 0.0)
    # frem truncates toward zero; add one denominator when the signs of the
    # remainder and denominator differ so the result follows the denominator.
    res = builder.frem(in1, in2)
    res_ne_zero = builder.fcmp_ordered('!=', res, ZERO)
    den_lt_zero = builder.fcmp_ordered('<', in2, ZERO)
    res_lt_zero = builder.fcmp_ordered('<', res, ZERO)
    needs_fixing = builder.and_(res_ne_zero,
                                builder.xor(den_lt_zero, res_lt_zero))
    fix_value = builder.select(needs_fixing, in2, ZERO)

    return builder.fadd(res, fix_value)


def np_real_fmod_impl(context, builder, sig, args):
    # C-style fmod: truncated remainder, no sign fixup.
    _check_arity_and_homogeneity(sig, args, 2)
    return builder.frem(*args)


def _fabs(context, builder, arg):
    # Branchless floating-point absolute value via select.
    ZERO = llvmlite.ir.Constant(arg.type, 0.0)
    arg_negated = builder.fsub(ZERO, arg)
    arg_is_negative = builder.fcmp_ordered('<', arg, ZERO)
    return builder.select(arg_is_negative, arg_negated, arg)


def np_complex_div_impl(context, builder, sig, args):
    # Extracted from numpy/core/src/umath/loops.c.src,
    # inspired by complex_div_impl
    # variables named coherent with loops.c.src
    # This is implemented using the approach described in
    #   R.L. Smith. Algorithm 116: Complex division.
    #   Communications of the ACM, 5(8):435, 1962

    in1, in2 = [context.make_complex(builder, sig.args[0], value=arg)
                for arg in args]

    in1r = in1.real  # numerator.real
    in1i = in1.imag  # numerator.imag
    in2r = in2.real  # denominator.real
    in2i = in2.imag  # denominator.imag
    ftype = in1r.type
    assert all([i.type==ftype for i in [in1r, in1i, in2r, in2i]]), "mismatched types"
    out = context.make_helper(builder, sig.return_type)

    ZERO = llvmlite.ir.Constant(ftype, 0.0)
    ONE = llvmlite.ir.Constant(ftype, 1.0)

    # Smith's algorithm branches on which denominator component dominates,
    # to avoid overflow/underflow in the intermediate products.
    # if abs(denominator.real) >= abs(denominator.imag)
    in2r_abs = _fabs(context, builder, in2r)
    in2i_abs = _fabs(context, builder, in2i)
    in2r_abs_ge_in2i_abs = builder.fcmp_ordered('>=', in2r_abs, in2i_abs)
    with builder.if_else(in2r_abs_ge_in2i_abs) as (then, otherwise):
        with then:
            # if abs(denominator.real) == 0 and abs(denominator.imag) == 0
            in2r_is_zero = builder.fcmp_ordered('==', in2r_abs, ZERO)
            in2i_is_zero = builder.fcmp_ordered('==', in2i_abs, ZERO)
            in2_is_zero = builder.and_(in2r_is_zero, in2i_is_zero)
            with builder.if_else(in2_is_zero) as (inn_then, inn_otherwise):
                with inn_then:
                    # division by 0.
                    # fdiv generates the appropriate NAN/INF/NINF
                    out.real = builder.fdiv(in1r, in2r_abs)
                    out.imag = builder.fdiv(in1i, in2i_abs)
                with inn_otherwise:
                    # general case for:
                    # abs(denominator.real) > abs(denominator.imag)
                    rat = builder.fdiv(in2i, in2r)
                    # scl = 1.0/(in2r + in2i*rat)
                    tmp1 = builder.fmul(in2i, rat)
                    tmp2 = builder.fadd(in2r, tmp1)
                    scl = builder.fdiv(ONE, tmp2)
                    # out.real = (in1r + in1i*rat)*scl
                    # out.imag = (in1i - in1r*rat)*scl
                    tmp3 = builder.fmul(in1i, rat)
                    tmp4 = builder.fmul(in1r, rat)
                    tmp5 = builder.fadd(in1r, tmp3)
                    tmp6 = builder.fsub(in1i, tmp4)
                    out.real = builder.fmul(tmp5, scl)
                    out.imag = builder.fmul(tmp6, scl)
        with otherwise:
            # general case for:
            # abs(denominator.imag) > abs(denominator.real)
            rat = builder.fdiv(in2r, in2i)
            # scl = 1.0/(in2i + in2r*rat)
            tmp1 = builder.fmul(in2r, rat)
            tmp2 = builder.fadd(in2i, tmp1)
            scl = builder.fdiv(ONE, tmp2)
            # out.real = (in1r*rat + in1i)*scl
            # out.imag = (in1i*rat - in1r)*scl
            tmp3 = builder.fmul(in1r, rat)
            tmp4 = builder.fmul(in1i, rat)
            tmp5 = builder.fadd(tmp3, in1i)
            tmp6 = builder.fsub(tmp4, in1r)
            out.real = builder.fmul(tmp5, scl)
            out.imag = builder.fmul(tmp6, scl)

    return out._getvalue()


########################################################################
# NumPy logaddexp

def _npy_logaddexp(x1, x2):
    # Stub used as an @overload key; see _generate_logaddexp below.
    pass

def _generate_logaddexp(fnoverload, const, log1pfn, expfn):
    # Code generation for logaddexp and logaddexp2 is based on:
    # https://github.com/numpy/numpy/blob/12c2b7dd62fc0c14b81c8892ed5f4f59cc94d09c/numpy/core/src/npymath/npy_math_internal.h.src#L467-L507

    @overload(fnoverload, target='generic')
    def ol_npy_logaddexp(x1, x2):
        # Only homogeneous argument types are supported.
        if x1 != x2:
            return
        shift = x1(const)
        def impl(x1, x2):
            x, y = x1, x2
            if (x == y):
                # Handles infinities of the same sign without warnings
                return x + shift
            else:
                tmp = x - y
                if (tmp > 0):
                    return x + log1pfn(expfn(-tmp))
                elif (tmp <= 0):
                    # (definition continues beyond this chunk)
                    return y +
log1pfn(expfn(tmp)) + else: + # NaN + return tmp + return impl + +def _npy_logaddexp(x1, x2): + pass + + +_generate_logaddexp(_npy_logaddexp, _NPY_LOGE2, np.log1p, np.exp) + + +def np_real_logaddexp_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 2) + + fnty = context.typing_context.resolve_value_type(_npy_logaddexp) + sig = fnty.get_call_type(context.typing_context, (*sig.args,), {}) + impl = context.get_function(fnty, sig) + return impl(builder, args) + +######################################################################## +# NumPy logaddexp2 +def _npy_logaddexp2(x1, x2): + pass + +def npy_log2_1p(x): + pass + +# The following npy_log2_1p function is a translation of: +# https://github.com/numpy/numpy/blob/12c2b7dd62fc0c14b81c8892ed5f4f59cc94d09c/numpy/core/src/npymath/npy_math_internal.h.src#L457-L460 + +@overload(npy_log2_1p, target='generic') +def ol_npy_log2_1p(x): + LOG2E = x(_NPY_LOG2E) + def impl(x): + return LOG2E * np.log1p(x) + return impl + + +_generate_logaddexp(_npy_logaddexp2, 1.0, npy_log2_1p, np.exp2) + + +def np_real_logaddexp2_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 2) + + fnty = context.typing_context.resolve_value_type(_npy_logaddexp2) + sig = fnty.get_call_type(context.typing_context, (*sig.args,), {}) + impl = context.get_function(fnty, sig) + return impl(builder, args) + + +######################################################################## +# true div kernels + +def np_int_truediv_impl(context, builder, sig, args): + # in NumPy we don't check for 0 denominator... fdiv handles div by + # 0 in the way NumPy expects.. 
+ # integer truediv always yields double + num, den = args + lltype = num.type + assert all(i.type==lltype for i in args), "must have homogeneous types" + numty, denty = sig.args + + num = context.cast(builder, num, numty, types.float64) + den = context.cast(builder, den, denty, types.float64) + + return builder.fdiv(num,den) + + +######################################################################## +# floor div kernels + +def np_real_floor_div_impl(context, builder, sig, args): + res = np_real_div_impl(context, builder, sig, args) + s = typing.signature(sig.return_type, sig.return_type) + return np_real_floor_impl(context, builder, s, (res,)) + + +def np_real_divmod_impl(context, builder, sig, args): + div = np_real_floor_div_impl(context, builder, sig.return_type[0](*sig.args), args) + rem = np_real_mod_impl(context, builder, sig.return_type[1](*sig.args), args) + return context.make_tuple(builder, sig.return_type, [div, rem]) + + +def np_complex_floor_div_impl(context, builder, sig, args): + # this is based on the complex floor divide in Numpy's loops.c.src + # This is basically a full complex division with a complex floor + # applied. + # The complex floor seems to be defined as the real floor applied + # with the real part and zero in the imaginary part. Fully developed + # so it avoids computing anything related to the imaginary result. 
+ float_kind = sig.args[0].underlying_float + floor_sig = typing.signature(float_kind, float_kind) + + in1, in2 = [context.make_complex(builder, sig.args[0], value=arg) + for arg in args] + + in1r = in1.real + in1i = in1.imag + in2r = in2.real + in2i = in2.imag + ftype = in1r.type + assert all([i.type==ftype for i in [in1r, in1i, in2r, in2i]]), "mismatched types" + + ZERO = llvmlite.ir.Constant(ftype, 0.0) + + out = context.make_helper(builder, sig.return_type) + out.imag = ZERO + + in2r_abs = _fabs(context, builder, in2r) + in2i_abs = _fabs(context, builder, in2i) + in2r_abs_ge_in2i_abs = builder.fcmp_ordered('>=', in2r_abs, in2i_abs) + + with builder.if_else(in2r_abs_ge_in2i_abs) as (then, otherwise): + with then: + rat = builder.fdiv(in2i, in2r) + # out.real = floor((in1r+in1i*rat)/(in2r + in2i*rat)) + tmp1 = builder.fmul(in1i, rat) + tmp2 = builder.fmul(in2i, rat) + tmp3 = builder.fadd(in1r, tmp1) + tmp4 = builder.fadd(in2r, tmp2) + tmp5 = builder.fdiv(tmp3, tmp4) + out.real = np_real_floor_impl(context, builder, floor_sig, (tmp5,)) + with otherwise: + rat = builder.fdiv(in2r, in2i) + # out.real = floor((in1i + in1r*rat)/(in2i + in2r*rat)) + tmp1 = builder.fmul(in1r, rat) + tmp2 = builder.fmul(in2r, rat) + tmp3 = builder.fadd(in1i, tmp1) + tmp4 = builder.fadd(in2i, tmp2) + tmp5 = builder.fdiv(tmp3, tmp4) + out.real = np_real_floor_impl(context, builder, floor_sig, (tmp5,)) + return out._getvalue() + + +######################################################################## +# numpy power funcs + +def np_complex_power_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 2) + + return numbers.complex_power_impl(context, builder, sig, args) + + +######################################################################## +# numpy float power funcs + +def real_float_power_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 2) + + return numbers.real_power_impl(context, builder, sig, args) + + +def 
np_complex_float_power_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 2) + + return numbers.complex_power_impl(context, builder, sig, args) + + +######################################################################## +# numpy greatest common denominator + +def np_gcd_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 2) + return mathimpl.gcd_impl(context, builder, sig, args) + + +######################################################################## +# numpy lowest common multiple + +def np_lcm_impl(context, builder, sig, args): + + xty, yty = sig.args + assert xty == yty == sig.return_type + x, y = args + + def lcm(a, b): + """ + Like gcd, heavily cribbed from Julia. + """ + return 0 if a == 0 else abs(a * (b // np.gcd(b, a))) + + res = context.compile_internal(builder, lcm, sig, args) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +######################################################################## +# Numpy style complex sign + +def np_complex_sign_impl(context, builder, sig, args): + # equivalent to complex sign in NumPy's sign + # but implemented via selects, balancing the 4 cases. 
+ _check_arity_and_homogeneity(sig, args, 1) + + if numpy_version >= (2, 0): + # NumPy >= 2.0.0 + def complex_sign(z): + abs = math.hypot(z.real, z.imag) + if abs == 0: + return 0 + 0j + else: + return z / abs + + res = context.compile_internal(builder, complex_sign, sig, args) + return impl_ret_untracked(context, builder, sig.return_type, res) + else: + op = args[0] + ty = sig.args[0] + result = context.make_complex(builder, ty) + float_ty = ty.underlying_float + + ZERO = context.get_constant(float_ty, 0.0) + ONE = context.get_constant(float_ty, 1.0) + MINUS_ONE = context.get_constant(float_ty, -1.0) + NAN = context.get_constant(float_ty, float('nan')) + + result.real = ZERO + result.imag = ZERO + cmp_sig = typing.signature(types.boolean, *[ty] * 2) + cmp_args = [op, result._getvalue()] + arg1_ge_arg2 = np_complex_ge_impl(context, builder, cmp_sig, cmp_args) + arg1_eq_arg2 = np_complex_eq_impl(context, builder, cmp_sig, cmp_args) + arg1_lt_arg2 = np_complex_lt_impl(context, builder, cmp_sig, cmp_args) + + real_when_ge = builder.select(arg1_eq_arg2, ZERO, ONE) + real_when_nge = builder.select(arg1_lt_arg2, MINUS_ONE, NAN) + result.real = builder.select(arg1_ge_arg2, real_when_ge, real_when_nge) + + return result._getvalue() + + +######################################################################## +# Numpy rint + +def np_real_rint_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + + return mathimpl.call_fp_intrinsic(builder, 'llvm.rint', args) + + +def np_complex_rint_impl(context, builder, sig, args): + # based on code in NumPy's funcs.inc.src + # rint of a complex number defined as rint of its real and imag + # parts + _check_arity_and_homogeneity(sig, args, 1) + ty = sig.args[0] + float_ty = ty.underlying_float + in1 = context.make_complex(builder, ty, value=args[0]) + out = context.make_complex(builder, ty) + + inner_sig = typing.signature(*[float_ty]*2) + out.real = np_real_rint_impl(context, builder, inner_sig, [in1.real]) + 
out.imag = np_real_rint_impl(context, builder, inner_sig, [in1.imag]) + return out._getvalue() + + +######################################################################## +# NumPy exp + +def np_real_exp_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return mathimpl.exp_impl(context, builder, sig, args) + + +def np_complex_exp_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return cmathimpl.exp_impl(context, builder, sig, args) + +######################################################################## +# NumPy exp2 + +def np_real_exp2_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + + ll_ty = args[0].type + fnty = llvmlite.ir.FunctionType(ll_ty, [ll_ty,]) + fn = cgutils.insert_pure_function(builder.module, fnty, + name='llvm.exp2') + return builder.call(fn, [args[0]]) + + +def np_complex_exp2_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + ty = sig.args[0] + float_ty = ty.underlying_float + in1 = context.make_complex(builder, ty, value=args[0]) + tmp = context.make_complex(builder, ty) + loge2 = context.get_constant(float_ty, _NPY_LOGE2) + tmp.real = builder.fmul(loge2, in1.real) + tmp.imag = builder.fmul(loge2, in1.imag) + return np_complex_exp_impl(context, builder, sig, [tmp._getvalue()]) + + +######################################################################## +# NumPy log + +def np_real_log_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return mathimpl.log_impl(context, builder, sig, args) + + +def np_complex_log_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return cmathimpl.log_impl(context, builder, sig, args) + +######################################################################## +# NumPy log2 + +def np_real_log2_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + + ll_ty = args[0].type + fnty = 
llvmlite.ir.FunctionType(ll_ty, [ll_ty,]) + fn = cgutils.insert_pure_function(builder.module, fnty, + name='llvm.log2') + return builder.call(fn, [args[0]]) + +def np_complex_log2_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + + ty = sig.args[0] + float_ty = ty.underlying_float + tmp = np_complex_log_impl(context, builder, sig, args) + tmp = context.make_complex(builder, ty, value=tmp) + log2e = context.get_constant(float_ty, _NPY_LOG2E) + tmp.real = builder.fmul(log2e, tmp.real) + tmp.imag = builder.fmul(log2e, tmp.imag) + return tmp._getvalue() + + +######################################################################## +# NumPy log10 + +def np_real_log10_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return mathimpl.log10_impl(context, builder, sig, args) + + +def np_complex_log10_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + + ty = sig.args[0] + float_ty = ty.underlying_float + tmp = np_complex_log_impl(context, builder, sig, args) + tmp = context.make_complex(builder, ty, value=tmp) + log10e = context.get_constant(float_ty, _NPY_LOG10E) + tmp.real = builder.fmul(log10e, tmp.real) + tmp.imag = builder.fmul(log10e, tmp.imag) + return tmp._getvalue() + + +######################################################################## +# NumPy expm1 + +def np_real_expm1_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return mathimpl.expm1_impl(context, builder, sig, args) + +def np_complex_expm1_impl(context, builder, sig, args): + # this is based on nc_expm1 in funcs.inc.src + _check_arity_and_homogeneity(sig, args, 1) + + ty = sig.args[0] + float_ty = ty.underlying_float + float_unary_sig = typing.signature(*[float_ty]*2) + + MINUS_ONE = context.get_constant(float_ty, -1.0) + in1 = context.make_complex(builder, ty, value=args[0]) + a = np_real_exp_impl(context, builder, float_unary_sig, [in1.real]) + out = 
context.make_complex(builder, ty) + cos_imag = np_real_cos_impl(context, builder, float_unary_sig, [in1.imag]) + sin_imag = np_real_sin_impl(context, builder, float_unary_sig, [in1.imag]) + tmp = builder.fmul(a, cos_imag) + out.imag = builder.fmul(a, sin_imag) + out.real = builder.fadd(tmp, MINUS_ONE) + + return out._getvalue() + + +######################################################################## +# NumPy log1p + +def np_real_log1p_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return mathimpl.log1p_impl(context, builder, sig, args) + +def np_complex_log1p_impl(context, builder, sig, args): + # base on NumPy's nc_log1p in funcs.inc.src + _check_arity_and_homogeneity(sig, args, 1) + + ty = sig.args[0] + float_ty = ty.underlying_float + float_unary_sig = typing.signature(*[float_ty]*2) + float_binary_sig = typing.signature(*[float_ty]*3) + + ONE = context.get_constant(float_ty, 1.0) + in1 = context.make_complex(builder, ty, value=args[0]) + out = context.make_complex(builder, ty) + real_plus_one = builder.fadd(in1.real, ONE) + l = np_real_hypot_impl(context, builder, float_binary_sig, + [real_plus_one, in1.imag]) + out.imag = np_real_atan2_impl(context, builder, float_binary_sig, + [in1.imag, real_plus_one]) + out.real = np_real_log_impl(context, builder, float_unary_sig, [l]) + + return out._getvalue() + + +######################################################################## +# NumPy sqrt + +def np_real_sqrt_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return mathimpl.sqrt_impl(context, builder, sig, args) + + +def np_complex_sqrt_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return cmathimpl.sqrt_impl(context, builder, sig, args) + + +######################################################################## +# NumPy square + +def np_int_square_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return 
builder.mul(args[0], args[0]) + + +def np_real_square_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return builder.fmul(args[0], args[0]) + +def np_complex_square_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + binary_sig = typing.signature(*[sig.return_type]*3) + return numbers.complex_mul_impl(context, builder, binary_sig, + [args[0], args[0]]) + + +######################################################################## +# NumPy cbrt + +def np_real_cbrt_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + + # We enable fastmath here to force np.power(x, 1/3) to generate a + # call to libm cbrt function + @register_jitable(fastmath=True) + def cbrt(x): + if x < 0: + return -np.power(-x, 1.0 / 3.0) + else: + return np.power(x, 1.0 / 3.0) + + def _cbrt(x): + if np.isnan(x): + return np.nan + return cbrt(x) + + return context.compile_internal(builder, _cbrt, sig, args) + + +######################################################################## +# NumPy reciprocal + +def np_int_reciprocal_impl(context, builder, sig, args): + # based on the implementation in loops.c.src + # integer versions for reciprocal are performed via promotion + # using double, and then converted back to the type + _check_arity_and_homogeneity(sig, args, 1) + ty = sig.return_type + + binary_sig = typing.signature(*[ty]*3) + in_as_float = context.cast(builder, args[0], ty, types.float64) + ONE = context.get_constant(types.float64, 1) + result_as_float = builder.fdiv(ONE, in_as_float) + return context.cast(builder, result_as_float, types.float64, ty) + + +def np_real_reciprocal_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + ONE = context.get_constant(sig.return_type, 1.0) + return builder.fdiv(ONE, args[0]) + + +def np_complex_reciprocal_impl(context, builder, sig, args): + # based on the implementation in loops.c.src + # Basically the same Smith method used for 
division, but with + # the numerator substituted by 1.0 + _check_arity_and_homogeneity(sig, args, 1) + + ty = sig.args[0] + float_ty = ty.underlying_float + + ZERO = context.get_constant(float_ty, 0.0) + ONE = context.get_constant(float_ty, 1.0) + in1 = context.make_complex(builder, ty, value=args[0]) + out = context.make_complex(builder, ty) + in1r = in1.real + in1i = in1.imag + in1r_abs = _fabs(context, builder, in1r) + in1i_abs = _fabs(context, builder, in1i) + in1i_abs_le_in1r_abs = builder.fcmp_ordered('<=', in1i_abs, in1r_abs) + + with builder.if_else(in1i_abs_le_in1r_abs) as (then, otherwise): + with then: + r = builder.fdiv(in1i, in1r) + tmp0 = builder.fmul(in1i, r) + d = builder.fadd(in1r, tmp0) + inv_d = builder.fdiv(ONE, d) + minus_r = builder.fsub(ZERO, r) + out.real = inv_d + out.imag = builder.fmul(minus_r, inv_d) + with otherwise: + r = builder.fdiv(in1r, in1i) + tmp0 = builder.fmul(in1r, r) + d = builder.fadd(tmp0, in1i) + inv_d = builder.fdiv(ONE, d) + out.real = builder.fmul(r, inv_d) + out.imag = builder.fsub(ZERO, inv_d) + + return out._getvalue() + + +######################################################################## +# NumPy sin + +def np_real_sin_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return mathimpl.sin_impl(context, builder, sig, args) + + +def np_complex_sin_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return cmathimpl.sin_impl(context, builder, sig, args) + + +######################################################################## +# NumPy cos + +def np_real_cos_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return mathimpl.cos_impl(context, builder, sig, args) + + +def np_complex_cos_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return cmathimpl.cos_impl(context, builder, sig, args) + + +######################################################################## +# NumPy tan + +def 
np_real_tan_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return mathimpl.tan_impl(context, builder, sig, args) + + +######################################################################## +# NumPy asin + +def np_real_asin_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return mathimpl.asin_impl(context, builder, sig, args) + + +######################################################################## +# NumPy acos + +def np_real_acos_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return mathimpl.acos_impl(context, builder, sig, args) + + +######################################################################## +# NumPy atan + +def np_real_atan_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return mathimpl.atan_impl(context, builder, sig, args) + + +######################################################################## +# NumPy atan2 + +def np_real_atan2_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 2) + return mathimpl.atan2_float_impl(context, builder, sig, args) + + +######################################################################## +# NumPy hypot + +def np_real_hypot_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 2) + return mathimpl.hypot_float_impl(context, builder, sig, args) + + +######################################################################## +# NumPy sinh + +def np_real_sinh_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return mathimpl.sinh_impl(context, builder, sig, args) + + +def np_complex_sinh_impl(context, builder, sig, args): + # npymath does not provide a complex sinh. The code in funcs.inc.src + # is translated here... 
+ _check_arity_and_homogeneity(sig, args, 1) + + + ty = sig.args[0] + fty = ty.underlying_float + fsig1 = typing.signature(*[fty]*2) + x = context.make_complex(builder, ty, args[0]) + out = context.make_complex(builder, ty) + xr = x.real + xi = x.imag + + sxi = np_real_sin_impl(context, builder, fsig1, [xi]) + shxr = np_real_sinh_impl(context, builder, fsig1, [xr]) + cxi = np_real_cos_impl(context, builder, fsig1, [xi]) + chxr = np_real_cosh_impl(context, builder, fsig1, [xr]) + + out.real = builder.fmul(cxi, shxr) + out.imag = builder.fmul(sxi, chxr) + + return out._getvalue() + + +######################################################################## +# NumPy cosh + +def np_real_cosh_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return mathimpl.cosh_impl(context, builder, sig, args) + + +def np_complex_cosh_impl(context, builder, sig, args): + # npymath does not provide a complex cosh. The code in funcs.inc.src + # is translated here... + _check_arity_and_homogeneity(sig, args, 1) + + ty = sig.args[0] + fty = ty.underlying_float + fsig1 = typing.signature(*[fty]*2) + x = context.make_complex(builder, ty, args[0]) + out = context.make_complex(builder, ty) + xr = x.real + xi = x.imag + + cxi = np_real_cos_impl(context, builder, fsig1, [xi]) + chxr = np_real_cosh_impl(context, builder, fsig1, [xr]) + sxi = np_real_sin_impl(context, builder, fsig1, [xi]) + shxr = np_real_sinh_impl(context, builder, fsig1, [xr]) + + out.real = builder.fmul(cxi, chxr) + out.imag = builder.fmul(sxi, shxr) + + return out._getvalue() + + +######################################################################## +# NumPy tanh + +def np_real_tanh_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return mathimpl.tanh_impl(context, builder, sig, args) + + +def np_complex_tanh_impl(context, builder, sig, args): + # npymath does not provide complex tan functions. The code + # in funcs.inc.src for tanh is translated here... 
+ _check_arity_and_homogeneity(sig, args, 1) + + ty = sig.args[0] + fty = ty.underlying_float + fsig1 = typing.signature(*[fty]*2) + ONE = context.get_constant(fty, 1.0) + x = context.make_complex(builder, ty, args[0]) + out = context.make_complex(builder, ty) + + xr = x.real + xi = x.imag + si = np_real_sin_impl(context, builder, fsig1, [xi]) + ci = np_real_cos_impl(context, builder, fsig1, [xi]) + shr = np_real_sinh_impl(context, builder, fsig1, [xr]) + chr_ = np_real_cosh_impl(context, builder, fsig1, [xr]) + rs = builder.fmul(ci, shr) + is_ = builder.fmul(si, chr_) + rc = builder.fmul(ci, chr_) + ic = builder.fmul(si, shr) # note: opposite sign from code in funcs.inc.src + sqr_rc = builder.fmul(rc, rc) + sqr_ic = builder.fmul(ic, ic) + d = builder.fadd(sqr_rc, sqr_ic) + inv_d = builder.fdiv(ONE, d) + rs_rc = builder.fmul(rs, rc) + is_ic = builder.fmul(is_, ic) + is_rc = builder.fmul(is_, rc) + rs_ic = builder.fmul(rs, ic) + numr = builder.fadd(rs_rc, is_ic) + numi = builder.fsub(is_rc, rs_ic) + out.real = builder.fmul(numr, inv_d) + out.imag = builder.fmul(numi, inv_d) + + return out._getvalue() + + +######################################################################## +# NumPy asinh + +def np_real_asinh_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return mathimpl.asinh_impl(context, builder, sig, args) + + +######################################################################## +# NumPy acosh + +def np_real_acosh_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return mathimpl.acosh_impl(context, builder, sig, args) + + +def np_complex_acosh_impl(context, builder, sig, args): + # npymath does not provide a complex acosh. The code in funcs.inc.src + # is translated here... 
+ # log(x + sqrt(x+1) * sqrt(x-1)) + _check_arity_and_homogeneity(sig, args, 1) + + ty = sig.args[0] + csig2 = typing.signature(*[ty]*3) + + ONE = context.get_constant_generic(builder, ty, 1.0 + 0.0j) + x = args[0] + + x_plus_one = numbers.complex_add_impl(context, builder, csig2, [x, + ONE]) + x_minus_one = numbers.complex_sub_impl(context, builder, csig2, [x, + ONE]) + sqrt_x_plus_one = np_complex_sqrt_impl(context, builder, sig, [x_plus_one]) + sqrt_x_minus_one = np_complex_sqrt_impl(context, builder, sig, [x_minus_one]) + prod_sqrt = numbers.complex_mul_impl(context, builder, csig2, + [sqrt_x_plus_one, + sqrt_x_minus_one]) + log_arg = numbers.complex_add_impl(context, builder, csig2, [x, + prod_sqrt]) + + return np_complex_log_impl(context, builder, sig, [log_arg]) + + +######################################################################## +# NumPy atanh + +def np_real_atanh_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + return mathimpl.atanh_impl(context, builder, sig, args) + + +######################################################################## +# NumPy floor + +def np_real_floor_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + + return mathimpl.call_fp_intrinsic(builder, 'llvm.floor', args) + + +######################################################################## +# NumPy ceil + +def np_real_ceil_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + + return mathimpl.call_fp_intrinsic(builder, 'llvm.ceil', args) + + +######################################################################## +# NumPy trunc + +def np_real_trunc_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, args, 1) + + return mathimpl.call_fp_intrinsic(builder, 'llvm.trunc', args) + + +######################################################################## +# NumPy fabs + +def np_real_fabs_impl(context, builder, sig, args): + _check_arity_and_homogeneity(sig, 
args, 1) + + return mathimpl.call_fp_intrinsic(builder, 'llvm.fabs', args) + + +######################################################################## +# NumPy style predicates + +# For real and integer types rely on numbers... but complex ordering in +# NumPy is lexicographic (while Python does not provide ordering). +def np_complex_ge_impl(context, builder, sig, args): + # equivalent to macro CGE in NumPy's loops.c.src + # ((xr > yr && !npy_isnan(xi) && !npy_isnan(yi)) || (xr == yr && xi >= yi)) + _check_arity_and_homogeneity(sig, args, 2, return_type=types.boolean) + + ty = sig.args[0] + in1, in2 = [context.make_complex(builder, ty, value=arg) for arg in args] + xr = in1.real + xi = in1.imag + yr = in2.real + yi = in2.imag + + xr_gt_yr = builder.fcmp_ordered('>', xr, yr) + no_nan_xi_yi = builder.fcmp_ordered('ord', xi, yi) + xr_eq_yr = builder.fcmp_ordered('==', xr, yr) + xi_ge_yi = builder.fcmp_ordered('>=', xi, yi) + first_term = builder.and_(xr_gt_yr, no_nan_xi_yi) + second_term = builder.and_(xr_eq_yr, xi_ge_yi) + return builder.or_(first_term, second_term) + + +def np_complex_le_impl(context, builder, sig, args): + # equivalent to macro CLE in NumPy's loops.c.src + # ((xr < yr && !npy_isnan(xi) && !npy_isnan(yi)) || (xr == yr && xi <= yi)) + _check_arity_and_homogeneity(sig, args, 2, return_type=types.boolean) + + ty = sig.args[0] + in1, in2 = [context.make_complex(builder, ty, value=arg) for arg in args] + xr = in1.real + xi = in1.imag + yr = in2.real + yi = in2.imag + + xr_lt_yr = builder.fcmp_ordered('<', xr, yr) + no_nan_xi_yi = builder.fcmp_ordered('ord', xi, yi) + xr_eq_yr = builder.fcmp_ordered('==', xr, yr) + xi_le_yi = builder.fcmp_ordered('<=', xi, yi) + first_term = builder.and_(xr_lt_yr, no_nan_xi_yi) + second_term = builder.and_(xr_eq_yr, xi_le_yi) + return builder.or_(first_term, second_term) + + +def np_complex_gt_impl(context, builder, sig, args): + # equivalent to macro CGT in NumPy's loops.c.src + # ((xr > yr && !npy_isnan(xi) && 
# --- tail of np_complex_ge_impl: its `def` line precedes this chunk ---
# (continuation of the CGE macro comment:)
#  !npy_isnan(yi)) || (xr == yr && xi > yi))
    _check_arity_and_homogeneity(sig, args, 2, return_type=types.boolean)

    ty = sig.args[0]
    # Unpack both complex operands into (real, imag) LLVM values.
    in1, in2 = [context.make_complex(builder, ty, value=arg) for arg in args]
    xr = in1.real
    xi = in1.imag
    yr = in2.real
    yi = in2.imag

    xr_gt_yr = builder.fcmp_ordered('>', xr, yr)
    # 'ord' is true only when neither operand is NaN.
    no_nan_xi_yi = builder.fcmp_ordered('ord', xi, yi)
    xr_eq_yr = builder.fcmp_ordered('==', xr, yr)
    xi_gt_yi = builder.fcmp_ordered('>', xi, yi)
    first_term = builder.and_(xr_gt_yr, no_nan_xi_yi)
    second_term = builder.and_(xr_eq_yr, xi_gt_yi)
    return builder.or_(first_term, second_term)


def np_complex_lt_impl(context, builder, sig, args):
    """Lower complex '<' (lexicographic on (real, imag)) to LLVM IR.

    Returns an i1 boolean value.
    """
    # equivalent to macro CLT in NumPy's loops.c.src
    # ((xr < yr && !npy_isnan(xi) && !npy_isnan(yi)) || (xr == yr && xi < yi))
    _check_arity_and_homogeneity(sig, args, 2, return_type=types.boolean)

    ty = sig.args[0]
    in1, in2 = [context.make_complex(builder, ty, value=arg) for arg in args]
    xr = in1.real
    xi = in1.imag
    yr = in2.real
    yi = in2.imag

    xr_lt_yr = builder.fcmp_ordered('<', xr, yr)
    # 'ord' is true only when neither imaginary part is NaN.
    no_nan_xi_yi = builder.fcmp_ordered('ord', xi, yi)
    xr_eq_yr = builder.fcmp_ordered('==', xr, yr)
    xi_lt_yi = builder.fcmp_ordered('<', xi, yi)
    first_term = builder.and_(xr_lt_yr, no_nan_xi_yi)
    second_term = builder.and_(xr_eq_yr, xi_lt_yi)
    return builder.or_(first_term, second_term)


def np_complex_eq_impl(context, builder, sig, args):
    """Lower complex '==' (both components equal) to LLVM IR."""
    # equivalent to macro CEQ in NumPy's loops.c.src
    # (xr == yr && xi == yi)
    _check_arity_and_homogeneity(sig, args, 2, return_type=types.boolean)

    ty = sig.args[0]
    in1, in2 = [context.make_complex(builder, ty, value=arg) for arg in args]
    xr = in1.real
    xi = in1.imag
    yr = in2.real
    yi = in2.imag

    # ordered '==' is false when either operand is NaN.
    xr_eq_yr = builder.fcmp_ordered('==', xr, yr)
    xi_eq_yi = builder.fcmp_ordered('==', xi, yi)
    return builder.and_(xr_eq_yr, xi_eq_yi)


def np_complex_ne_impl(context, builder, sig, args):
    """Lower complex '!=' (either component differs) to LLVM IR."""
    # equivalent to macro CNE in NumPy's loops.c.src
    # (xr != yr || xi != yi)
    _check_arity_and_homogeneity(sig, args, 2, return_type=types.boolean)

    ty = sig.args[0]
    in1, in2 = [context.make_complex(builder, ty, value=arg) for arg in args]
    xr = in1.real
    xi = in1.imag
    yr = in2.real
    yi = in2.imag

    # unordered '!=' is true when either operand is NaN: the exact dual
    # of the ordered '==' used by np_complex_eq_impl.
    xr_ne_yr = builder.fcmp_unordered('!=', xr, yr)
    xi_ne_yi = builder.fcmp_unordered('!=', xi, yi)
    return builder.or_(xr_ne_yr, xi_ne_yi)


########################################################################
# NumPy logical algebra

# these are made generic for all types for now, assuming that
# cgutils.is_true works in the underlying types.

def _complex_is_true(context, builder, ty, val):
    # A complex value is truthy when either component is non-zero.
    complex_val = context.make_complex(builder, ty, value=val)
    re_true = cgutils.is_true(builder, complex_val.real)
    im_true = cgutils.is_true(builder, complex_val.imag)
    return builder.or_(re_true, im_true)


def np_logical_and_impl(context, builder, sig, args):
    """Lower np.logical_and for non-complex scalar operands."""
    _check_arity_and_homogeneity(sig, args, 2, return_type=types.boolean)
    a = cgutils.is_true(builder, args[0])
    b = cgutils.is_true(builder, args[1])
    return builder.and_(a, b)


def np_complex_logical_and_impl(context, builder, sig, args):
    """Lower np.logical_and for complex operands (truthiness AND)."""
    _check_arity_and_homogeneity(sig, args, 2, return_type=types.boolean)
    a = _complex_is_true(context, builder, sig.args[0], args[0])
    b = _complex_is_true(context, builder, sig.args[1], args[1])
    return builder.and_(a, b)


def np_logical_or_impl(context, builder, sig, args):
    """Lower np.logical_or for non-complex scalar operands."""
    _check_arity_and_homogeneity(sig, args, 2, return_type=types.boolean)
    a = cgutils.is_true(builder, args[0])
    b = cgutils.is_true(builder, args[1])
    return builder.or_(a, b)


def np_complex_logical_or_impl(context, builder, sig, args):
    """Lower np.logical_or for complex operands (truthiness OR)."""
    _check_arity_and_homogeneity(sig, args, 2, return_type=types.boolean)
    a = _complex_is_true(context, builder, sig.args[0], args[0])
    b = _complex_is_true(context, builder, sig.args[1], args[1])
    return builder.or_(a, b)


# --- head of np_logical_xor_impl: its body continues in the next chunk ---
def np_logical_xor_impl(context, builder, sig,
# --- tail of np_logical_xor_impl: its `def` line ends the previous chunk ---
                        args):
    _check_arity_and_homogeneity(sig, args, 2, return_type=types.boolean)
    a = cgutils.is_true(builder, args[0])
    b = cgutils.is_true(builder, args[1])
    return builder.xor(a, b)


def np_complex_logical_xor_impl(context, builder, sig, args):
    """Lower np.logical_xor for complex operands (truthiness XOR)."""
    _check_arity_and_homogeneity(sig, args, 2, return_type=types.boolean)
    a = _complex_is_true(context, builder, sig.args[0], args[0])
    b = _complex_is_true(context, builder, sig.args[1], args[1])
    return builder.xor(a, b)


def np_logical_not_impl(context, builder, sig, args):
    """Lower np.logical_not for non-complex scalar operands."""
    _check_arity_and_homogeneity(sig, args, 1, return_type=types.boolean)
    return cgutils.is_false(builder, args[0])


def np_complex_logical_not_impl(context, builder, sig, args):
    """Lower np.logical_not for complex operands."""
    _check_arity_and_homogeneity(sig, args, 1, return_type=types.boolean)
    a = _complex_is_true(context, builder, sig.args[0], args[0])
    return builder.not_(a)

########################################################################
# NumPy style max/min
#
# There are 2 different sets of functions to perform max and min in
# NumPy: maximum/minimum and fmax/fmin.
# Both differ in the way NaNs are handled, so the actual differences
# come in action only on float/complex numbers. The functions used for
# integers is shared. For booleans maximum is equivalent to or, and
# minimum is equivalent to and. Datetime support will go elsewhere.

def np_int_smax_impl(context, builder, sig, args):
    """Signed integer maximum."""
    _check_arity_and_homogeneity(sig, args, 2)
    arg1, arg2 = args
    arg1_sge_arg2 = builder.icmp_signed('>=', arg1, arg2)
    return builder.select(arg1_sge_arg2, arg1, arg2)


def np_int_umax_impl(context, builder, sig, args):
    """Unsigned integer maximum."""
    _check_arity_and_homogeneity(sig, args, 2)
    arg1, arg2 = args
    arg1_uge_arg2 = builder.icmp_unsigned('>=', arg1, arg2)
    return builder.select(arg1_uge_arg2, arg1, arg2)


def np_real_maximum_impl(context, builder, sig, args):
    """Floating-point np.maximum: NaN-propagating maximum."""
    # maximum prefers nan (tries to return a nan).
    _check_arity_and_homogeneity(sig, args, 2)

    arg1, arg2 = args
    # 'uno' (unordered) compares true when at least one operand is NaN;
    # x uno x is the classic "is x NaN" test.
    arg1_nan = builder.fcmp_unordered('uno', arg1, arg1)
    any_nan = builder.fcmp_unordered('uno', arg1, arg2)
    # When a NaN is present, return arg1 if it is the NaN, else arg2.
    nan_result = builder.select(arg1_nan, arg1, arg2)

    arg1_ge_arg2 = builder.fcmp_ordered('>=', arg1, arg2)
    non_nan_result = builder.select(arg1_ge_arg2, arg1, arg2)

    return builder.select(any_nan, nan_result, non_nan_result)


def np_real_fmax_impl(context, builder, sig, args):
    """Floating-point np.fmax: NaN-avoiding maximum."""
    # fmax prefers non-nan (tries to return a non-nan).
    _check_arity_and_homogeneity(sig, args, 2)

    arg1, arg2 = args
    arg2_nan = builder.fcmp_unordered('uno', arg2, arg2)
    any_nan = builder.fcmp_unordered('uno', arg1, arg2)
    # When a NaN is present, prefer the other (possibly non-NaN) operand.
    nan_result = builder.select(arg2_nan, arg1, arg2)

    arg1_ge_arg2 = builder.fcmp_ordered('>=', arg1, arg2)
    non_nan_result = builder.select(arg1_ge_arg2, arg1, arg2)

    return builder.select(any_nan, nan_result, non_nan_result)


# --- head of np_complex_maximum_impl: its body continues in the next chunk ---
def np_complex_maximum_impl(context, builder, sig, args):
    # maximum prefers nan (tries to return a nan).
    # There is an extra caveat with complex numbers, as there is more
    # than one type of nan. NumPy's docs state that the nan in the
    # first argument is returned when both arguments are nans.
    # If only one nan is found, that nan is returned.
# --- tail of np_complex_maximum_impl: its def/comments end the previous chunk ---
    _check_arity_and_homogeneity(sig, args, 2)
    ty = sig.args[0]
    # Helper signatures: bool(complex) for isnan, bool(complex, complex)
    # for the lexicographic >= comparison.
    bc_sig = typing.signature(types.boolean, ty)
    bcc_sig = typing.signature(types.boolean, *[ty]*2)
    arg1, arg2 = args
    arg1_nan = np_complex_isnan_impl(context, builder, bc_sig, [arg1])
    arg2_nan = np_complex_isnan_impl(context, builder, bc_sig, [arg2])
    any_nan = builder.or_(arg1_nan, arg2_nan)
    # If arg1 is a NaN return it (covers the "both NaN" case too),
    # otherwise arg2 must be the NaN.
    nan_result = builder.select(arg1_nan, arg1, arg2)

    arg1_ge_arg2 = np_complex_ge_impl(context, builder, bcc_sig, args)
    non_nan_result = builder.select(arg1_ge_arg2, arg1, arg2)

    return builder.select(any_nan, nan_result, non_nan_result)


def np_complex_fmax_impl(context, builder, sig, args):
    """Complex np.fmax: NaN-avoiding maximum (lexicographic order)."""
    # fmax prefers non-nan (tries to return a non-nan).
    # There is an extra caveat with complex numbers, as there is more
    # than one type of nan. NumPy's docs state that the nan in the
    # first argument is returned when both arguments are nans.
    _check_arity_and_homogeneity(sig, args, 2)
    ty = sig.args[0]
    bc_sig = typing.signature(types.boolean, ty)
    bcc_sig = typing.signature(types.boolean, *[ty]*2)
    arg1, arg2 = args
    arg1_nan = np_complex_isnan_impl(context, builder, bc_sig, [arg1])
    arg2_nan = np_complex_isnan_impl(context, builder, bc_sig, [arg2])
    any_nan = builder.or_(arg1_nan, arg2_nan)
    # Prefer the non-NaN operand; arg1 wins when both are NaN.
    nan_result = builder.select(arg2_nan, arg1, arg2)

    arg1_ge_arg2 = np_complex_ge_impl(context, builder, bcc_sig, args)
    non_nan_result = builder.select(arg1_ge_arg2, arg1, arg2)

    return builder.select(any_nan, nan_result, non_nan_result)


def np_int_smin_impl(context, builder, sig, args):
    """Signed integer minimum."""
    _check_arity_and_homogeneity(sig, args, 2)
    arg1, arg2 = args
    arg1_sle_arg2 = builder.icmp_signed('<=', arg1, arg2)
    return builder.select(arg1_sle_arg2, arg1, arg2)


def np_int_umin_impl(context, builder, sig, args):
    """Unsigned integer minimum."""
    _check_arity_and_homogeneity(sig, args, 2)
    arg1, arg2 = args
    arg1_ule_arg2 = builder.icmp_unsigned('<=', arg1, arg2)
    return builder.select(arg1_ule_arg2, arg1, arg2)


def np_real_minimum_impl(context, builder, sig, args):
    """Floating-point np.minimum: NaN-propagating minimum."""
    # minimum prefers nan (tries to return a nan).
    _check_arity_and_homogeneity(sig, args, 2)

    arg1, arg2 = args
    arg1_nan = builder.fcmp_unordered('uno', arg1, arg1)
    any_nan = builder.fcmp_unordered('uno', arg1, arg2)
    # When a NaN is present, return arg1 if it is the NaN, else arg2.
    nan_result = builder.select(arg1_nan, arg1, arg2)

    arg1_le_arg2 = builder.fcmp_ordered('<=', arg1, arg2)
    non_nan_result = builder.select(arg1_le_arg2, arg1, arg2)

    return builder.select(any_nan, nan_result, non_nan_result)


def np_real_fmin_impl(context, builder, sig, args):
    """Floating-point np.fmin: NaN-avoiding minimum."""
    # fmin prefers non-nan (tries to return a non-nan).
    _check_arity_and_homogeneity(sig, args, 2)

    arg1, arg2 = args
    arg1_nan = builder.fcmp_unordered('uno', arg1, arg1)
    any_nan = builder.fcmp_unordered('uno', arg1, arg2)
    # Prefer the non-NaN operand: if arg1 is NaN take arg2, else arg1.
    nan_result = builder.select(arg1_nan, arg2, arg1)

    arg1_le_arg2 = builder.fcmp_ordered('<=', arg1, arg2)
    non_nan_result = builder.select(arg1_le_arg2, arg1, arg2)

    return builder.select(any_nan, nan_result, non_nan_result)


# --- head of np_complex_minimum_impl: its body continues in the next chunk ---
def np_complex_minimum_impl(context, builder, sig, args):
    # minimum prefers nan (tries to return a nan).
    # There is an extra caveat with complex numbers, as there is more
    # than one type of nan. NumPy's docs state that the nan in the
    # first argument is returned when both arguments are nans.
    # If only one nan is found, that nan is returned.
# --- tail of np_complex_minimum_impl: its def/comments end the previous chunk ---
    _check_arity_and_homogeneity(sig, args, 2)
    ty = sig.args[0]
    # Helper signatures: bool(complex) for isnan, bool(complex, complex)
    # for the lexicographic <= comparison.
    bc_sig = typing.signature(types.boolean, ty)
    bcc_sig = typing.signature(types.boolean, *[ty]*2)
    arg1, arg2 = args
    arg1_nan = np_complex_isnan_impl(context, builder, bc_sig, [arg1])
    arg2_nan = np_complex_isnan_impl(context, builder, bc_sig, [arg2])
    any_nan = builder.or_(arg1_nan, arg2_nan)
    # If arg1 is a NaN return it (covers "both NaN"), else arg2 is the NaN.
    nan_result = builder.select(arg1_nan, arg1, arg2)

    arg1_le_arg2 = np_complex_le_impl(context, builder, bcc_sig, args)
    non_nan_result = builder.select(arg1_le_arg2, arg1, arg2)

    return builder.select(any_nan, nan_result, non_nan_result)


def np_complex_fmin_impl(context, builder, sig, args):
    """Complex np.fmin: NaN-avoiding minimum (lexicographic order)."""
    # fmin prefers non-nan (tries to return a non-nan).
    # There is an extra caveat with complex numbers, as there is more
    # than one type of nan. NumPy's docs state that the nan in the
    # first argument is returned when both arguments are nans.
    _check_arity_and_homogeneity(sig, args, 2)
    ty = sig.args[0]
    bc_sig = typing.signature(types.boolean, ty)
    bcc_sig = typing.signature(types.boolean, *[ty]*2)
    arg1, arg2 = args
    arg1_nan = np_complex_isnan_impl(context, builder, bc_sig, [arg1])
    arg2_nan = np_complex_isnan_impl(context, builder, bc_sig, [arg2])
    any_nan = builder.or_(arg1_nan, arg2_nan)
    # Prefer the non-NaN operand; arg1 wins when both are NaN.
    nan_result = builder.select(arg2_nan, arg1, arg2)

    arg1_le_arg2 = np_complex_le_impl(context, builder, bcc_sig, args)
    non_nan_result = builder.select(arg1_le_arg2, arg1, arg2)

    return builder.select(any_nan, nan_result, non_nan_result)


########################################################################
# NumPy floating point misc

def np_int_isnan_impl(context, builder, sig, args):
    """isnan for integers: always false."""
    _check_arity_and_homogeneity(sig, args, 1, return_type=types.boolean)
    return cgutils.false_bit


def np_real_isnan_impl(context, builder, sig, args):
    """isnan for reals, delegating to the math-module lowering."""
    _check_arity_and_homogeneity(sig, args, 1, return_type=types.boolean)
    return mathimpl.is_nan(builder, args[0])


def np_complex_isnan_impl(context, builder, sig, args):
    """isnan for complex values (true if either component is NaN)."""
    _check_arity_and_homogeneity(sig, args, 1, return_type=types.boolean)

    x, = args
    ty, = sig.args
    complex_val = context.make_complex(builder, ty, value=x)
    return cmathimpl.is_nan(builder, complex_val)


def np_int_isfinite_impl(context, builder, sig, args):
    """isfinite for integers: always true."""
    _check_arity_and_homogeneity(sig, args, 1, return_type=types.boolean)
    return cgutils.true_bit


def np_datetime_isfinite_impl(context, builder, sig, args):
    """isfinite for datetimes: finite unless the value is NaT."""
    _check_arity_and_homogeneity(sig, args, 1, return_type=types.boolean)
    return builder.icmp_unsigned('!=', args[0], npdatetime.NAT)


def np_datetime_isnat_impl(context, builder, sig, args):
    """isnat: true when the value equals the NaT sentinel."""
    _check_arity_and_homogeneity(sig, args, 1, return_type=types.boolean)
    return builder.icmp_signed('==', args[0], npdatetime.NAT)


def np_real_isfinite_impl(context, builder, sig, args):
    """isfinite for reals, delegating to the math-module lowering."""
    _check_arity_and_homogeneity(sig, args, 1, return_type=types.boolean)
    return mathimpl.is_finite(builder, args[0])


def np_complex_isfinite_impl(context, builder, sig, args):
    """isfinite for complex values."""
    _check_arity_and_homogeneity(sig, args, 1, return_type=types.boolean)
    x, = args
    ty, = sig.args
    complex_val = context.make_complex(builder, ty, value=x)
    return cmathimpl.is_finite(builder, complex_val)


def np_int_isinf_impl(context, builder, sig, args):
    """isinf for integers: always false."""
    _check_arity_and_homogeneity(sig, args, 1, return_type=types.boolean)
    return cgutils.false_bit


def np_real_isinf_impl(context, builder, sig, args):
    """isinf for reals, delegating to the math-module lowering."""
    _check_arity_and_homogeneity(sig, args, 1, return_type=types.boolean)
    return mathimpl.is_inf(builder, args[0])


def np_complex_isinf_impl(context, builder, sig, args):
    """isinf for complex values."""
    _check_arity_and_homogeneity(sig, args, 1, return_type=types.boolean)
    x, = args
    ty, = sig.args
    complex_val = context.make_complex(builder, ty, value=x)
    return cmathimpl.is_inf(builder, complex_val)


# --- head of np_real_signbit_impl: its body continues in the next chunk ---
def np_real_signbit_impl(context, builder, sig, args):
    _check_arity_and_homogeneity(sig, args, 1, return_type=types.boolean)
# --- tail of np_real_signbit_impl: its def line ends the previous chunk ---
    # there's no signbit intrinsic in LLVM, so just bitcast as int, mask the
    # signbit and cmp against 0.
    masks = {
        types.float16: context.get_constant(types.uint16, 0x8000),
        types.float32: context.get_constant(types.uint32, 0x80000000),
        types.float64: context.get_constant(types.uint64, 0x8000000000000000),
    }
    arg_ty = sig.args[0]
    # Pick the unsigned integer type with the same bit width as the float.
    arg_int_ty = getattr(types, f'uint{arg_ty.bitwidth}')
    arg_ll_int_ty = context.get_value_type(arg_int_ty)
    int_res = builder.and_(builder.bitcast(args[0], arg_ll_int_ty),
                           masks[arg_ty])
    bool_res = builder.icmp_unsigned('!=', int_res, int_res.type(0))
    return bool_res


def np_real_copysign_impl(context, builder, sig, args):
    """Lower np.copysign by delegating to the math.copysign lowering."""
    _check_arity_and_homogeneity(sig, args, 2)

    return mathimpl.copysign_float_impl(context, builder, sig, args)

def np_real_nextafter_impl(context, builder, sig, args):
    """Lower np.nextafter via the numba_nextafter/numba_nextafterf helpers."""
    _check_arity_and_homogeneity(sig, args, 2)

    dispatch_table = {
        types.float32: 'numba_nextafterf',
        types.float64: 'numba_nextafter',
    }

    return _dispatch_func_by_name_type(context, builder, sig, args,
                                       dispatch_table, 'nextafter')

def np_real_spacing_impl(context, builder, sig, args):
    """Lower np.spacing(x) as nextafter(x, copysign(inf, x)) - x."""
    # This is different to how NumPy does it, NumPy has a specialisation of
    # nextafter called _next, which is used. See:
    # https://github.com/numpy/numpy/blob/12c2b7dd62fc0c14b81c8892ed5f4f59cc94d09c/numpy/core/src/npymath/ieee754.c.src#L32-L38
    # Numba elects to use `nextafter` for a similar behaviour to save
    # translating this very involved function. Further, the NumPy comments note
    # that there is a lot of redundancy present between the two.
    _check_arity_and_homogeneity(sig, args, 1)

    dispatch_table = {
        types.float32: 'numba_nextafterf',
        types.float64: 'numba_nextafter',
    }

    [ty] = sig.args
    inner_sig = typing.signature(sig.return_type, ty, ty)
    ll_ty = args[0].type
    ll_inf = ll_ty(np.inf)
    # copysign(inf, x): an infinity carrying the sign of the input, used as
    # the direction argument to nextafter.
    fnty = llvmlite.ir.FunctionType(ll_ty, [ll_ty, ll_ty])
    fn = cgutils.insert_pure_function(builder.module, fnty,
                                      name='llvm.copysign')
    ll_sinf = builder.call(fn, [ll_inf, args[0]])
    inner_args = args + [ll_sinf,]
    nextafter = _dispatch_func_by_name_type(context, builder, inner_sig,
                                            inner_args, dispatch_table,
                                            'nextafter')
    return builder.fsub(nextafter, args[0])


def np_real_ldexp_impl(context, builder, sig, args):
    """Lower np.ldexp(x1, x2) == x1 * 2**x2 with an integer exponent."""
    # this one is slightly different to other ufuncs.
    # arguments are not homogeneous and second arg may come as
    # an 'i' or an 'l'.

    # the function expects the second argument to have a C int type
    x1, x2 = args
    ty1, ty2 = sig.args
    # note that types.intc should be equivalent to int_, that is,
    # NumPy's default int
    x2 = context.cast(builder, x2, ty2, types.intc)
    f_fi_sig = typing.signature(ty1, ty1, types.intc)
    return mathimpl.ldexp_impl(context, builder, f_fi_sig, (x1, x2))
diff --git a/venv/lib/python3.10/site-packages/numba/np/npyimpl.py b/venv/lib/python3.10/site-packages/numba/np/npyimpl.py new file mode 100644 index 0000000000000000000000000000000000000000..78768ecf62c5f8560486491c3aaace1c33edd270 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/npyimpl.py @@ -0,0 +1,878 @@
"""
Implementation of functions in the Numpy package.
+""" + + +import math +import sys +import itertools +from collections import namedtuple + +import llvmlite.ir as ir + +import numpy as np +import operator + +from numba.np import arrayobj, ufunc_db, numpy_support +from numba.np.ufunc.sigparse import parse_signature +from numba.core.imputils import (Registry, impl_ret_new_ref, force_error_model, + impl_ret_borrowed) +from numba.core import typing, types, utils, cgutils, callconv, config +from numba.np.numpy_support import ( + ufunc_find_matching_loop, select_array_wrapper, from_dtype, _ufunc_loop_sig +) +from numba.np.arrayobj import _getitem_array_generic +from numba.core.typing import npydecl +from numba.core.extending import overload, intrinsic + +from numba.core import errors + +registry = Registry('npyimpl') + + +######################################################################## + +# In the way we generate code, ufuncs work with scalar as well as +# with array arguments. The following helper classes help dealing +# with scalar and array arguments in a regular way. +# +# In short, the classes provide a uniform interface. The interface +# handles the indexing of as many dimensions as the array may have. +# For scalars, all indexing is ignored and when the value is read, +# the scalar is returned. For arrays code for actual indexing is +# generated and reading performs the appropriate indirection. + +class _ScalarIndexingHelper(object): + def update_indices(self, loop_indices, name): + pass + + def as_values(self): + pass + + +class _ScalarHelper(object): + """Helper class to handle scalar arguments (and result). + Note that store_data is only used when generating code for + a scalar ufunc and to write the output value. + + For loading, the value is directly used without having any + kind of indexing nor memory backing it up. This is the use + for input arguments. + + For storing, a variable is created in the stack where the + value will be written. 
+ + Note that it is not supported (as it is unneeded for our + current use-cases) reading back a stored value. This class + will always "load" the original value it got at its creation. + """ + def __init__(self, ctxt, bld, val, ty): + self.context = ctxt + self.builder = bld + self.val = val + self.base_type = ty + intpty = ctxt.get_value_type(types.intp) + self.shape = [ir.Constant(intpty, 1)] + + lty = ctxt.get_data_type(ty) if ty != types.boolean else ir.IntType(1) + self._ptr = cgutils.alloca_once(bld, lty) + + def create_iter_indices(self): + return _ScalarIndexingHelper() + + def load_data(self, indices): + return self.val + + def store_data(self, indices, val): + self.builder.store(val, self._ptr) + + @property + def return_val(self): + return self.builder.load(self._ptr) + + +class _ArrayIndexingHelper(namedtuple('_ArrayIndexingHelper', + ('array', 'indices'))): + def update_indices(self, loop_indices, name): + bld = self.array.builder + intpty = self.array.context.get_value_type(types.intp) + ONE = ir.Constant(ir.IntType(intpty.width), 1) + + # we are only interested in as many inner dimensions as dimensions + # the indexed array has (the outer dimensions are broadcast, so + # ignoring the outer indices produces the desired result. + indices = loop_indices[len(loop_indices) - len(self.indices):] + for src, dst, dim in zip(indices, self.indices, self.array.shape): + cond = bld.icmp_unsigned('>', dim, ONE) + with bld.if_then(cond): + bld.store(src, dst) + + def as_values(self): + """ + The indexing helper is built using alloca for each value, so it + actually contains pointers to the actual indices to load. Note + that update_indices assumes the same. 
This method returns the + indices as values + """ + bld = self.array.builder + return [bld.load(index) for index in self.indices] + + +class _ArrayHelper(namedtuple('_ArrayHelper', ('context', 'builder', + 'shape', 'strides', 'data', + 'layout', 'base_type', 'ndim', + 'return_val'))): + """Helper class to handle array arguments/result. + It provides methods to generate code loading/storing specific + items as well as support code for handling indices. + """ + def create_iter_indices(self): + intpty = self.context.get_value_type(types.intp) + ZERO = ir.Constant(ir.IntType(intpty.width), 0) + + indices = [] + for i in range(self.ndim): + x = cgutils.alloca_once(self.builder, ir.IntType(intpty.width)) + self.builder.store(ZERO, x) + indices.append(x) + return _ArrayIndexingHelper(self, indices) + + def _load_effective_address(self, indices): + return cgutils.get_item_pointer2(self.context, + self.builder, + data=self.data, + shape=self.shape, + strides=self.strides, + layout=self.layout, + inds=indices) + + def load_data(self, indices): + model = self.context.data_model_manager[self.base_type] + ptr = self._load_effective_address(indices) + return model.load_from_data_pointer(self.builder, ptr) + + def store_data(self, indices, value): + ctx = self.context + bld = self.builder + store_value = ctx.get_value_as_data(bld, self.base_type, value) + assert ctx.get_data_type(self.base_type) == store_value.type + bld.store(store_value, self._load_effective_address(indices)) + + +class _ArrayGUHelper(namedtuple('_ArrayHelper', ('context', 'builder', + 'shape', 'strides', 'data', + 'layout', 'base_type', 'ndim', + 'inner_arr_ty', 'is_input_arg'))): + """Helper class to handle array arguments/result. + It provides methods to generate code loading/storing specific + items as well as support code for handling indices. 

    Contrary to _ArrayHelper, this class can create a view to a subarray
    """
    def create_iter_indices(self):
        intpty = self.context.get_value_type(types.intp)
        ZERO = ir.Constant(ir.IntType(intpty.width), 0)

        # Only the loop (outer) dimensions get index slots; the inner
        # "core" dimensions are handled by returning subarray views.
        indices = []
        for i in range(self.ndim - self.inner_arr_ty.ndim):
            x = cgutils.alloca_once(self.builder, ir.IntType(intpty.width))
            self.builder.store(ZERO, x)
            indices.append(x)
        return _ArrayIndexingHelper(self, indices)

    def _load_effective_address(self, indices):
        context = self.context
        builder = self.builder
        arr_ty = types.Array(self.base_type, self.ndim, self.layout)
        arr = context.make_array(arr_ty)(context, builder, self.data)

        return cgutils.get_item_pointer2(context,
                                         builder,
                                         data=arr.data,
                                         shape=self.shape,
                                         strides=self.strides,
                                         layout=self.layout,
                                         inds=indices)

    def load_data(self, indices):
        context, builder = self.context, self.builder

        if self.inner_arr_ty.ndim == 0 and self.is_input_arg:
            # scalar case for input arguments
            model = context.data_model_manager[self.base_type]
            ptr = self._load_effective_address(indices)
            return model.load_from_data_pointer(builder, ptr)
        elif self.inner_arr_ty.ndim == 0 and not self.is_input_arg:
            # Output arrays are handled as 1d with shape=(1,) when its
            # signature represents a scalar. For instance: "(n),(m) -> ()"
            intpty = context.get_value_type(types.intp)
            one = intpty(1)

            fromty = types.Array(self.base_type, self.ndim, self.layout)
            toty = types.Array(self.base_type, 1, self.layout)
            itemsize = intpty(arrayobj.get_itemsize(context, fromty))

            # create a view from the original ndarray to a 1d array
            arr_from = self.context.make_array(fromty)(context,
                                                       builder,
                                                       self.data)
            arr_to = self.context.make_array(toty)(context, builder)
            arrayobj.populate_array(
                arr_to,
                data=self._load_effective_address(indices),
                shape=cgutils.pack_array(builder, [one]),
                strides=cgutils.pack_array(builder, [itemsize]),
                itemsize=arr_from.itemsize,
                meminfo=arr_from.meminfo,
                parent=arr_from.parent)
            return arr_to._getvalue()
        else:
            # generic case
            # getitem n-dim array -> m-dim array, where N > M
            index_types = (types.int64,) * (self.ndim - self.inner_arr_ty.ndim)
            arrty = types.Array(self.base_type, self.ndim, self.layout)
            arr = self.context.make_array(arrty)(context, builder, self.data)
            res = _getitem_array_generic(context, builder,
                                         self.inner_arr_ty, arrty, arr,
                                         index_types, indices)
            return impl_ret_borrowed(context, builder, self.inner_arr_ty, res)

    def guard_shape(self, loopshape):
        # Emit a runtime check that this argument's outer (loop) dimensions
        # match the loop shape exactly; raises ValueError otherwise.
        inner_ndim = self.inner_arr_ty.ndim
        def raise_impl(loop_shape, array_shape):
            # This would in fact be a test for broadcasting.
            # Broadcast would fail if, ignoring the core dimensions, the
            # remaining ones are different than indices given by loop shape.

            remaining = len(array_shape) - inner_ndim
            _raise = (remaining > len(loop_shape))
            if not _raise:
                for i in range(remaining):
                    _raise |= (array_shape[i] != loop_shape[i])
            if _raise:
                # Ideally we should call `np.broadcast_shapes` with loop and
                # array shapes. But since broadcasting is not supported here,
                # we just raise an error
                # TODO: check why raising a dynamic exception here fails
                raise ValueError('Loop and array shapes are incompatible')

        context, builder = self.context, self.builder
        sig = types.none(
            types.UniTuple(types.intp, len(loopshape)),
            types.UniTuple(types.intp, len(self.shape)),
        )
        tup = (context.make_tuple(builder, sig.args[0], loopshape),
               context.make_tuple(builder, sig.args[1], self.shape))
        context.compile_internal(builder, raise_impl, sig, tup)

    def guard_match_core_dims(self, other: '_ArrayGUHelper', ndims: int):
        # arguments with the same signature should match their core dimensions
        #
        # @guvectorize('(n,m), (n,m) -> (n)')
        # def foo(x, y, res):
        #     ...
        #
        # x and y should have the same core (2D) dimensions
        def raise_impl(self_shape, other_shape):
            same = True
            # Compare the trailing `ndims` entries of both shapes.
            a, b = len(self_shape) - ndims, len(other_shape) - ndims
            for i in range(ndims):
                same &= self_shape[a + i] == other_shape[b + i]
            if not same:
                # NumPy raises the following:
                # ValueError: gufunc: Input operand 1 has a mismatch in its
                # core dimension 0, with gufunc signature (n),(n) -> ()
                # (size 3 is different from 2)
                # But since we cannot raise a dynamic exception here, we just
                # (try) something meaningful
                msg = ('Operand has a mismatch in one of its core dimensions. '
                       'Please, check if all arguments to a @guvectorize '
                       'function have the same core dimensions.')
                raise ValueError(msg)

        context, builder = self.context, self.builder
        sig = types.none(
            types.UniTuple(types.intp, len(self.shape)),
            types.UniTuple(types.intp, len(other.shape)),
        )
        tup = (context.make_tuple(builder, sig.args[0], self.shape),
               context.make_tuple(builder, sig.args[1], other.shape),)
        context.compile_internal(builder, raise_impl, sig, tup)


def _prepare_argument(ctxt, bld, inp, tyinp, where='input operand'):
    """returns an instance of the appropriate Helper (either
    _ScalarHelper or _ArrayHelper) class to handle the argument.
    using the polymorphic interface of the Helper classes, scalar
    and array cases can be handled with the same code"""

    # first un-Optional Optionals
    if isinstance(tyinp, types.Optional):
        oty = tyinp
        tyinp = tyinp.type
        inp = ctxt.cast(bld, inp, oty, tyinp)

    # then prepare the arg for a concrete instance
    if isinstance(tyinp, types.ArrayCompatible):
        ary = ctxt.make_array(tyinp)(ctxt, bld, inp)
        shape = cgutils.unpack_tuple(bld, ary.shape, tyinp.ndim)
        strides = cgutils.unpack_tuple(bld, ary.strides, tyinp.ndim)
        return _ArrayHelper(ctxt, bld, shape, strides, ary.data,
                            tyinp.layout, tyinp.dtype, tyinp.ndim, inp)
    elif (types.unliteral(tyinp) in types.number_domain | {types.boolean}
          or isinstance(tyinp, types.scalars._NPDatetimeBase)):
        return _ScalarHelper(ctxt, bld, inp, tyinp)
    else:
        raise NotImplementedError('unsupported type for {0}: {1}'.format(where,
                                                                         str(tyinp)))


# Signature of _broadcast_onto; the pointer width depends on which type
# system is active.
if config.USE_LEGACY_TYPE_SYSTEM:
    _broadcast_onto_sig = types.intp(types.intp, types.CPointer(types.intp),
                                     types.intp, types.CPointer(types.intp))
else:
    _broadcast_onto_sig = types.np_intp(types.np_intp, types.CPointer(types.np_intp),
                                        types.np_intp, types.CPointer(types.np_intp))

# --- head of _broadcast_onto: its docstring/body continue in the next chunk ---
def _broadcast_onto(src_ndim, src_shape, dest_ndim, dest_shape):
    '''Low-level utility function used in calculating a
shape for
    an implicit output array. This function assumes that the
    destination shape is an LLVM pointer to a C-style array that was
    already initialized to a size of one along all axes.

    Returns an integer value:
    >= 1 : Succeeded. Return value should equal the number of dimensions in
           the destination shape.
    0 : Failed to broadcast because source shape is larger than the
        destination shape (this case should be weeded out at type
        checking).
    < 0 : Failed to broadcast onto destination axis, at axis number ==
          -(return_value + 1).
    '''
    if src_ndim > dest_ndim:
        # This check should have been done during type checking, but
        # let's be defensive anyway...
        return 0
    else:
        src_index = 0
        # Align the source shape against the trailing axes of the destination.
        dest_index = dest_ndim - src_ndim
        while src_index < src_ndim:
            src_dim_size = src_shape[src_index]
            dest_dim_size = dest_shape[dest_index]
            # Check to see if we've already mutated the destination
            # shape along this axis.
            if dest_dim_size != 1:
                # If we have mutated the destination shape already,
                # then the source axis size must either be one,
                # or the destination axis size.
                if src_dim_size != dest_dim_size and src_dim_size != 1:
                    return -(dest_index + 1)
            elif src_dim_size != 1:
                # If the destination size is still its initial value of
                # one, adopt the source axis size.
                dest_shape[dest_index] = src_dim_size
            src_index += 1
            dest_index += 1
    return dest_index

def _build_array(context, builder, array_ty, input_types, inputs):
    """Utility function to handle allocation of an implicit output array
    given the target context, builder, output array type, and a list of
    _ArrayHelper instances.
    """
    # First, strip optional types, ufunc loops are typed on concrete types
    input_types = [x.type if isinstance(x, types.Optional) else x
                   for x in input_types]

    intp_ty = context.get_value_type(types.intp)
    def make_intp_const(val):
        return context.get_constant(types.intp, val)

    ZERO = make_intp_const(0)
    ONE = make_intp_const(1)

    src_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim,
                                    "src_shape")
    dest_ndim = make_intp_const(array_ty.ndim)
    dest_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim,
                                     "dest_shape")
    dest_shape_addrs = tuple(cgutils.gep_inbounds(builder, dest_shape, index)
                             for index in range(array_ty.ndim))

    # Initialize the destination shape with all ones.
    for dest_shape_addr in dest_shape_addrs:
        builder.store(ONE, dest_shape_addr)

    # For each argument, try to broadcast onto the destination shape,
    # mutating along any axis where the argument shape is not one and
    # the destination shape is one.
    for arg_number, arg in enumerate(inputs):
        if not hasattr(arg, "ndim"):  # Skip scalar arguments
            continue
        arg_ndim = make_intp_const(arg.ndim)
        for index in range(arg.ndim):
            builder.store(arg.shape[index],
                          cgutils.gep_inbounds(builder, src_shape, index))
        arg_result = context.compile_internal(
            builder, _broadcast_onto, _broadcast_onto_sig,
            [arg_ndim, src_shape, dest_ndim, dest_shape])
        with cgutils.if_unlikely(builder,
                                 builder.icmp_signed('<', arg_result, ONE)):
            msg = "unable to broadcast argument %d to output array" % (
                arg_number,)

            loc = errors.loc_info.get('loc', None)
            if loc is not None:
                msg += '\nFile "%s", line %d, ' % (loc.filename, loc.line)

            context.call_conv.return_user_exc(builder, ValueError, (msg,))

    real_array_ty = array_ty.as_array

    dest_shape_tup = tuple(builder.load(dest_shape_addr)
                           for dest_shape_addr in dest_shape_addrs)
    array_val = arrayobj._empty_nd_impl(context, builder, real_array_ty,
                                        dest_shape_tup)

    # Get the best argument to call __array_wrap__ on
    array_wrapper_index = select_array_wrapper(input_types)
    array_wrapper_ty = input_types[array_wrapper_index]
    try:
        # __array_wrap__(source wrapped array, out array) -> out wrapped array
        array_wrap = context.get_function('__array_wrap__',
                                          array_ty(array_wrapper_ty,
                                                   real_array_ty))
    except NotImplementedError:
        # If it's the same priority as a regular array, assume we
        # should use the allocated array unchanged.
        if array_wrapper_ty.array_priority != types.Array.array_priority:
            raise
        out_val = array_val._getvalue()
    else:
        wrap_args = (inputs[array_wrapper_index].return_val,
                     array_val._getvalue())
        out_val = array_wrap(builder, wrap_args)

    ndim = array_ty.ndim
    shape = cgutils.unpack_tuple(builder, array_val.shape, ndim)
    strides = cgutils.unpack_tuple(builder, array_val.strides, ndim)
    return _ArrayHelper(context, builder, shape, strides, array_val.data,
                        array_ty.layout, array_ty.dtype, ndim,
                        out_val)

# ufuncs either return a single result when nout == 1, else a tuple of results

def _unpack_output_types(ufunc, sig):
    # Normalise the return type(s) of `sig` to a list, one entry per output.
    if ufunc.nout == 1:
        return [sig.return_type]
    else:
        return list(sig.return_type)


def _unpack_output_values(ufunc, builder, values):
    # Normalise the kernel's result value(s) to a list, one per output.
    if ufunc.nout == 1:
        return [values]
    else:
        return cgutils.unpack_tuple(builder, values)


def _pack_output_values(ufunc, context, builder, typ, values):
    # Inverse of _unpack_output_values: single value or packed tuple.
    if ufunc.nout == 1:
        return values[0]
    else:
        return context.make_tuple(builder, typ, values)


# --- head of numpy_ufunc_kernel: its body continues in the next chunk ---
def numpy_ufunc_kernel(context, builder, sig, args, ufunc, kernel_class):
    # This is the code generator that builds all the looping needed
    # to execute a numpy functions over several dimensions (including
    # scalar cases).
+ # + # context - the code generation context + # builder - the code emitter + # sig - signature of the ufunc + # args - the args to the ufunc + # ufunc - the ufunc itself + # kernel_class - a code generating subclass of _Kernel that provides + + arguments = [_prepare_argument(context, builder, arg, tyarg) + for arg, tyarg in zip(args, sig.args)] + + if len(arguments) < ufunc.nin: + raise RuntimeError( + "Not enough inputs to {}, expected {} got {}" + .format(ufunc.__name__, ufunc.nin, len(arguments))) + + for out_i, ret_ty in enumerate(_unpack_output_types(ufunc, sig)): + if ufunc.nin + out_i >= len(arguments): + # this out argument is not provided + if isinstance(ret_ty, types.ArrayCompatible): + output = _build_array(context, builder, ret_ty, sig.args, arguments) + else: + output = _prepare_argument( + context, builder, + ir.Constant(context.get_value_type(ret_ty), None), ret_ty) + arguments.append(output) + elif context.enable_nrt: + # Incref the output + context.nrt.incref(builder, ret_ty, args[ufunc.nin + out_i]) + + inputs = arguments[:ufunc.nin] + outputs = arguments[ufunc.nin:] + assert len(outputs) == ufunc.nout + + outer_sig = _ufunc_loop_sig( + [a.base_type for a in outputs], + [a.base_type for a in inputs] + ) + kernel = kernel_class(context, builder, outer_sig) + intpty = context.get_value_type(types.intp) + + indices = [inp.create_iter_indices() for inp in inputs] + + # assume outputs are all the same size, which numpy requires + + loopshape = outputs[0].shape + + # count the number of C and F layout arrays, respectively + input_layouts = [inp.layout for inp in inputs + if isinstance(inp, _ArrayHelper)] + num_c_layout = len([x for x in input_layouts if x == 'C']) + num_f_layout = len([x for x in input_layouts if x == 'F']) + + # Only choose F iteration order if more arrays are in F layout. + # Default to C order otherwise. + # This is a best effort for performance. NumPy has more fancy logic that + # uses array iterators in non-trivial cases. 
+ if num_f_layout > num_c_layout: + order = 'F' + else: + order = 'C' + + with cgutils.loop_nest(builder, loopshape, intp=intpty, order=order) as loop_indices: + vals_in = [] + for i, (index, arg) in enumerate(zip(indices, inputs)): + index.update_indices(loop_indices, i) + vals_in.append(arg.load_data(index.as_values())) + + vals_out = _unpack_output_values(ufunc, builder, kernel.generate(*vals_in)) + for val_out, output in zip(vals_out, outputs): + output.store_data(loop_indices, val_out) + + out = _pack_output_values(ufunc, context, builder, sig.return_type, [o.return_val for o in outputs]) + return impl_ret_new_ref(context, builder, sig.return_type, out) + + +def numpy_gufunc_kernel(context, builder, sig, args, ufunc, kernel_class): + arguments = [] + expected_ndims = kernel_class.dufunc.expected_ndims() + expected_ndims = expected_ndims[0] + expected_ndims[1] + is_input = [True] * ufunc.nin + [False] * ufunc.nout + for arg, ty, exp_ndim, is_inp in zip(args, sig.args, expected_ndims, is_input): # noqa: E501 + if isinstance(ty, types.ArrayCompatible): + # Create an array helper that iteration returns a subarray + # with ndim specified by "exp_ndim" + arr = context.make_array(ty)(context, builder, arg) + shape = cgutils.unpack_tuple(builder, arr.shape, ty.ndim) + strides = cgutils.unpack_tuple(builder, arr.strides, ty.ndim) + inner_arr_ty = ty.copy(ndim=exp_ndim) + ndim = ty.ndim + layout = ty.layout + base_type = ty.dtype + array_helper = _ArrayGUHelper(context, builder, + shape, strides, arg, + layout, base_type, ndim, + inner_arr_ty, is_inp) + arguments.append(array_helper) + else: + scalar_helper = _ScalarHelper(context, builder, arg, ty) + arguments.append(scalar_helper) + kernel = kernel_class(context, builder, sig) + + layouts = [arg.layout for arg in arguments + if isinstance(arg, _ArrayGUHelper)] + num_c_layout = len([x for x in layouts if x == 'C']) + num_f_layout = len([x for x in layouts if x == 'F']) + + # Only choose F iteration order if more arrays 
are in F layout. + # Default to C order otherwise. + # This is a best effort for performance. NumPy has more fancy logic that + # uses array iterators in non-trivial cases. + if num_f_layout > num_c_layout: + order = 'F' + else: + order = 'C' + + outputs = arguments[ufunc.nin:] + intpty = context.get_value_type(types.intp) + indices = [inp.create_iter_indices() for inp in arguments] + loopshape_ndim = outputs[0].ndim - outputs[0].inner_arr_ty.ndim + loopshape = outputs[0].shape[ : loopshape_ndim] + + _sig = parse_signature(ufunc.gufunc_builder.signature) + for (idx_a, sig_a), (idx_b, sig_b) in itertools.combinations( + zip(range(len(arguments)), + _sig[0] + _sig[1]), + r = 2 + ): + # For each pair of arguments, both inputs and outputs, must match their + # inner dimensions if their signatures are the same. + arg_a, arg_b = arguments[idx_a], arguments[idx_b] + if sig_a == sig_b and \ + all(isinstance(x, _ArrayGUHelper) for x in (arg_a, arg_b)): + arg_a, arg_b = arguments[idx_a], arguments[idx_b] + arg_a.guard_match_core_dims(arg_b, len(sig_a)) + + for arg in arguments[:ufunc.nin]: + if isinstance(arg, _ArrayGUHelper): + arg.guard_shape(loopshape) + + with cgutils.loop_nest(builder, + loopshape, + intp=intpty, + order=order) as loop_indices: + vals_in = [] + for i, (index, arg) in enumerate(zip(indices, arguments)): + index.update_indices(loop_indices, i) + vals_in.append(arg.load_data(index.as_values())) + + kernel.generate(*vals_in) + + +# Kernels are the code to be executed inside the multidimensional loop. +class _Kernel(object): + def __init__(self, context, builder, outer_sig): + self.context = context + self.builder = builder + self.outer_sig = outer_sig + + def cast(self, val, fromty, toty): + """Numpy uses cast semantics that are different from standard Python + (for example, it does allow casting from complex to float). + + This method acts as a patch to context.cast so that it allows + complex to real/int casts. 
+ + """ + if (isinstance(fromty, types.Complex) and + not isinstance(toty, types.Complex)): + # attempt conversion of the real part to the specified type. + # note that NumPy issues a warning in this kind of conversions + newty = fromty.underlying_float + attr = self.context.get_getattr(fromty, 'real') + val = attr(self.context, self.builder, fromty, val, 'real') + fromty = newty + # let the regular cast do the rest... + + return self.context.cast(self.builder, val, fromty, toty) + + def generate(self, *args): + isig = self.inner_sig + osig = self.outer_sig + cast_args = [self.cast(val, inty, outty) + for val, inty, outty in + zip(args, osig.args, isig.args)] + if self.cres.objectmode: + func_type = self.context.call_conv.get_function_type( + types.pyobject, [types.pyobject] * len(isig.args)) + else: + func_type = self.context.call_conv.get_function_type( + isig.return_type, isig.args) + module = self.builder.block.function.module + entry_point = cgutils.get_or_insert_function( + module, func_type, + self.cres.fndesc.llvm_func_name) + entry_point.attributes.add("alwaysinline") + + _, res = self.context.call_conv.call_function( + self.builder, entry_point, isig.return_type, isig.args, + cast_args) + return self.cast(res, isig.return_type, osig.return_type) + + +def _ufunc_db_function(ufunc): + """Use the ufunc loop type information to select the code generation + function from the table provided by the dict_of_kernels. The dict + of kernels maps the loop identifier to a function with the + following signature: (context, builder, signature, args). + + The loop type information has the form 'AB->C'. The letters to the + left of '->' are the input types (specified as NumPy letter + types). The letters to the right of '->' are the output + types. There must be 'ufunc.nin' letters to the left of '->', and + 'ufunc.nout' letters to the right. + + For example, a binary float loop resulting in a float, will have + the following signature: 'ff->f'. 
+ + A given ufunc implements many loops. The list of loops implemented + for a given ufunc can be accessed using the 'types' attribute in + the ufunc object. The NumPy machinery selects the first loop that + fits a given calling signature (in our case, what we call the + outer_sig). This logic is mimicked by 'ufunc_find_matching_loop'. + """ + + class _KernelImpl(_Kernel): + def __init__(self, context, builder, outer_sig): + super(_KernelImpl, self).__init__(context, builder, outer_sig) + loop = ufunc_find_matching_loop( + ufunc, outer_sig.args + tuple(_unpack_output_types(ufunc, outer_sig))) + self.fn = context.get_ufunc_info(ufunc).get(loop.ufunc_sig) + self.inner_sig = _ufunc_loop_sig(loop.outputs, loop.inputs) + + if self.fn is None: + msg = "Don't know how to lower ufunc '{0}' for loop '{1}'" + raise NotImplementedError(msg.format(ufunc.__name__, loop)) + + def generate(self, *args): + isig = self.inner_sig + osig = self.outer_sig + + cast_args = [self.cast(val, inty, outty) + for val, inty, outty in zip(args, osig.args, + isig.args)] + with force_error_model(self.context, 'numpy'): + res = self.fn(self.context, self.builder, isig, cast_args) + dmm = self.context.data_model_manager + res = dmm[isig.return_type].from_return(self.builder, res) + return self.cast(res, isig.return_type, osig.return_type) + + return _KernelImpl + + +################################################################################ +# Helper functions that register the ufuncs + +def register_ufunc_kernel(ufunc, kernel, lower): + def do_ufunc(context, builder, sig, args): + return numpy_ufunc_kernel(context, builder, sig, args, ufunc, kernel) + + _any = types.Any + in_args = (_any,) * ufunc.nin + + # Add a lowering for each out argument that is missing. 
+ for n_explicit_out in range(ufunc.nout + 1): + out_args = (types.Array,) * n_explicit_out + lower(ufunc, *in_args, *out_args)(do_ufunc) + + return kernel + + +def register_unary_operator_kernel(operator, ufunc, kernel, lower, + inplace=False): + assert not inplace # are there any inplace unary operators? + def lower_unary_operator(context, builder, sig, args): + return numpy_ufunc_kernel(context, builder, sig, args, ufunc, kernel) + _arr_kind = types.Array + lower(operator, _arr_kind)(lower_unary_operator) + + +def register_binary_operator_kernel(op, ufunc, kernel, lower, inplace=False): + def lower_binary_operator(context, builder, sig, args): + return numpy_ufunc_kernel(context, builder, sig, args, ufunc, kernel) + + def lower_inplace_operator(context, builder, sig, args): + # The visible signature is (A, B) -> A + # The implementation's signature (with explicit output) + # is (A, B, A) -> A + args = tuple(args) + (args[0],) + sig = typing.signature(sig.return_type, *sig.args + (sig.args[0],)) + return numpy_ufunc_kernel(context, builder, sig, args, ufunc, kernel) + + _any = types.Any + _arr_kind = types.Array + formal_sigs = [(_arr_kind, _arr_kind), (_any, _arr_kind), (_arr_kind, _any)] + for sig in formal_sigs: + if not inplace: + lower(op, *sig)(lower_binary_operator) + else: + lower(op, *sig)(lower_inplace_operator) + + +################################################################################ +# Use the contents of ufunc_db to initialize the supported ufuncs + +@registry.lower(operator.pos, types.Array) +def array_positive_impl(context, builder, sig, args): + '''Lowering function for +(array) expressions. Defined here + (numba.targets.npyimpl) since the remaining array-operator + lowering functions are also registered in this module. 
+ ''' + class _UnaryPositiveKernel(_Kernel): + def generate(self, *args): + [val] = args + return val + + return numpy_ufunc_kernel(context, builder, sig, args, np.positive, + _UnaryPositiveKernel) + + +def register_ufuncs(ufuncs, lower): + kernels = {} + for ufunc in ufuncs: + db_func = _ufunc_db_function(ufunc) + kernels[ufunc] = register_ufunc_kernel(ufunc, db_func, lower) + + for _op_map in (npydecl.NumpyRulesUnaryArrayOperator._op_map, + npydecl.NumpyRulesArrayOperator._op_map, + ): + for operator, ufunc_name in _op_map.items(): + ufunc = getattr(np, ufunc_name) + kernel = kernels[ufunc] + if ufunc.nin == 1: + register_unary_operator_kernel(operator, ufunc, kernel, lower) + elif ufunc.nin == 2: + register_binary_operator_kernel(operator, ufunc, kernel, lower) + else: + raise RuntimeError("There shouldn't be any non-unary or binary operators") + + for _op_map in (npydecl.NumpyRulesInplaceArrayOperator._op_map, + ): + for operator, ufunc_name in _op_map.items(): + ufunc = getattr(np, ufunc_name) + kernel = kernels[ufunc] + if ufunc.nin == 1: + register_unary_operator_kernel(operator, ufunc, kernel, lower, + inplace=True) + elif ufunc.nin == 2: + register_binary_operator_kernel(operator, ufunc, kernel, lower, + inplace=True) + else: + raise RuntimeError("There shouldn't be any non-unary or binary operators") + + +register_ufuncs(ufunc_db.get_ufuncs(), registry.lower) + + +@intrinsic +def _make_dtype_object(typingctx, desc): + """Given a string or NumberClass description *desc*, returns the dtype object. + """ + def from_nb_type(nb_type): + return_type = types.DType(nb_type) + sig = return_type(desc) + + def codegen(context, builder, signature, args): + # All dtype objects are dummy values in LLVM. + # They only exist in the type level. + return context.get_dummy_value() + + return sig, codegen + + if isinstance(desc, types.Literal): + # Convert the str description into np.dtype then to numba type. 
+ nb_type = from_dtype(np.dtype(desc.literal_value)) + return from_nb_type(nb_type) + elif isinstance(desc, types.functions.NumberClass): + thestr = str(desc.dtype) + # Convert the str description into np.dtype then to numba type. + nb_type = from_dtype(np.dtype(thestr)) + return from_nb_type(nb_type) + +@overload(np.dtype) +def numpy_dtype(desc): + """Provide an implementation so that numpy.dtype function can be lowered. + """ + if isinstance(desc, (types.Literal, types.functions.NumberClass)): + def imp(desc): + return _make_dtype_object(desc) + return imp + else: + raise errors.NumbaTypeError('unknown dtype descriptor: {}'.format(desc)) diff --git a/venv/lib/python3.10/site-packages/numba/np/numpy_support.py b/venv/lib/python3.10/site-packages/numba/np/numpy_support.py new file mode 100644 index 0000000000000000000000000000000000000000..e8de393a6e1622c996cb443a9b78bfe2080258a4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/numpy_support.py @@ -0,0 +1,796 @@ +import collections +import ctypes +import re + +import numpy as np + +from numba.core import errors, types, config +from numba.core.typing.templates import signature +from numba.np import npdatetime_helpers +from numba.core.errors import TypingError + +# re-export +from numba.core.cgutils import is_nonelike # noqa: F401 + + +numpy_version = tuple(map(int, np.__version__.split('.')[:2])) + + +if config.USE_LEGACY_TYPE_SYSTEM: + FROM_DTYPE = { + np.dtype('bool'): types.boolean, + np.dtype('int8'): types.int8, + np.dtype('int16'): types.int16, + np.dtype('int32'): types.int32, + np.dtype('int64'): types.int64, + + np.dtype('uint8'): types.uint8, + np.dtype('uint16'): types.uint16, + np.dtype('uint32'): types.uint32, + np.dtype('uint64'): types.uint64, + + np.dtype('float32'): types.float32, + np.dtype('float64'): types.float64, + np.dtype('float16'): types.float16, + np.dtype('complex64'): types.complex64, + np.dtype('complex128'): types.complex128, + + np.dtype(object): types.pyobject, + } 
+else: + FROM_DTYPE = { + np.dtype('bool'): types.np_bool_, + np.dtype('int8'): types.np_int8, + np.dtype('int16'): types.np_int16, + np.dtype('int32'): types.np_int32, + np.dtype('int64'): types.np_int64, + + np.dtype('uint8'): types.np_uint8, + np.dtype('uint16'): types.np_uint16, + np.dtype('uint32'): types.np_uint32, + np.dtype('uint64'): types.np_uint64, + + np.dtype('float32'): types.np_float32, + np.dtype('float64'): types.np_float64, + np.dtype('float16'): types.np_float16, + np.dtype('complex64'): types.np_complex64, + np.dtype('complex128'): types.np_complex128, + + np.dtype(object): types.pyobject, + } + + +re_typestr = re.compile(r'[<>=\|]([a-z])(\d+)?$', re.I) +re_datetimestr = re.compile(r'[<>=\|]([mM])8?(\[([a-z]+)\])?$', re.I) + +sizeof_unicode_char = np.dtype('U1').itemsize + + +def _from_str_dtype(dtype): + m = re_typestr.match(dtype.str) + if not m: + raise errors.NumbaNotImplementedError(dtype) + groups = m.groups() + typecode = groups[0] + if typecode == 'U': + # unicode + if dtype.byteorder not in '=|': + raise errors.NumbaNotImplementedError("Does not support non-native " + "byteorder") + count = dtype.itemsize // sizeof_unicode_char + assert count == int(groups[1]), "Unicode char size mismatch" + return types.UnicodeCharSeq(count) + + elif typecode == 'S': + # char + count = dtype.itemsize + assert count == int(groups[1]), "Char size mismatch" + return types.CharSeq(count) + + else: + raise errors.NumbaNotImplementedError(dtype) + + +def _from_datetime_dtype(dtype): + m = re_datetimestr.match(dtype.str) + if not m: + raise errors.NumbaNotImplementedError(dtype) + groups = m.groups() + typecode = groups[0] + unit = groups[2] or '' + if typecode == 'm': + return types.NPTimedelta(unit) + elif typecode == 'M': + return types.NPDatetime(unit) + else: + raise errors.NumbaNotImplementedError(dtype) + + +def from_dtype(dtype): + """ + Return a Numba Type instance corresponding to the given Numpy *dtype*. 
+ NumbaNotImplementedError is raised on unsupported Numpy dtypes. + """ + if type(dtype) is type and issubclass(dtype, np.generic): + dtype = np.dtype(dtype) + elif getattr(dtype, "fields", None) is not None: + return from_struct_dtype(dtype) + + try: + return FROM_DTYPE[dtype] + except KeyError: + pass + + try: + char = dtype.char + except AttributeError: + pass + else: + if char in 'SU': + return _from_str_dtype(dtype) + if char in 'mM': + return _from_datetime_dtype(dtype) + if char in 'V' and dtype.subdtype is not None: + subtype = from_dtype(dtype.subdtype[0]) + return types.NestedArray(subtype, dtype.shape) + + raise errors.NumbaNotImplementedError(dtype) + + +_as_dtype_letters = { + types.NPDatetime: 'M8', + types.NPTimedelta: 'm8', + types.CharSeq: 'S', + types.UnicodeCharSeq: 'U', +} + + +def as_dtype(nbtype): + """ + Return a numpy dtype instance corresponding to the given Numba type. + NotImplementedError is if no correspondence is known. + """ + nbtype = types.unliteral(nbtype) + if isinstance(nbtype, (types.Complex, types.Integer, types.Float)): + return np.dtype(str(nbtype)) + if isinstance(nbtype, (types.Boolean)): + return np.dtype('?') + if isinstance(nbtype, (types.NPDatetime, types.NPTimedelta)): + letter = _as_dtype_letters[type(nbtype)] + if nbtype.unit: + return np.dtype('%s[%s]' % (letter, nbtype.unit)) + else: + return np.dtype(letter) + if isinstance(nbtype, (types.CharSeq, types.UnicodeCharSeq)): + letter = _as_dtype_letters[type(nbtype)] + return np.dtype('%s%d' % (letter, nbtype.count)) + if isinstance(nbtype, types.Record): + return as_struct_dtype(nbtype) + if isinstance(nbtype, types.EnumMember): + return as_dtype(nbtype.dtype) + if isinstance(nbtype, types.npytypes.DType): + return as_dtype(nbtype.dtype) + if isinstance(nbtype, types.NumberClass): + return as_dtype(nbtype.dtype) + if isinstance(nbtype, types.NestedArray): + spec = (as_dtype(nbtype.dtype), tuple(nbtype.shape)) + return np.dtype(spec) + if isinstance(nbtype, 
types.PyObject): + return np.dtype(object) + + msg = f"{nbtype} cannot be represented as a NumPy dtype" + raise errors.NumbaNotImplementedError(msg) + + +def as_struct_dtype(rec): + """Convert Numba Record type to NumPy structured dtype + """ + assert isinstance(rec, types.Record) + names = [] + formats = [] + offsets = [] + titles = [] + # Fill the fields if they are not a title. + for k, t in rec.members: + if not rec.is_title(k): + names.append(k) + formats.append(as_dtype(t)) + offsets.append(rec.offset(k)) + titles.append(rec.fields[k].title) + + fields = { + 'names': names, + 'formats': formats, + 'offsets': offsets, + 'itemsize': rec.size, + 'titles': titles, + } + _check_struct_alignment(rec, fields) + return np.dtype(fields, align=rec.aligned) + + +def _check_struct_alignment(rec, fields): + """Check alignment compatibility with Numpy""" + if rec.aligned: + for k, dt in zip(fields['names'], fields['formats']): + llvm_align = rec.alignof(k) + npy_align = dt.alignment + if llvm_align is not None and npy_align != llvm_align: + msg = ( + 'NumPy is using a different alignment ({}) ' + 'than Numba/LLVM ({}) for {}. ' + 'This is likely a NumPy bug.' + ) + raise ValueError(msg.format(npy_align, llvm_align, dt)) + + +def map_arrayscalar_type(val): + if isinstance(val, np.generic): + # We can't blindly call np.dtype() as it loses information + # on some types, e.g. datetime64 and timedelta64. + dtype = val.dtype + else: + try: + dtype = np.dtype(type(val)) + except TypeError: + raise errors.NumbaNotImplementedError("no corresponding numpy " + "dtype for %r" % type(val)) + return from_dtype(dtype) + + +def is_array(val): + return isinstance(val, np.ndarray) + + +def map_layout(val): + if val.flags['C_CONTIGUOUS']: + layout = 'C' + elif val.flags['F_CONTIGUOUS']: + layout = 'F' + else: + layout = 'A' + return layout + + +def select_array_wrapper(inputs): + """ + Given the array-compatible input types to an operation (e.g. 
ufunc), + select the appropriate input for wrapping the operation output, + according to each input's __array_priority__. + + An index into *inputs* is returned. + """ + max_prio = float('-inf') + selected_index = None + for index, ty in enumerate(inputs): + # Ties are broken by choosing the first winner, as in Numpy + if (isinstance(ty, types.ArrayCompatible) and + ty.array_priority > max_prio): + selected_index = index + max_prio = ty.array_priority + + assert selected_index is not None + return selected_index + + +def resolve_output_type(context, inputs, formal_output): + """ + Given the array-compatible input types to an operation (e.g. ufunc), + and the operation's formal output type (a types.Array instance), + resolve the actual output type using the typing *context*. + + This uses a mechanism compatible with Numpy's __array_priority__ / + __array_wrap__. + """ + selected_input = inputs[select_array_wrapper(inputs)] + args = selected_input, formal_output + sig = context.resolve_function_type('__array_wrap__', args, {}) + if sig is None: + if selected_input.array_priority == types.Array.array_priority: + # If it's the same priority as a regular array, assume we + # should return the output unchanged. + # (we can't define __array_wrap__ explicitly for types.Buffer, + # as that would be inherited by most array-compatible objects) + return formal_output + raise errors.TypingError("__array_wrap__ failed for %s" % (args,)) + return sig.return_type + + +def supported_ufunc_loop(ufunc, loop): + """Return whether the *loop* for the *ufunc* is supported -in nopython-. + + *loop* should be a UFuncLoopSpec instance, and *ufunc* a numpy ufunc. + + For ufuncs implemented using the ufunc_db, it is supported if the ufunc_db + contains a lowering definition for 'loop' in the 'ufunc' entry. + + For other ufuncs, it is type based. The loop will be considered valid if it + only contains the following letter types: '?bBhHiIlLqQfd'. 
Note this is + legacy and when implementing new ufuncs the ufunc_db should be preferred, + as it allows for a more fine-grained incremental support. + """ + # NOTE: Assuming ufunc for the CPUContext + from numba.np import ufunc_db + loop_sig = loop.ufunc_sig + try: + # check if the loop has a codegen description in the + # ufunc_db. If so, we can proceed. + + # note that as of now not all ufuncs have an entry in the + # ufunc_db + supported_loop = loop_sig in ufunc_db.get_ufunc_info(ufunc) + except KeyError: + # for ufuncs not in ufunc_db, base the decision of whether the + # loop is supported on its types + loop_types = [x.char for x in loop.numpy_inputs + loop.numpy_outputs] + supported_types = '?bBhHiIlLqQfd' + # check if all the types involved in the ufunc loop are + # supported in this mode + supported_loop = all(t in supported_types for t in loop_types) + + return supported_loop + + +class UFuncLoopSpec(collections.namedtuple('_UFuncLoopSpec', + ('inputs', 'outputs', 'ufunc_sig'))): + """ + An object describing a ufunc loop's inner types. Properties: + - inputs: the inputs' Numba types + - outputs: the outputs' Numba types + - ufunc_sig: the string representing the ufunc's type signature, in + Numpy format (e.g. "ii->i") + """ + + __slots__ = () + + @property + def numpy_inputs(self): + return [as_dtype(x) for x in self.inputs] + + @property + def numpy_outputs(self): + return [as_dtype(x) for x in self.outputs] + + +def _ufunc_loop_sig(out_tys, in_tys): + if len(out_tys) == 1: + return signature(out_tys[0], *in_tys) + else: + return signature(types.Tuple(out_tys), *in_tys) + + +def ufunc_can_cast(from_, to, has_mixed_inputs, casting='safe'): + """ + A variant of np.can_cast() that can allow casting any integer to + any real or complex type, in case the operation has mixed-kind + inputs. + + For example we want `np.power(float32, int32)` to be computed using + SP arithmetic and return `float32`. 
+ However, `np.sqrt(int32)` should use DP arithmetic and return `float64`. + """ + from_ = np.dtype(from_) + to = np.dtype(to) + if has_mixed_inputs and from_.kind in 'iu' and to.kind in 'cf': + # Decide that all integers can cast to any real or complex type. + return True + return np.can_cast(from_, to, casting) + + +def ufunc_find_matching_loop(ufunc, arg_types): + """Find the appropriate loop to be used for a ufunc based on the types + of the operands + + ufunc - The ufunc we want to check + arg_types - The tuple of arguments to the ufunc, including any + explicit output(s). + return value - A UFuncLoopSpec identifying the loop, or None + if no matching loop is found. + """ + + # Separate logical input from explicit output arguments + input_types = arg_types[:ufunc.nin] + output_types = arg_types[ufunc.nin:] + assert (len(input_types) == ufunc.nin) + + try: + np_input_types = [as_dtype(x) for x in input_types] + except errors.NumbaNotImplementedError: + return None + try: + np_output_types = [as_dtype(x) for x in output_types] + except errors.NumbaNotImplementedError: + return None + + # Whether the inputs are mixed integer / floating-point + has_mixed_inputs = ( + any(dt.kind in 'iu' for dt in np_input_types) and + any(dt.kind in 'cf' for dt in np_input_types)) + + def choose_types(numba_types, ufunc_letters): + """ + Return a list of Numba types representing *ufunc_letters*, + except when the letter designates a datetime64 or timedelta64, + in which case the type is taken from *numba_types*. 
+ """ + assert len(ufunc_letters) >= len(numba_types) + types = [tp if letter in 'mM' else from_dtype(np.dtype(letter)) + for tp, letter in zip(numba_types, ufunc_letters)] + # Add missing types (presumably implicit outputs) + types += [from_dtype(np.dtype(letter)) + for letter in ufunc_letters[len(numba_types):]] + return types + + def set_output_dt_units(inputs, outputs, ufunc_inputs, ufunc_name): + """ + Sets the output unit of a datetime type based on the input units + + Timedelta is a special dtype that requires the time unit to be + specified (day, month, etc). Not every operation with timedelta inputs + leads to an output of timedelta output. However, for those that do, + the unit of output must be inferred based on the units of the inputs. + + At the moment this function takes care of two cases: + a) where all inputs are timedelta with the same unit (mm), and + therefore the output has the same unit. + This case is used for arr.sum, and for arr1+arr2 where all arrays + are timedeltas. + If in the future this needs to be extended to a case with mixed units, + the rules should be implemented in `npdatetime_helpers` and called + from this function to set the correct output unit. + b) where left operand is a timedelta, i.e. the "m?" case. This case + is used for division, eg timedelta / int. + + At the time of writing, Numba does not support addition of timedelta + and other types, so this function does not consider the case "?m", + i.e. where timedelta is the right operand to a non-timedelta left + operand. To extend it in the future, just add another elif clause. 
+ """ + def make_specific(outputs, unit): + new_outputs = [] + for out in outputs: + if isinstance(out, types.NPTimedelta) and out.unit == "": + new_outputs.append(types.NPTimedelta(unit)) + else: + new_outputs.append(out) + return new_outputs + + def make_datetime_specific(outputs, dt_unit, td_unit): + new_outputs = [] + for out in outputs: + if isinstance(out, types.NPDatetime) and out.unit == "": + unit = npdatetime_helpers.combine_datetime_timedelta_units( + dt_unit, td_unit) + if unit is None: + raise TypingError(f"ufunc '{ufunc_name}' is not " + + "supported between " + + f"datetime64[{dt_unit}] " + + f"and timedelta64[{td_unit}]" + ) + new_outputs.append(types.NPDatetime(unit)) + else: + new_outputs.append(out) + return new_outputs + + if ufunc_inputs == 'mm': + if all(inp.unit == inputs[0].unit for inp in inputs): + # Case with operation on same units. Operations on different + # units not adjusted for now but might need to be + # added in the future + unit = inputs[0].unit + new_outputs = make_specific(outputs, unit) + else: + return outputs + return new_outputs + elif ufunc_inputs == 'mM': + # case where the left operand has timedelta type + # and the right operand has datetime + td_unit = inputs[0].unit + dt_unit = inputs[1].unit + return make_datetime_specific(outputs, dt_unit, td_unit) + + elif ufunc_inputs == 'Mm': + # case where the right operand has timedelta type + # and the left operand has datetime + dt_unit = inputs[0].unit + td_unit = inputs[1].unit + return make_datetime_specific(outputs, dt_unit, td_unit) + + elif ufunc_inputs[0] == 'm': + # case where the left operand has timedelta type + unit = inputs[0].unit + new_outputs = make_specific(outputs, unit) + return new_outputs + + # In NumPy, the loops are evaluated from first to last. The first one + # that is viable is the one used. One loop is viable if it is possible + # to cast every input operand to the one expected by the ufunc. 
+ # Also under NumPy 1.10+ the output must be able to be cast back + # to a close enough type ("same_kind"). + + for candidate in ufunc.types: + ufunc_inputs = candidate[:ufunc.nin] + ufunc_outputs = candidate[-ufunc.nout:] if ufunc.nout else [] + + if 'e' in ufunc_inputs: + # Skip float16 arrays since we don't have implementation for them + continue + if 'O' in ufunc_inputs: + # Skip object arrays + continue + found = True + # Skip if any input or output argument is mismatching + for outer, inner in zip(np_input_types, ufunc_inputs): + # (outer is a dtype instance, inner is a type char) + if outer.char in 'mM' or inner in 'mM': + # For datetime64 and timedelta64, we want to retain + # precise typing (i.e. the units); therefore we look for + # an exact match. + if outer.char != inner: + found = False + break + elif not ufunc_can_cast(outer.char, inner, + has_mixed_inputs, 'safe'): + found = False + break + if found: + # Can we cast the inner result to the outer result type? + for outer, inner in zip(np_output_types, ufunc_outputs): + if (outer.char not in 'mM' and not + ufunc_can_cast(inner, outer.char, + has_mixed_inputs, 'same_kind')): + found = False + break + if found: + # Found: determine the Numba types for the loop's inputs and + # outputs. + try: + inputs = choose_types(input_types, ufunc_inputs) + outputs = choose_types(output_types, ufunc_outputs) + # if the left operand or both are timedeltas, or the first + # argument is datetime and the second argument is timedelta, + # then the output units need to be determined. + if ufunc_inputs[0] == 'm' or ufunc_inputs == 'Mm': + outputs = set_output_dt_units(inputs, outputs, + ufunc_inputs, ufunc.__name__) + + except errors.NumbaNotImplementedError: + # One of the selected dtypes isn't supported by Numba + # (e.g. 
float16), try other candidates + continue + else: + return UFuncLoopSpec(inputs, outputs, candidate) + + return None + + +def _is_aligned_struct(struct): + return struct.isalignedstruct + + +def from_struct_dtype(dtype): + """Convert a NumPy structured dtype to Numba Record type + """ + if dtype.hasobject: + msg = "dtypes that contain object are not supported." + raise errors.NumbaNotImplementedError(msg) + + fields = [] + for name, info in dtype.fields.items(): + # *info* may have 3 element + [elemdtype, offset] = info[:2] + title = info[2] if len(info) == 3 else None + + ty = from_dtype(elemdtype) + infos = { + 'type': ty, + 'offset': offset, + 'title': title, + } + fields.append((name, infos)) + + # Note: dtype.alignment is not consistent. + # It is different after passing into a recarray. + # recarray(N, dtype=mydtype).dtype.alignment != mydtype.alignment + size = dtype.itemsize + aligned = _is_aligned_struct(dtype) + + return types.Record(fields, size, aligned) + + +def _get_bytes_buffer(ptr, nbytes): + """ + Get a ctypes array of *nbytes* starting at *ptr*. + """ + if isinstance(ptr, ctypes.c_void_p): + ptr = ptr.value + arrty = ctypes.c_byte * nbytes + return arrty.from_address(ptr) + + +def _get_array_from_ptr(ptr, nbytes, dtype): + return np.frombuffer(_get_bytes_buffer(ptr, nbytes), dtype) + + +def carray(ptr, shape, dtype=None): + """ + Return a Numpy array view over the data pointed to by *ptr* with the + given *shape*, in C order. If *dtype* is given, it is used as the + array's dtype, otherwise the array's dtype is inferred from *ptr*'s type. + """ + from numba.core.typing.ctypes_utils import from_ctypes + + try: + # Use ctypes parameter protocol if available + ptr = ptr._as_parameter_ + except AttributeError: + pass + + # Normalize dtype, to accept e.g. 
"int64" or np.int64 + if dtype is not None: + dtype = np.dtype(dtype) + + if isinstance(ptr, ctypes.c_void_p): + if dtype is None: + raise TypeError("explicit dtype required for void* argument") + p = ptr + elif isinstance(ptr, ctypes._Pointer): + ptrty = from_ctypes(ptr.__class__) + assert isinstance(ptrty, types.CPointer) + ptr_dtype = as_dtype(ptrty.dtype) + if dtype is not None and dtype != ptr_dtype: + raise TypeError("mismatching dtype '%s' for pointer %s" + % (dtype, ptr)) + dtype = ptr_dtype + p = ctypes.cast(ptr, ctypes.c_void_p) + else: + raise TypeError("expected a ctypes pointer, got %r" % (ptr,)) + + nbytes = dtype.itemsize * np.prod(shape, dtype=np.intp) + return _get_array_from_ptr(p, nbytes, dtype).reshape(shape) + + +def farray(ptr, shape, dtype=None): + """ + Return a Numpy array view over the data pointed to by *ptr* with the + given *shape*, in Fortran order. If *dtype* is given, it is used as the + array's dtype, otherwise the array's dtype is inferred from *ptr*'s type. + """ + if not isinstance(shape, int): + shape = shape[::-1] + return carray(ptr, shape, dtype).T + + +def is_contiguous(dims, strides, itemsize): + """Is the given shape, strides, and itemsize of C layout? + + Note: The code is usable as a numba-compiled function + """ + nd = len(dims) + # Check and skip 1s or 0s in inner dims + innerax = nd - 1 + while innerax > -1 and dims[innerax] <= 1: + innerax -= 1 + + # Early exit if all axis are 1s or 0s + if innerax < 0: + return True + + # Check itemsize matches innermost stride + if itemsize != strides[innerax]: + return False + + # Check and skip 1s or 0s in outer dims + outerax = 0 + while outerax < innerax and dims[outerax] <= 1: + outerax += 1 + + # Check remaining strides to be contiguous + ax = innerax + while ax > outerax: + if strides[ax] * dims[ax] != strides[ax - 1]: + return False + ax -= 1 + return True + + +def is_fortran(dims, strides, itemsize): + """Is the given shape, strides, and itemsize of F layout? 
+ + Note: The code is usable as a numba-compiled function + """ + nd = len(dims) + # Check and skip 1s or 0s in inner dims + firstax = 0 + while firstax < nd and dims[firstax] <= 1: + firstax += 1 + + # Early exit if all axis are 1s or 0s + if firstax >= nd: + return True + + # Check itemsize matches innermost stride + if itemsize != strides[firstax]: + return False + + # Check and skip 1s or 0s in outer dims + lastax = nd - 1 + while lastax > firstax and dims[lastax] <= 1: + lastax -= 1 + + # Check remaining strides to be contiguous + ax = firstax + while ax < lastax: + if strides[ax] * dims[ax] != strides[ax + 1]: + return False + ax += 1 + return True + + +def type_can_asarray(arr): + """ Returns True if the type of 'arr' is supported by the Numba `np.asarray` + implementation, False otherwise. + """ + + ok = (types.Array, types.Sequence, types.Tuple, types.StringLiteral, + types.Number, types.Boolean, types.containers.ListType) + + return isinstance(arr, ok) + + +def type_is_scalar(typ): + """ Returns True if the type of 'typ' is a scalar type, according to + NumPy rules. False otherwise. + https://numpy.org/doc/stable/reference/arrays.scalars.html#built-in-scalar-types + """ + + ok = (types.Boolean, types.Number, types.UnicodeType, types.StringLiteral, + types.NPTimedelta, types.NPDatetime) + return isinstance(typ, ok) + + +def check_is_integer(v, name): + """Raises TypingError if the value is not an integer.""" + if not isinstance(v, (int, types.Integer)): + raise TypingError('{} must be an integer'.format(name)) + + +def lt_floats(a, b): + # Adapted from NumPy commit 717c7acf which introduced the behavior of + # putting NaNs at the end. 
+ # The code is later moved to numpy/core/src/npysort/npysort_common.h + # This info is gathered as of NumPy commit d8c09c50 + return a < b or (np.isnan(b) and not np.isnan(a)) + + +def lt_complex(a, b): + if np.isnan(a.real): + if np.isnan(b.real): + if np.isnan(a.imag): + return False + else: + if np.isnan(b.imag): + return True + else: + return a.imag < b.imag + else: + return False + + else: + if np.isnan(b.real): + return True + else: + if np.isnan(a.imag): + if np.isnan(b.imag): + return a.real < b.real + else: + return False + else: + if np.isnan(b.imag): + return True + else: + if a.real < b.real: + return True + elif a.real == b.real: + return a.imag < b.imag + return False diff --git a/venv/lib/python3.10/site-packages/numba/np/old_arraymath.py b/venv/lib/python3.10/site-packages/numba/np/old_arraymath.py new file mode 100644 index 0000000000000000000000000000000000000000..5f75519ecb1e4a956102401b97df4fb6f9bbb72a --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/old_arraymath.py @@ -0,0 +1,4985 @@ +""" +Implementation of math operations on Array objects. 
+""" + + +import math +from collections import namedtuple +import operator + +import llvmlite.ir +import numpy as np + +from numba.core import types, cgutils +from numba.core.extending import overload, overload_method, register_jitable +from numba.np.numpy_support import (as_dtype, type_can_asarray, type_is_scalar, + numpy_version, is_nonelike, + check_is_integer, lt_floats, lt_complex) +from numba.core.imputils import (lower_builtin, impl_ret_borrowed, + impl_ret_new_ref, impl_ret_untracked) +from numba.np.arrayobj import (make_array, load_item, store_item, + _empty_nd_impl) +from numba.np.linalg import ensure_blas + +from numba.core.extending import intrinsic +from numba.core.errors import (RequireLiteralValue, TypingError, + NumbaValueError, NumbaNotImplementedError, + NumbaTypeError) +from numba.cpython.unsafe.tuple import tuple_setitem + + +def _check_blas(): + # Checks if a BLAS is available so e.g. dot will work + try: + ensure_blas() + except ImportError: + return False + return True + + +_HAVE_BLAS = _check_blas() + + +@intrinsic +def _create_tuple_result_shape(tyctx, shape_list, shape_tuple): + """ + This routine converts shape list where the axis dimension has already + been popped to a tuple for indexing of the same size. The original shape + tuple is also required because it contains a length field at compile time + whereas the shape list does not. + """ + + # The new tuple's size is one less than the original tuple since axis + # dimension removed. + nd = len(shape_tuple) - 1 + # The return type of this intrinsic is an int tuple of length nd. + tupty = types.UniTuple(types.intp, nd) + # The function signature for this intrinsic. + function_sig = tupty(shape_list, shape_tuple) + + def codegen(cgctx, builder, signature, args): + lltupty = cgctx.get_value_type(tupty) + # Create an empty int tuple. + tup = cgutils.get_null_value(lltupty) + + # Get the shape list from the args and we don't need shape tuple. 
+ [in_shape, _] = args + + def array_indexer(a, i): + return a[i] + + # loop to fill the tuple + for i in range(nd): + dataidx = cgctx.get_constant(types.intp, i) + # compile and call array_indexer + data = cgctx.compile_internal(builder, array_indexer, + types.intp(shape_list, types.intp), + [in_shape, dataidx]) + tup = builder.insert_value(tup, data, i) + return tup + + return function_sig, codegen + + +@intrinsic +def _gen_index_tuple(tyctx, shape_tuple, value, axis): + """ + Generates a tuple that can be used to index a specific slice from an + array for sum with axis. shape_tuple is the size of the dimensions of + the input array. 'value' is the value to put in the indexing tuple + in the axis dimension and 'axis' is that dimension. For this to work, + axis has to be a const. + """ + if not isinstance(axis, types.Literal): + raise RequireLiteralValue('axis argument must be a constant') + # Get the value of the axis constant. + axis_value = axis.literal_value + # The length of the indexing tuple to be output. + nd = len(shape_tuple) + + # If the axis value is impossible for the given size array then + # just fake it like it was for axis 0. This will stop compile errors + # when it looks like it could be called from array_sum_axis but really + # can't because that routine checks the axis mismatch and raise an + # exception. + if axis_value >= nd: + axis_value = 0 + + # Calculate the type of the indexing tuple. All the non-axis + # dimensions have slice2 type and the axis dimension has int type. + before = axis_value + after = nd - before - 1 + + types_list = [] + types_list += [types.slice2_type] * before + types_list += [types.intp] + types_list += [types.slice2_type] * after + + # Creates the output type of the function. + tupty = types.Tuple(types_list) + # Defines the signature of the intrinsic. 
+ function_sig = tupty(shape_tuple, value, axis) + + def codegen(cgctx, builder, signature, args): + lltupty = cgctx.get_value_type(tupty) + # Create an empty indexing tuple. + tup = cgutils.get_null_value(lltupty) + + # We only need value of the axis dimension here. + # The rest are constants defined above. + [_, value_arg, _] = args + + def create_full_slice(): + return slice(None, None) + + # loop to fill the tuple with slice(None,None) before + # the axis dimension. + + # compile and call create_full_slice + slice_data = cgctx.compile_internal(builder, create_full_slice, + types.slice2_type(), + []) + for i in range(0, axis_value): + tup = builder.insert_value(tup, slice_data, i) + + # Add the axis dimension 'value'. + tup = builder.insert_value(tup, value_arg, axis_value) + + # loop to fill the tuple with slice(None,None) after + # the axis dimension. + for i in range(axis_value + 1, nd): + tup = builder.insert_value(tup, slice_data, i) + return tup + + return function_sig, codegen + + +#---------------------------------------------------------------------------- +# Basic stats and aggregates + +@lower_builtin(np.sum, types.Array) +@lower_builtin("array.sum", types.Array) +def array_sum(context, builder, sig, args): + zero = sig.return_type(0) + + def array_sum_impl(arr): + c = zero + for v in np.nditer(arr): + c += v.item() + return c + + res = context.compile_internal(builder, array_sum_impl, sig, args, + locals=dict(c=sig.return_type)) + return impl_ret_borrowed(context, builder, sig.return_type, res) + + +@register_jitable +def _array_sum_axis_nop(arr, v): + return arr + + +def gen_sum_axis_impl(is_axis_const, const_axis_val, op, zero): + def inner(arr, axis): + """ + function that performs sums over one specific axis + + The third parameter to gen_index_tuple that generates the indexing + tuples has to be a const so we can't just pass "axis" through since + that isn't const. 
We can check for specific values and have + different instances that do take consts. Supporting axis summation + only up to the fourth dimension for now. + + typing/arraydecl.py:sum_expand defines the return type for sum with + axis. It is one dimension less than the input array. + """ + ndim = arr.ndim + + if not is_axis_const: + # Catch where axis is negative or greater than 3. + if axis < 0 or axis > 3: + raise ValueError("Numba does not support sum with axis " + "parameter outside the range 0 to 3.") + + # Catch the case where the user misspecifies the axis to be + # more than the number of the array's dimensions. + if axis >= ndim: + raise ValueError("axis is out of bounds for array") + + # Convert the shape of the input array to a list. + ashape = list(arr.shape) + # Get the length of the axis dimension. + axis_len = ashape[axis] + # Remove the axis dimension from the list of dimensional lengths. + ashape.pop(axis) + # Convert this shape list back to a tuple using above intrinsic. + ashape_without_axis = _create_tuple_result_shape(ashape, arr.shape) + # Tuple needed here to create output array with correct size. + result = np.full(ashape_without_axis, zero, type(zero)) + + # Iterate through the axis dimension. + for axis_index in range(axis_len): + if is_axis_const: + # constant specialized version works for any valid axis value + index_tuple_generic = _gen_index_tuple(arr.shape, axis_index, + const_axis_val) + result += arr[index_tuple_generic] + else: + # Generate a tuple used to index the input array. + # The tuple is ":" in all dimensions except the axis + # dimension where it is "axis_index". 
+ if axis == 0: + index_tuple1 = _gen_index_tuple(arr.shape, axis_index, 0) + result += arr[index_tuple1] + elif axis == 1: + index_tuple2 = _gen_index_tuple(arr.shape, axis_index, 1) + result += arr[index_tuple2] + elif axis == 2: + index_tuple3 = _gen_index_tuple(arr.shape, axis_index, 2) + result += arr[index_tuple3] + elif axis == 3: + index_tuple4 = _gen_index_tuple(arr.shape, axis_index, 3) + result += arr[index_tuple4] + return op(result, 0) + return inner + + +@lower_builtin(np.sum, types.Array, types.intp, types.DTypeSpec) +@lower_builtin(np.sum, types.Array, types.IntegerLiteral, types.DTypeSpec) +@lower_builtin("array.sum", types.Array, types.intp, types.DTypeSpec) +@lower_builtin("array.sum", types.Array, types.IntegerLiteral, types.DTypeSpec) +def array_sum_axis_dtype(context, builder, sig, args): + retty = sig.return_type + zero = getattr(retty, 'dtype', retty)(0) + # if the return is scalar in type then "take" the 0th element of the + # 0d array accumulator as the return value + if getattr(retty, 'ndim', None) is None: + op = np.take + else: + op = _array_sum_axis_nop + [ty_array, ty_axis, ty_dtype] = sig.args + is_axis_const = False + const_axis_val = 0 + if isinstance(ty_axis, types.Literal): + # this special-cases for constant axis + const_axis_val = ty_axis.literal_value + # fix negative axis + if const_axis_val < 0: + const_axis_val = ty_array.ndim + const_axis_val + if const_axis_val < 0 or const_axis_val > ty_array.ndim: + raise ValueError("'axis' entry is out of bounds") + + ty_axis = context.typing_context.resolve_value_type(const_axis_val) + axis_val = context.get_constant(ty_axis, const_axis_val) + # rewrite arguments + args = args[0], axis_val, args[2] + # rewrite sig + sig = sig.replace(args=[ty_array, ty_axis, ty_dtype]) + is_axis_const = True + + gen_impl = gen_sum_axis_impl(is_axis_const, const_axis_val, op, zero) + compiled = register_jitable(gen_impl) + + def array_sum_impl_axis(arr, axis, dtype): + return compiled(arr, axis) + + 
res = context.compile_internal(builder, array_sum_impl_axis, sig, args) + return impl_ret_new_ref(context, builder, sig.return_type, res) + + +@lower_builtin(np.sum, types.Array, types.DTypeSpec) +@lower_builtin("array.sum", types.Array, types.DTypeSpec) +def array_sum_dtype(context, builder, sig, args): + zero = sig.return_type(0) + + def array_sum_impl(arr, dtype): + c = zero + for v in np.nditer(arr): + c += v.item() + return c + + res = context.compile_internal(builder, array_sum_impl, sig, args, + locals=dict(c=sig.return_type)) + return impl_ret_borrowed(context, builder, sig.return_type, res) + + +@lower_builtin(np.sum, types.Array, types.intp) +@lower_builtin(np.sum, types.Array, types.IntegerLiteral) +@lower_builtin("array.sum", types.Array, types.intp) +@lower_builtin("array.sum", types.Array, types.IntegerLiteral) +def array_sum_axis(context, builder, sig, args): + retty = sig.return_type + zero = getattr(retty, 'dtype', retty)(0) + # if the return is scalar in type then "take" the 0th element of the + # 0d array accumulator as the return value + if getattr(retty, 'ndim', None) is None: + op = np.take + else: + op = _array_sum_axis_nop + [ty_array, ty_axis] = sig.args + is_axis_const = False + const_axis_val = 0 + if isinstance(ty_axis, types.Literal): + # this special-cases for constant axis + const_axis_val = ty_axis.literal_value + # fix negative axis + if const_axis_val < 0: + const_axis_val = ty_array.ndim + const_axis_val + if const_axis_val < 0 or const_axis_val > ty_array.ndim: + msg = f"'axis' entry ({const_axis_val}) is out of bounds" + raise NumbaValueError(msg) + + ty_axis = context.typing_context.resolve_value_type(const_axis_val) + axis_val = context.get_constant(ty_axis, const_axis_val) + # rewrite arguments + args = args[0], axis_val + # rewrite sig + sig = sig.replace(args=[ty_array, ty_axis]) + is_axis_const = True + + gen_impl = gen_sum_axis_impl(is_axis_const, const_axis_val, op, zero) + compiled = register_jitable(gen_impl) + + def 
array_sum_impl_axis(arr, axis): + return compiled(arr, axis) + + res = context.compile_internal(builder, array_sum_impl_axis, sig, args) + return impl_ret_new_ref(context, builder, sig.return_type, res) + + +def get_accumulator(dtype, value): + if dtype.type == np.timedelta64: + acc_init = np.int64(value).view(dtype) + else: + acc_init = dtype.type(value) + return acc_init + + +@overload(np.prod) +@overload_method(types.Array, "prod") +def array_prod(a): + if isinstance(a, types.Array): + dtype = as_dtype(a.dtype) + + acc_init = get_accumulator(dtype, 1) + + def array_prod_impl(a): + c = acc_init + for v in np.nditer(a): + c *= v.item() + return c + + return array_prod_impl + + +@overload(np.cumsum) +@overload_method(types.Array, "cumsum") +def array_cumsum(a): + if isinstance(a, types.Array): + is_integer = a.dtype in types.signed_domain + is_bool = a.dtype == types.bool_ + if (is_integer and a.dtype.bitwidth < types.intp.bitwidth)\ + or is_bool: + dtype = as_dtype(types.intp) + else: + dtype = as_dtype(a.dtype) + + acc_init = get_accumulator(dtype, 0) + + def array_cumsum_impl(a): + out = np.empty(a.size, dtype) + c = acc_init + for idx, v in enumerate(a.flat): + c += v + out[idx] = c + return out + + return array_cumsum_impl + + +@overload(np.cumprod) +@overload_method(types.Array, "cumprod") +def array_cumprod(a): + if isinstance(a, types.Array): + is_integer = a.dtype in types.signed_domain + is_bool = a.dtype == types.bool_ + if (is_integer and a.dtype.bitwidth < types.intp.bitwidth)\ + or is_bool: + dtype = as_dtype(types.intp) + else: + dtype = as_dtype(a.dtype) + + acc_init = get_accumulator(dtype, 1) + + def array_cumprod_impl(a): + out = np.empty(a.size, dtype) + c = acc_init + for idx, v in enumerate(a.flat): + c *= v + out[idx] = c + return out + + return array_cumprod_impl + + +@overload(np.mean) +@overload_method(types.Array, "mean") +def array_mean(a): + if isinstance(a, types.Array): + is_number = a.dtype in types.integer_domain | 
frozenset([types.bool_]) + if is_number: + dtype = as_dtype(types.float64) + else: + dtype = as_dtype(a.dtype) + + acc_init = get_accumulator(dtype, 0) + + def array_mean_impl(a): + # Can't use the naive `arr.sum() / arr.size`, as it would return + # a wrong result on integer sum overflow. + c = acc_init + for v in np.nditer(a): + c += v.item() + return c / a.size + + return array_mean_impl + + +@overload(np.var) +@overload_method(types.Array, "var") +def array_var(a): + if isinstance(a, types.Array): + def array_var_impl(a): + # Compute the mean + m = a.mean() + + # Compute the sum of square diffs + ssd = 0 + for v in np.nditer(a): + val = (v.item() - m) + ssd += np.real(val * np.conj(val)) + return ssd / a.size + + return array_var_impl + + +@overload(np.std) +@overload_method(types.Array, "std") +def array_std(a): + if isinstance(a, types.Array): + def array_std_impl(a): + return a.var() ** 0.5 + + return array_std_impl + + +@register_jitable +def min_comparator(a, min_val): + return a < min_val + + +@register_jitable +def max_comparator(a, min_val): + return a > min_val + + +@register_jitable +def return_false(a): + return False + + +@overload(np.min) +@overload(np.amin) +@overload_method(types.Array, "min") +def npy_min(a): + if not isinstance(a, types.Array): + return + + if isinstance(a.dtype, (types.NPDatetime, types.NPTimedelta)): + pre_return_func = np.isnat + comparator = min_comparator + elif isinstance(a.dtype, types.Complex): + pre_return_func = return_false + + def comp_func(a, min_val): + if a.real < min_val.real: + return True + elif a.real == min_val.real: + if a.imag < min_val.imag: + return True + return False + + comparator = register_jitable(comp_func) + elif isinstance(a.dtype, types.Float): + pre_return_func = np.isnan + comparator = min_comparator + else: + pre_return_func = return_false + comparator = min_comparator + + def impl_min(a): + if a.size == 0: + raise ValueError("zero-size array to reduction operation " + "minimum which has no 
identity") + + it = np.nditer(a) + min_value = next(it).take(0) + if pre_return_func(min_value): + return min_value + + for view in it: + v = view.item() + if pre_return_func(v): + return v + if comparator(v, min_value): + min_value = v + return min_value + + return impl_min + + +@overload(np.max) +@overload(np.amax) +@overload_method(types.Array, "max") +def npy_max(a): + if not isinstance(a, types.Array): + return + + if isinstance(a.dtype, (types.NPDatetime, types.NPTimedelta)): + pre_return_func = np.isnat + comparator = max_comparator + elif isinstance(a.dtype, types.Complex): + pre_return_func = return_false + + def comp_func(a, max_val): + if a.real > max_val.real: + return True + elif a.real == max_val.real: + if a.imag > max_val.imag: + return True + return False + + comparator = register_jitable(comp_func) + elif isinstance(a.dtype, types.Float): + pre_return_func = np.isnan + comparator = max_comparator + else: + pre_return_func = return_false + comparator = max_comparator + + def impl_max(a): + if a.size == 0: + raise ValueError("zero-size array to reduction operation " + "maximum which has no identity") + + it = np.nditer(a) + max_value = next(it).take(0) + if pre_return_func(max_value): + return max_value + + for view in it: + v = view.item() + if pre_return_func(v): + return v + if comparator(v, max_value): + max_value = v + return max_value + + return impl_max + + +@register_jitable +def array_argmin_impl_datetime(arry): + if arry.size == 0: + raise ValueError("attempt to get argmin of an empty sequence") + it = np.nditer(arry) + min_value = next(it).take(0) + min_idx = 0 + if np.isnat(min_value): + return min_idx + + idx = 1 + for view in it: + v = view.item() + if np.isnat(v): + return idx + if v < min_value: + min_value = v + min_idx = idx + idx += 1 + return min_idx + + +@register_jitable +def array_argmin_impl_float(arry): + if arry.size == 0: + raise ValueError("attempt to get argmin of an empty sequence") + for v in arry.flat: + min_value = v 
+ min_idx = 0 + break + if np.isnan(min_value): + return min_idx + + idx = 0 + for v in arry.flat: + if np.isnan(v): + return idx + if v < min_value: + min_value = v + min_idx = idx + idx += 1 + return min_idx + + +@register_jitable +def array_argmin_impl_generic(arry): + if arry.size == 0: + raise ValueError("attempt to get argmin of an empty sequence") + for v in arry.flat: + min_value = v + min_idx = 0 + break + else: + raise RuntimeError('unreachable') + + idx = 0 + for v in arry.flat: + if v < min_value: + min_value = v + min_idx = idx + idx += 1 + return min_idx + + +@overload(np.argmin) +@overload_method(types.Array, "argmin") +def array_argmin(a, axis=None): + if isinstance(a.dtype, (types.NPDatetime, types.NPTimedelta)): + flatten_impl = array_argmin_impl_datetime + elif isinstance(a.dtype, types.Float): + flatten_impl = array_argmin_impl_float + else: + flatten_impl = array_argmin_impl_generic + + if is_nonelike(axis): + def array_argmin_impl(a, axis=None): + return flatten_impl(a) + else: + array_argmin_impl = build_argmax_or_argmin_with_axis_impl( + a, axis, flatten_impl + ) + return array_argmin_impl + + +@register_jitable +def array_argmax_impl_datetime(arry): + if arry.size == 0: + raise ValueError("attempt to get argmax of an empty sequence") + it = np.nditer(arry) + max_value = next(it).take(0) + max_idx = 0 + if np.isnat(max_value): + return max_idx + + idx = 1 + for view in it: + v = view.item() + if np.isnat(v): + return idx + if v > max_value: + max_value = v + max_idx = idx + idx += 1 + return max_idx + + +@register_jitable +def array_argmax_impl_float(arry): + if arry.size == 0: + raise ValueError("attempt to get argmax of an empty sequence") + for v in arry.flat: + max_value = v + max_idx = 0 + break + if np.isnan(max_value): + return max_idx + + idx = 0 + for v in arry.flat: + if np.isnan(v): + return idx + if v > max_value: + max_value = v + max_idx = idx + idx += 1 + return max_idx + + +@register_jitable +def 
array_argmax_impl_generic(arry): + if arry.size == 0: + raise ValueError("attempt to get argmax of an empty sequence") + for v in arry.flat: + max_value = v + max_idx = 0 + break + + idx = 0 + for v in arry.flat: + if v > max_value: + max_value = v + max_idx = idx + idx += 1 + return max_idx + + +def build_argmax_or_argmin_with_axis_impl(a, axis, flatten_impl): + """ + Given a function that implements the logic for handling a flattened + array, return the implementation function. + """ + check_is_integer(axis, "axis") + retty = types.intp + + tuple_buffer = tuple(range(a.ndim)) + + def impl(a, axis=None): + if axis < 0: + axis = a.ndim + axis + + if axis < 0 or axis >= a.ndim: + raise ValueError("axis is out of bounds") + + # Short circuit 1-dimensional arrays: + if a.ndim == 1: + return flatten_impl(a) + + # Make chosen axis the last axis: + tmp = tuple_buffer + for i in range(axis, a.ndim - 1): + tmp = tuple_setitem(tmp, i, i + 1) + transpose_index = tuple_setitem(tmp, a.ndim - 1, axis) + transposed_arr = a.transpose(transpose_index) + + # Flatten along that axis; since we've transposed, we can just get + # batches off the overall flattened array. 
+ m = transposed_arr.shape[-1] + raveled = transposed_arr.ravel() + assert raveled.size == a.size + assert transposed_arr.size % m == 0 + out = np.empty(transposed_arr.size // m, retty) + for i in range(out.size): + out[i] = flatten_impl(raveled[i * m:(i + 1) * m]) + + # Reshape based on axis we didn't flatten over: + return out.reshape(transposed_arr.shape[:-1]) + + return impl + + +@overload(np.argmax) +@overload_method(types.Array, "argmax") +def array_argmax(a, axis=None): + if isinstance(a.dtype, (types.NPDatetime, types.NPTimedelta)): + flatten_impl = array_argmax_impl_datetime + elif isinstance(a.dtype, types.Float): + flatten_impl = array_argmax_impl_float + else: + flatten_impl = array_argmax_impl_generic + + if is_nonelike(axis): + def array_argmax_impl(a, axis=None): + return flatten_impl(a) + else: + array_argmax_impl = build_argmax_or_argmin_with_axis_impl( + a, axis, flatten_impl + ) + return array_argmax_impl + + +@overload(np.all) +@overload_method(types.Array, "all") +def np_all(a): + def flat_all(a): + for v in np.nditer(a): + if not v.item(): + return False + return True + + return flat_all + + +@register_jitable +def _allclose_scalars(a_v, b_v, rtol=1e-05, atol=1e-08, equal_nan=False): + a_v_isnan = np.isnan(a_v) + b_v_isnan = np.isnan(b_v) + + # only one of the values is NaN and the + # other is not. 
+ if ( (not a_v_isnan and b_v_isnan) or + (a_v_isnan and not b_v_isnan) ): + return False + + # either both of the values are NaN + # or both are numbers + if a_v_isnan and b_v_isnan: + if not equal_nan: + return False + else: + if np.isinf(a_v) or np.isinf(b_v): + return a_v == b_v + + if np.abs(a_v - b_v) > atol + rtol * np.abs(b_v * 1.0): + return False + + return True + + +@overload(np.allclose) +@overload_method(types.Array, "allclose") +def np_allclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False): + + if not type_can_asarray(a): + raise TypingError('The first argument "a" must be array-like') + + if not type_can_asarray(b): + raise TypingError('The second argument "b" must be array-like') + + if not isinstance(rtol, (float, types.Float)): + raise TypingError('The third argument "rtol" must be a ' + 'floating point') + + if not isinstance(atol, (float, types.Float)): + raise TypingError('The fourth argument "atol" must be a ' + 'floating point') + + if not isinstance(equal_nan, (bool, types.Boolean)): + raise TypingError('The fifth argument "equal_nan" must be a ' + 'boolean') + + is_a_scalar = isinstance(a, types.Number) + is_b_scalar = isinstance(b, types.Number) + + if is_a_scalar and is_b_scalar: + def np_allclose_impl_scalar_scalar(a, b, rtol=1e-05, atol=1e-08, + equal_nan=False): + return _allclose_scalars(a, b, rtol=rtol, atol=atol, + equal_nan=equal_nan) + return np_allclose_impl_scalar_scalar + elif is_a_scalar and not is_b_scalar: + def np_allclose_impl_scalar_array(a, b, rtol=1e-05, atol=1e-08, + equal_nan=False): + b = np.asarray(b) + for bv in np.nditer(b): + if not _allclose_scalars(a, bv.item(), rtol=rtol, atol=atol, + equal_nan=equal_nan): + return False + return True + return np_allclose_impl_scalar_array + elif not is_a_scalar and is_b_scalar: + def np_allclose_impl_array_scalar(a, b, rtol=1e-05, atol=1e-08, + equal_nan=False): + a = np.asarray(a) + for av in np.nditer(a): + if not _allclose_scalars(av.item(), b, rtol=rtol, atol=atol, + 
equal_nan=equal_nan): + return False + return True + return np_allclose_impl_array_scalar + elif not is_a_scalar and not is_b_scalar: + def np_allclose_impl_array_array(a, b, rtol=1e-05, atol=1e-08, + equal_nan=False): + a = np.asarray(a) + b = np.asarray(b) + a_a, b_b = np.broadcast_arrays(a, b) + + for av, bv in np.nditer((a_a, b_b)): + if not _allclose_scalars(av.item(), bv.item(), rtol=rtol, + atol=atol, equal_nan=equal_nan): + return False + + return True + + return np_allclose_impl_array_array + + +@overload(np.any) +@overload_method(types.Array, "any") +def np_any(a): + def flat_any(a): + for v in np.nditer(a): + if v.item(): + return True + return False + + return flat_any + + +@overload(np.average) +def np_average(a, axis=None, weights=None): + + if weights is None or isinstance(weights, types.NoneType): + def np_average_impl(a, axis=None, weights=None): + arr = np.asarray(a) + return np.mean(arr) + else: + if axis is None or isinstance(axis, types.NoneType): + def np_average_impl(a, axis=None, weights=None): + arr = np.asarray(a) + weights = np.asarray(weights) + + if arr.shape != weights.shape: + if axis is None: + raise TypeError( + "Numba does not support average when shapes of " + "a and weights differ.") + if weights.ndim != 1: + raise TypeError( + "1D weights expected when shapes of " + "a and weights differ.") + + scl = np.sum(weights) + if scl == 0.0: + raise ZeroDivisionError( + "Weights sum to zero, can't be normalized.") + + avg = np.sum(np.multiply(arr, weights)) / scl + return avg + else: + def np_average_impl(a, axis=None, weights=None): + raise TypeError("Numba does not support average with axis.") + + return np_average_impl + + +def get_isnan(dtype): + """ + A generic isnan() function + """ + if isinstance(dtype, (types.Float, types.Complex)): + return np.isnan + else: + @register_jitable + def _trivial_isnan(x): + return False + return _trivial_isnan + + +@overload(np.iscomplex) +def np_iscomplex(x): + if type_can_asarray(x): + # NumPy 
uses asanyarray here! + return lambda x: np.asarray(x).imag != 0 + return None + + +@overload(np.isreal) +def np_isreal(x): + if type_can_asarray(x): + # NumPy uses asanyarray here! + return lambda x: np.asarray(x).imag == 0 + return None + + +@overload(np.iscomplexobj) +def iscomplexobj(x): + # Implementation based on NumPy + # https://github.com/numpy/numpy/blob/d9b1e32cb8ef90d6b4a47853241db2a28146a57d/numpy/lib/type_check.py#L282-L320 + dt = determine_dtype(x) + if isinstance(x, types.Optional): + dt = determine_dtype(x.type) + iscmplx = np.issubdtype(dt, np.complexfloating) + + if isinstance(x, types.Optional): + def impl(x): + if x is None: + return False + return iscmplx + else: + def impl(x): + return iscmplx + return impl + + +@overload(np.isrealobj) +def isrealobj(x): + # Return True if x is not a complex type. + # Implementation based on NumPy + # https://github.com/numpy/numpy/blob/ccfbcc1cd9a4035a467f2e982a565ab27de25b6b/numpy/lib/type_check.py#L290-L322 + def impl(x): + return not np.iscomplexobj(x) + return impl + + +@overload(np.isscalar) +def np_isscalar(element): + res = type_is_scalar(element) + + def impl(element): + return res + return impl + + +def is_np_inf_impl(x, out, fn): + + # if/else branch should be unified after PR #5606 is merged + if is_nonelike(out): + def impl(x, out=None): + return np.logical_and(np.isinf(x), fn(np.signbit(x))) + else: + def impl(x, out=None): + return np.logical_and(np.isinf(x), fn(np.signbit(x)), out) + + return impl + + +@overload(np.isneginf) +def isneginf(x, out=None): + fn = register_jitable(lambda x: x) + return is_np_inf_impl(x, out, fn) + + +@overload(np.isposinf) +def isposinf(x, out=None): + fn = register_jitable(lambda x: ~x) + return is_np_inf_impl(x, out, fn) + + +@register_jitable +def less_than(a, b): + return a < b + + +@register_jitable +def greater_than(a, b): + return a > b + + +@register_jitable +def check_array(a): + if a.size == 0: + raise ValueError('zero-size array to reduction operation 
@overload(np.isclose)
def isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
    """
    Implement np.isclose via the elementwise helper `_isclose_item`.

    Returns a bool array shaped like the (broadcast) inputs, or a single
    bool when both operands are scalars.  Argument types are validated
    at compile time.
    """
    # Compile-time validation; raises during typing.
    if not type_can_asarray(a):
        raise TypingError('The first argument "a" must be array-like')

    if not type_can_asarray(b):
        raise TypingError('The second argument "b" must be array-like')

    if not isinstance(rtol, (float, types.Float)):
        raise TypingError('The third argument "rtol" must be a '
                          'floating point')

    if not isinstance(atol, (float, types.Float)):
        raise TypingError('The fourth argument "atol" must be a '
                          'floating point')

    if not isinstance(equal_nan, (bool, types.Boolean)):
        raise TypingError('The fifth argument "equal_nan" must be a '
                          'boolean')

    if isinstance(a, types.Array) and isinstance(b, types.Number):
        def isclose_impl(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
            # Flatten, compare each element against the scalar, restore
            # the original shape.
            x = a.reshape(-1)
            y = b
            out = np.zeros(len(x), np.bool_)
            for i in range(len(out)):
                out[i] = _isclose_item(x[i], y, rtol, atol, equal_nan)
            return out.reshape(a.shape)

    elif isinstance(a, types.Number) and isinstance(b, types.Array):
        def isclose_impl(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
            x = a
            y = b.reshape(-1)
            out = np.zeros(len(y), np.bool_)
            for i in range(len(out)):
                out[i] = _isclose_item(x, y[i], rtol, atol, equal_nan)
            return out.reshape(b.shape)

    elif isinstance(a, types.Array) and isinstance(b, types.Array):
        def isclose_impl(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
            shape = np.broadcast_shapes(a.shape, b.shape)
            a_ = np.broadcast_to(a, shape)
            b_ = np.broadcast_to(b, shape)

            # BUG FIX: allocate one slot per *element* (a_.size), not per
            # first-axis entry (len(a_)), and reshape the flat result.
            # The previous len()/np.broadcast_to(out, shape) pair indexed
            # out of bounds for ndim > 1 (nditer yields a_.size items)
            # and produced a wrongly-strided view otherwise.
            out = np.zeros(a_.size, dtype=np.bool_)
            for i, (av, bv) in enumerate(np.nditer((a_, b_))):
                out[i] = _isclose_item(av.item(), bv.item(), rtol, atol,
                                       equal_nan)
            return out.reshape(shape)

    else:
        def isclose_impl(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
            return _isclose_item(a, b, rtol, atol, equal_nan)

    return isclose_impl
ZeroDivisionError + return np.divide(c, count) + + return nanmean_impl + + +@overload(np.nanvar) +def np_nanvar(a): + if not isinstance(a, types.Array): + return + isnan = get_isnan(a.dtype) + + def nanvar_impl(a): + # Compute the mean + m = np.nanmean(a) + + # Compute the sum of square diffs + ssd = 0.0 + count = 0 + for view in np.nditer(a): + v = view.item() + if not isnan(v): + val = (v.item() - m) + ssd += np.real(val * np.conj(val)) + count += 1 + # np.divide() doesn't raise ZeroDivisionError + return np.divide(ssd, count) + + return nanvar_impl + + +@overload(np.nanstd) +def np_nanstd(a): + if not isinstance(a, types.Array): + return + + def nanstd_impl(a): + return np.nanvar(a) ** 0.5 + + return nanstd_impl + + +@overload(np.nansum) +def np_nansum(a): + if not isinstance(a, types.Array): + return + if isinstance(a.dtype, types.Integer): + retty = types.intp + else: + retty = a.dtype + zero = retty(0) + isnan = get_isnan(a.dtype) + + def nansum_impl(a): + c = zero + for view in np.nditer(a): + v = view.item() + if not isnan(v): + c += v + return c + + return nansum_impl + + +@overload(np.nanprod) +def np_nanprod(a): + if not isinstance(a, types.Array): + return + if isinstance(a.dtype, types.Integer): + retty = types.intp + else: + retty = a.dtype + one = retty(1) + isnan = get_isnan(a.dtype) + + def nanprod_impl(a): + c = one + for view in np.nditer(a): + v = view.item() + if not isnan(v): + c *= v + return c + + return nanprod_impl + + +@overload(np.nancumprod) +def np_nancumprod(a): + if not isinstance(a, types.Array): + return + + if isinstance(a.dtype, (types.Boolean, types.Integer)): + # dtype cannot possibly contain NaN + return lambda a: np.cumprod(a) + else: + retty = a.dtype + is_nan = get_isnan(retty) + one = retty(1) + + def nancumprod_impl(a): + out = np.empty(a.size, retty) + c = one + for idx, v in enumerate(a.flat): + if ~is_nan(v): + c *= v + out[idx] = c + return out + + return nancumprod_impl + + +@overload(np.nancumsum) +def 
np_nancumsum(a): + if not isinstance(a, types.Array): + return + + if isinstance(a.dtype, (types.Boolean, types.Integer)): + # dtype cannot possibly contain NaN + return lambda a: np.cumsum(a) + else: + retty = a.dtype + is_nan = get_isnan(retty) + zero = retty(0) + + def nancumsum_impl(a): + out = np.empty(a.size, retty) + c = zero + for idx, v in enumerate(a.flat): + if ~is_nan(v): + c += v + out[idx] = c + return out + + return nancumsum_impl + + +@register_jitable +def prepare_ptp_input(a): + arr = _asarray(a) + if len(arr) == 0: + raise ValueError('zero-size array reduction not possible') + else: + return arr + + +def _compute_current_val_impl_gen(op, current_val, val): + if isinstance(current_val, types.Complex): + # The sort order for complex numbers is lexicographic. If both the + # real and imaginary parts are non-nan then the order is determined + # by the real parts except when they are equal, in which case the + # order is determined by the imaginary parts. + # https://github.com/numpy/numpy/blob/577a86e/numpy/core/fromnumeric.py#L874-L877 # noqa: E501 + def impl(current_val, val): + if op(val.real, current_val.real): + return val + elif (val.real == current_val.real + and op(val.imag, current_val.imag)): + return val + return current_val + else: + def impl(current_val, val): + return val if op(val, current_val) else current_val + return impl + + +def _compute_a_max(current_val, val): + pass + + +def _compute_a_min(current_val, val): + pass + + +@overload(_compute_a_max) +def _compute_a_max_impl(current_val, val): + return _compute_current_val_impl_gen(operator.gt, current_val, val) + + +@overload(_compute_a_min) +def _compute_a_min_impl(current_val, val): + return _compute_current_val_impl_gen(operator.lt, current_val, val) + + +def _early_return(val): + pass + + +@overload(_early_return) +def _early_return_impl(val): + UNUSED = 0 + if isinstance(val, types.Complex): + def impl(val): + if np.isnan(val.real): + if np.isnan(val.imag): + return True, 
np.nan + np.nan * 1j + else: + return True, np.nan + 0j + else: + return False, UNUSED + elif isinstance(val, types.Float): + def impl(val): + if np.isnan(val): + return True, np.nan + else: + return False, UNUSED + else: + def impl(val): + return False, UNUSED + return impl + + +@overload(np.ptp) +def np_ptp(a): + + if hasattr(a, 'dtype'): + if isinstance(a.dtype, types.Boolean): + raise TypingError("Boolean dtype is unsupported (as per NumPy)") + # Numpy raises a TypeError + + def np_ptp_impl(a): + arr = prepare_ptp_input(a) + + a_flat = arr.flat + a_min = a_flat[0] + a_max = a_flat[0] + + for i in range(arr.size): + val = a_flat[i] + take_branch, retval = _early_return(val) + if take_branch: + return retval + a_max = _compute_a_max(a_max, val) + a_min = _compute_a_min(a_min, val) + + return a_max - a_min + + return np_ptp_impl + + +if numpy_version < (2, 0): + overload_method(types.Array, 'ptp')(np_ptp) + +#---------------------------------------------------------------------------- +# Median and partitioning + + +@register_jitable +def nan_aware_less_than(a, b): + if np.isnan(a): + return False + else: + if np.isnan(b): + return True + else: + return a < b + + +def _partition_factory(pivotimpl, argpartition=False): + def _partition(A, low, high, I=None): + mid = (low + high) >> 1 + # NOTE: the pattern of swaps below for the pivot choice and the + # partitioning gives good results (i.e. regular O(n log n)) + # on sorted, reverse-sorted, and uniform arrays. Subtle changes + # risk breaking this property. 
+ + # Use median of three {low, middle, high} as the pivot + if pivotimpl(A[mid], A[low]): + A[low], A[mid] = A[mid], A[low] + if argpartition: + I[low], I[mid] = I[mid], I[low] + if pivotimpl(A[high], A[mid]): + A[high], A[mid] = A[mid], A[high] + if argpartition: + I[high], I[mid] = I[mid], I[high] + if pivotimpl(A[mid], A[low]): + A[low], A[mid] = A[mid], A[low] + if argpartition: + I[low], I[mid] = I[mid], I[low] + pivot = A[mid] + + A[high], A[mid] = A[mid], A[high] + if argpartition: + I[high], I[mid] = I[mid], I[high] + i = low + j = high - 1 + while True: + while i < high and pivotimpl(A[i], pivot): + i += 1 + while j >= low and pivotimpl(pivot, A[j]): + j -= 1 + if i >= j: + break + A[i], A[j] = A[j], A[i] + if argpartition: + I[i], I[j] = I[j], I[i] + i += 1 + j -= 1 + # Put the pivot back in its final place (all items before `i` + # are smaller than the pivot, all items at/after `i` are larger) + A[i], A[high] = A[high], A[i] + if argpartition: + I[i], I[high] = I[high], I[i] + return i + return _partition + + +_partition = register_jitable(_partition_factory(less_than)) +_partition_w_nan = register_jitable(_partition_factory(nan_aware_less_than)) +_argpartition_w_nan = register_jitable(_partition_factory( + nan_aware_less_than, + argpartition=True) +) + + +def _select_factory(partitionimpl): + def _select(arry, k, low, high, idx=None): + """ + Select the k'th smallest element in array[low:high + 1]. 
+ """ + i = partitionimpl(arry, low, high, idx) + while i != k: + if i < k: + low = i + 1 + i = partitionimpl(arry, low, high, idx) + else: + high = i - 1 + i = partitionimpl(arry, low, high, idx) + return arry[k] + return _select + + +_select = register_jitable(_select_factory(_partition)) +_select_w_nan = register_jitable(_select_factory(_partition_w_nan)) +_arg_select_w_nan = register_jitable(_select_factory(_argpartition_w_nan)) + + +@register_jitable +def _select_two(arry, k, low, high): + """ + Select the k'th and k+1'th smallest elements in array[low:high + 1]. + + This is significantly faster than doing two independent selections + for k and k+1. + """ + while True: + assert high > low # by construction + i = _partition(arry, low, high) + if i < k: + low = i + 1 + elif i > k + 1: + high = i - 1 + elif i == k: + _select(arry, k + 1, i + 1, high) + break + else: # i == k + 1 + _select(arry, k, low, i - 1) + break + + return arry[k], arry[k + 1] + + +@register_jitable +def _median_inner(temp_arry, n): + """ + The main logic of the median() call. *temp_arry* must be disposable, + as this function will mutate it. + """ + low = 0 + high = n - 1 + half = n >> 1 + if n & 1 == 0: + a, b = _select_two(temp_arry, half - 1, low, high) + return (a + b) / 2 + else: + return _select(temp_arry, half, low, high) + + +@overload(np.median) +def np_median(a): + if not isinstance(a, types.Array): + return + + def median_impl(a): + # np.median() works on the flattened array, and we need a temporary + # workspace anyway + temp_arry = a.flatten() + n = temp_arry.shape[0] + return _median_inner(temp_arry, n) + + return median_impl + + +@register_jitable +def _collect_percentiles_inner(a, q): + #TODO: This needs rewriting to be closer to NumPy, particularly the nan/inf + # handling which is generally subject to algorithmic changes. 
+ n = len(a) + + if n == 1: + # single element array; output same for all percentiles + out = np.full(len(q), a[0], dtype=np.float64) + else: + out = np.empty(len(q), dtype=np.float64) + for i in range(len(q)): + percentile = q[i] + + # bypass pivoting where requested percentile is 100 + if percentile == 100: + val = np.max(a) + # heuristics to handle infinite values a la NumPy + if ~np.all(np.isfinite(a)): + if ~np.isfinite(val): + val = np.nan + + # bypass pivoting where requested percentile is 0 + elif percentile == 0: + val = np.min(a) + # convoluted heuristics to handle infinite values a la NumPy + if ~np.all(np.isfinite(a)): + num_pos_inf = np.sum(a == np.inf) + num_neg_inf = np.sum(a == -np.inf) + num_finite = n - (num_neg_inf + num_pos_inf) + if num_finite == 0: + val = np.nan + if num_pos_inf == 1 and n == 2: + val = np.nan + if num_neg_inf > 1: + val = np.nan + if num_finite == 1: + if num_pos_inf > 1: + if num_neg_inf != 1: + val = np.nan + + else: + # linear interp between closest ranks + rank = 1 + (n - 1) * np.true_divide(percentile, 100.0) + f = math.floor(rank) + m = rank - f + lower, upper = _select_two(a, k=int(f - 1), low=0, high=(n - 1)) + val = lower * (1 - m) + upper * m + out[i] = val + + return out + + +@register_jitable +def _can_collect_percentiles(a, nan_mask, skip_nan): + if skip_nan: + a = a[~nan_mask] + if len(a) == 0: + return False # told to skip nan, but no elements remain + else: + if np.any(nan_mask): + return False # told *not* to skip nan, but nan encountered + + if len(a) == 1: # single element array + val = a[0] + return np.isfinite(val) # can collect percentiles if element is finite + else: + return True + + +@register_jitable +def check_valid(q, q_upper_bound): + valid = True + + # avoid expensive reductions where possible + if q.ndim == 1 and q.size < 10: + for i in range(q.size): + if q[i] < 0.0 or q[i] > q_upper_bound or np.isnan(q[i]): + valid = False + break + else: + if np.any(np.isnan(q)) or np.any(q < 0.0) or 
np.any(q > q_upper_bound): + valid = False + + return valid + + +@register_jitable +def percentile_is_valid(q): + if not check_valid(q, q_upper_bound=100.0): + raise ValueError('Percentiles must be in the range [0, 100]') + + +@register_jitable +def quantile_is_valid(q): + if not check_valid(q, q_upper_bound=1.0): + raise ValueError('Quantiles must be in the range [0, 1]') + + +@register_jitable +def _collect_percentiles(a, q, check_q, factor, skip_nan): + q = np.asarray(q, dtype=np.float64).flatten() + check_q(q) + q = q * factor + + temp_arry = np.asarray(a, dtype=np.float64).flatten() + nan_mask = np.isnan(temp_arry) + + if _can_collect_percentiles(temp_arry, nan_mask, skip_nan): + temp_arry = temp_arry[~nan_mask] + out = _collect_percentiles_inner(temp_arry, q) + else: + out = np.full(len(q), np.nan) + + return out + + +def _percentile_quantile_inner(a, q, skip_nan, factor, check_q): + """ + The underlying algorithm to find percentiles and quantiles + is the same, hence we converge onto the same code paths + in this inner function implementation + """ + dt = determine_dtype(a) + if np.issubdtype(dt, np.complexfloating): + raise TypingError('Not supported for complex dtype') + # this could be supported, but would require a + # lexicographic comparison + + def np_percentile_q_scalar_impl(a, q): + return _collect_percentiles(a, q, check_q, factor, skip_nan)[0] + + def np_percentile_impl(a, q): + return _collect_percentiles(a, q, check_q, factor, skip_nan) + + if isinstance(q, (types.Number, types.Boolean)): + return np_percentile_q_scalar_impl + elif isinstance(q, types.Array) and q.ndim == 0: + return np_percentile_q_scalar_impl + else: + return np_percentile_impl + + +@overload(np.percentile) +def np_percentile(a, q): + return _percentile_quantile_inner( + a, q, skip_nan=False, factor=1.0, check_q=percentile_is_valid + ) + + +@overload(np.nanpercentile) +def np_nanpercentile(a, q): + return _percentile_quantile_inner( + a, q, skip_nan=True, factor=1.0, 
check_q=percentile_is_valid + ) + + +@overload(np.quantile) +def np_quantile(a, q): + return _percentile_quantile_inner( + a, q, skip_nan=False, factor=100.0, check_q=quantile_is_valid + ) + + +@overload(np.nanquantile) +def np_nanquantile(a, q): + return _percentile_quantile_inner( + a, q, skip_nan=True, factor=100.0, check_q=quantile_is_valid + ) + + +@overload(np.nanmedian) +def np_nanmedian(a): + if not isinstance(a, types.Array): + return + isnan = get_isnan(a.dtype) + + def nanmedian_impl(a): + # Create a temporary workspace with only non-NaN values + temp_arry = np.empty(a.size, a.dtype) + n = 0 + for view in np.nditer(a): + v = view.item() + if not isnan(v): + temp_arry[n] = v + n += 1 + + # all NaNs + if n == 0: + return np.nan + + return _median_inner(temp_arry, n) + + return nanmedian_impl + + +@register_jitable +def np_partition_impl_inner(a, kth_array): + + # allocate and fill empty array rather than copy a and mutate in place + # as the latter approach fails to preserve strides + out = np.empty_like(a) + + idx = np.ndindex(a.shape[:-1]) # Numpy default partition axis is -1 + for s in idx: + arry = a[s].copy() + low = 0 + high = len(arry) - 1 + + for kth in kth_array: + _select_w_nan(arry, kth, low, high) + low = kth # narrow span of subsequent partition + + out[s] = arry + return out + + +@register_jitable +def np_argpartition_impl_inner(a, kth_array): + + # allocate and fill empty array rather than copy a and mutate in place + # as the latter approach fails to preserve strides + out = np.empty_like(a, dtype=np.intp) + + idx = np.ndindex(a.shape[:-1]) # Numpy default partition axis is -1 + for s in idx: + arry = a[s].copy() + idx_arry = np.arange(len(arry)) + low = 0 + high = len(arry) - 1 + + for kth in kth_array: + _arg_select_w_nan(arry, kth, low, high, idx_arry) + low = kth # narrow span of subsequent partition + + out[s] = idx_arry + return out + + +@register_jitable +def valid_kths(a, kth): + """ + Returns a sorted, unique array of kth values 
which serve + as indexers for partitioning the input array, a. + + If the absolute value of any of the provided values + is greater than a.shape[-1] an exception is raised since + we are partitioning along the last axis (per Numpy default + behaviour). + + Values less than 0 are transformed to equivalent positive + index values. + """ + # cast boolean to int, where relevant + kth_array = _asarray(kth).astype(np.int64) + + if kth_array.ndim != 1: + raise ValueError('kth must be scalar or 1-D') + # numpy raises ValueError: object too deep for desired array + + if np.any(np.abs(kth_array) >= a.shape[-1]): + raise ValueError("kth out of bounds") + + out = np.empty_like(kth_array) + + for index, val in np.ndenumerate(kth_array): + if val < 0: + out[index] = val + a.shape[-1] # equivalent positive index + else: + out[index] = val + + return np.unique(out) + + +@overload(np.partition) +def np_partition(a, kth): + + if not isinstance(a, (types.Array, types.Sequence, types.Tuple)): + raise NumbaTypeError('The first argument must be an array-like') + + if isinstance(a, types.Array) and a.ndim == 0: + msg = 'The first argument must be at least 1-D (found 0-D)' + raise NumbaTypeError(msg) + + kthdt = getattr(kth, 'dtype', kth) + if not isinstance(kthdt, (types.Boolean, types.Integer)): + # bool gets cast to int subsequently + raise NumbaTypeError('Partition index must be integer') + + def np_partition_impl(a, kth): + a_tmp = _asarray(a) + if a_tmp.size == 0: + return a_tmp.copy() + else: + kth_array = valid_kths(a_tmp, kth) + return np_partition_impl_inner(a_tmp, kth_array) + + return np_partition_impl + + +@overload(np.argpartition) +def np_argpartition(a, kth): + + if not isinstance(a, (types.Array, types.Sequence, types.Tuple)): + raise NumbaTypeError('The first argument must be an array-like') + + if isinstance(a, types.Array) and a.ndim == 0: + msg = 'The first argument must be at least 1-D (found 0-D)' + raise NumbaTypeError(msg) + + kthdt = getattr(kth, 'dtype', kth) + 
if not isinstance(kthdt, (types.Boolean, types.Integer)): + # bool gets cast to int subsequently + raise NumbaTypeError('Partition index must be integer') + + def np_argpartition_impl(a, kth): + a_tmp = _asarray(a) + if a_tmp.size == 0: + return a_tmp.copy().astype('intp') + else: + kth_array = valid_kths(a_tmp, kth) + return np_argpartition_impl_inner(a_tmp, kth_array) + + return np_argpartition_impl + + +#---------------------------------------------------------------------------- +# Building matrices + +@register_jitable +def _tri_impl(N, M, k): + shape = max(0, N), max(0, M) # numpy floors each dimension at 0 + out = np.empty(shape, dtype=np.float64) # numpy default dtype + + for i in range(shape[0]): + m_max = min(max(0, i + k + 1), shape[1]) + out[i, :m_max] = 1 + out[i, m_max:] = 0 + + return out + + +@overload(np.tri) +def np_tri(N, M=None, k=0): + + # we require k to be integer, unlike numpy + check_is_integer(k, 'k') + + def tri_impl(N, M=None, k=0): + if M is None: + M = N + return _tri_impl(N, M, k) + + return tri_impl + + +@register_jitable +def _make_square(m): + """ + Takes a 1d array and tiles it to form a square matrix + - i.e. 
a facsimile of np.tile(m, (len(m), 1)) + """ + assert m.ndim == 1 + + len_m = len(m) + out = np.empty((len_m, len_m), dtype=m.dtype) + + for i in range(len_m): + out[i] = m + + return out + + +@register_jitable +def np_tril_impl_2d(m, k=0): + mask = np.tri(m.shape[-2], M=m.shape[-1], k=k).astype(np.uint) + return np.where(mask, m, np.zeros_like(m, dtype=m.dtype)) + + +@overload(np.tril) +def my_tril(m, k=0): + + # we require k to be integer, unlike numpy + check_is_integer(k, 'k') + + def np_tril_impl_1d(m, k=0): + m_2d = _make_square(m) + return np_tril_impl_2d(m_2d, k) + + def np_tril_impl_multi(m, k=0): + mask = np.tri(m.shape[-2], M=m.shape[-1], k=k).astype(np.uint) + idx = np.ndindex(m.shape[:-2]) + z = np.empty_like(m) + zero_opt = np.zeros_like(mask, dtype=m.dtype) + for sel in idx: + z[sel] = np.where(mask, m[sel], zero_opt) + return z + + if m.ndim == 1: + return np_tril_impl_1d + elif m.ndim == 2: + return np_tril_impl_2d + else: + return np_tril_impl_multi + + +@overload(np.tril_indices) +def np_tril_indices(n, k=0, m=None): + + # we require integer arguments, unlike numpy + check_is_integer(n, 'n') + check_is_integer(k, 'k') + if not is_nonelike(m): + check_is_integer(m, 'm') + + def np_tril_indices_impl(n, k=0, m=None): + return np.nonzero(np.tri(n, m, k=k)) + return np_tril_indices_impl + + +@overload(np.tril_indices_from) +def np_tril_indices_from(arr, k=0): + + # we require k to be integer, unlike numpy + check_is_integer(k, 'k') + + if arr.ndim != 2: + raise TypingError("input array must be 2-d") + + def np_tril_indices_from_impl(arr, k=0): + return np.tril_indices(arr.shape[0], k=k, m=arr.shape[1]) + return np_tril_indices_from_impl + + +@register_jitable +def np_triu_impl_2d(m, k=0): + mask = np.tri(m.shape[-2], M=m.shape[-1], k=k - 1).astype(np.uint) + return np.where(mask, np.zeros_like(m, dtype=m.dtype), m) + + +@overload(np.triu) +def my_triu(m, k=0): + # we require k to be integer, unlike numpy + check_is_integer(k, 'k') + + def 
np_triu_impl_1d(m, k=0): + m_2d = _make_square(m) + return np_triu_impl_2d(m_2d, k) + + def np_triu_impl_multi(m, k=0): + mask = np.tri(m.shape[-2], M=m.shape[-1], k=k - 1).astype(np.uint) + idx = np.ndindex(m.shape[:-2]) + z = np.empty_like(m) + zero_opt = np.zeros_like(mask, dtype=m.dtype) + for sel in idx: + z[sel] = np.where(mask, zero_opt, m[sel]) + return z + + if m.ndim == 1: + return np_triu_impl_1d + elif m.ndim == 2: + return np_triu_impl_2d + else: + return np_triu_impl_multi + + +@overload(np.triu_indices) +def np_triu_indices(n, k=0, m=None): + + # we require integer arguments, unlike numpy + check_is_integer(n, 'n') + check_is_integer(k, 'k') + if not is_nonelike(m): + check_is_integer(m, 'm') + + def np_triu_indices_impl(n, k=0, m=None): + return np.nonzero(1 - np.tri(n, m, k=k - 1)) + return np_triu_indices_impl + + +@overload(np.triu_indices_from) +def np_triu_indices_from(arr, k=0): + + # we require k to be integer, unlike numpy + check_is_integer(k, 'k') + + if arr.ndim != 2: + raise TypingError("input array must be 2-d") + + def np_triu_indices_from_impl(arr, k=0): + return np.triu_indices(arr.shape[0], k=k, m=arr.shape[1]) + return np_triu_indices_from_impl + + +def _prepare_array(arr): + pass + + +@overload(_prepare_array) +def _prepare_array_impl(arr): + if arr in (None, types.none): + return lambda arr: np.array(()) + else: + return lambda arr: _asarray(arr).ravel() + + +def _dtype_of_compound(inobj): + obj = inobj + while True: + if isinstance(obj, (types.Number, types.Boolean)): + return as_dtype(obj) + l = getattr(obj, '__len__', None) + if l is not None and l() == 0: # empty tuple or similar + return np.float64 + dt = getattr(obj, 'dtype', None) + if dt is None: + raise NumbaTypeError("type has no dtype attr") + if isinstance(obj, types.Sequence): + obj = obj.dtype + else: + return as_dtype(dt) + + +@overload(np.ediff1d) +def np_ediff1d(ary, to_end=None, to_begin=None): + + if isinstance(ary, types.Array): + if isinstance(ary.dtype, 
def _get_d(x, dx):
    # Stub overloaded by get_d_impl below.
    # CONSISTENCY FIX: parameters were declared (dx, x), which
    # contradicted both the overload signature get_d_impl(x, dx) and the
    # positional call site `_get_d(x, dx)` in np_trapz.  The stub body is
    # `pass` and all calls are positional, so this rename is
    # behavior-neutral.
    pass


@overload(_get_d)
def get_d_impl(x, dx):
    """
    Return the spacing array for trapezoidal integration: a constant
    spacing `dx` when `x` is None, otherwise the successive differences
    of `x`.
    """
    if is_nonelike(x):
        def impl(x, dx):
            return np.asarray(dx)
    else:
        def impl(x, dx):
            return np.diff(np.asarray(x))
    return impl
+ """ + m, n = out.shape + assert m == len(x) + assert n == N + + if increasing: + for i in range(N): + if i == 0: + out[:, i] = 1 + else: + out[:, i] = np.multiply(x, out[:, (i - 1)]) + else: + for i in range(N - 1, -1, -1): + if i == N - 1: + out[:, i] = 1 + else: + out[:, i] = np.multiply(x, out[:, (i + 1)]) + + +@register_jitable +def _check_vander_params(x, N): + if x.ndim > 1: + raise ValueError('x must be a one-dimensional array or sequence.') + if N < 0: + raise ValueError('Negative dimensions are not allowed') + + +@overload(np.vander) +def np_vander(x, N=None, increasing=False): + if N not in (None, types.none): + if not isinstance(N, types.Integer): + raise TypingError('Second argument N must be None or an integer') + + def np_vander_impl(x, N=None, increasing=False): + if N is None: + N = len(x) + + _check_vander_params(x, N) + + # allocate output matrix using dtype determined in closure + out = np.empty((len(x), int(N)), dtype=dtype) + + _np_vander(x, N, increasing, out) + return out + + def np_vander_seq_impl(x, N=None, increasing=False): + if N is None: + N = len(x) + + x_arr = np.array(x) + _check_vander_params(x_arr, N) + + # allocate output matrix using dtype inferred when x_arr was created + out = np.empty((len(x), int(N)), dtype=x_arr.dtype) + + _np_vander(x_arr, N, increasing, out) + return out + + if isinstance(x, types.Array): + x_dt = as_dtype(x.dtype) + # replicate numpy behaviour w.r.t.type promotion + dtype = np.promote_types(x_dt, int) + return np_vander_impl + elif isinstance(x, (types.Tuple, types.Sequence)): + return np_vander_seq_impl + + +@overload(np.roll) +def np_roll(a, shift): + if not isinstance(shift, (types.Integer, types.Boolean)): + raise TypingError('shift must be an integer') + + def np_roll_impl(a, shift): + arr = np.asarray(a) + out = np.empty(arr.shape, dtype=arr.dtype) + # empty_like might result in different contiguity vs NumPy + + arr_flat = arr.flat + for i in range(arr.size): + idx = (i + shift) % arr.size + 
out.flat[idx] = arr_flat[i] + + return out + + if isinstance(a, (types.Number, types.Boolean)): + return lambda a, shift: np.asarray(a) + else: + return np_roll_impl + + +#---------------------------------------------------------------------------- +# Mathematical functions + +LIKELY_IN_CACHE_SIZE = 8 + + +@register_jitable +def binary_search_with_guess(key, arr, length, guess): + # NOTE: Do not refactor... see note in np_interp function impl below + # this is a facsimile of binary_search_with_guess prior to 1.15: + # https://github.com/numpy/numpy/blob/maintenance/1.15.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501 + # Permanent reference: + # https://github.com/numpy/numpy/blob/3430d78c01a3b9a19adad75f1acb5ae18286da73/numpy/core/src/multiarray/compiled_base.c#L447 # noqa: E501 + imin = 0 + imax = length + + # Handle keys outside of the arr range first + if key > arr[length - 1]: + return length + elif key < arr[0]: + return -1 + + # If len <= 4 use linear search. + # From above we know key >= arr[0] when we start. 
+ if length <= 4: + i = 1 + while i < length and key >= arr[i]: + i += 1 + return i - 1 + + if guess > length - 3: + guess = length - 3 + + if guess < 1: + guess = 1 + + # check most likely values: guess - 1, guess, guess + 1 + if key < arr[guess]: + if key < arr[guess - 1]: + imax = guess - 1 + + # last attempt to restrict search to items in cache + if guess > LIKELY_IN_CACHE_SIZE and \ + key >= arr[guess - LIKELY_IN_CACHE_SIZE]: + imin = guess - LIKELY_IN_CACHE_SIZE + else: + # key >= arr[guess - 1] + return guess - 1 + else: + # key >= arr[guess] + if key < arr[guess + 1]: + return guess + else: + # key >= arr[guess + 1] + if key < arr[guess + 2]: + return guess + 1 + else: + # key >= arr[guess + 2] + imin = guess + 2 + # last attempt to restrict search to items in cache + if (guess < (length - LIKELY_IN_CACHE_SIZE - 1)) and \ + (key < arr[guess + LIKELY_IN_CACHE_SIZE]): + imax = guess + LIKELY_IN_CACHE_SIZE + + # finally, find index by bisection + while imin < imax: + imid = imin + ((imax - imin) >> 1) + if key >= arr[imid]: + imin = imid + 1 + else: + imax = imid + + return imin - 1 + + +@register_jitable +def np_interp_impl_complex_inner(x, xp, fp, dtype): + # NOTE: Do not refactor... see note in np_interp function impl below + # this is a facsimile of arr_interp_complex post 1.16 with added + # branching to support np1.17 style NaN handling. 
+ # https://github.com/numpy/numpy/blob/maintenance/1.16.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501 + # Permanent reference: + # https://github.com/numpy/numpy/blob/971e2e89d08deeae0139d3011d15646fdac13c92/numpy/core/src/multiarray/compiled_base.c#L628 # noqa: E501 + dz = np.asarray(x) + dx = np.asarray(xp) + dy = np.asarray(fp) + + if len(dx) == 0: + raise ValueError('array of sample points is empty') + + if len(dx) != len(dy): + raise ValueError('fp and xp are not of the same size.') + + if dx.size == 1: + return np.full(dz.shape, fill_value=dy[0], dtype=dtype) + + dres = np.empty(dz.shape, dtype=dtype) + + lenx = dz.size + lenxp = len(dx) + lval = dy[0] + rval = dy[lenxp - 1] + + if lenxp == 1: + xp_val = dx[0] + fp_val = dy[0] + + for i in range(lenx): + x_val = dz.flat[i] + if x_val < xp_val: + dres.flat[i] = lval + elif x_val > xp_val: + dres.flat[i] = rval + else: + dres.flat[i] = fp_val + + else: + j = 0 + + # only pre-calculate slopes if there are relatively few of them. 
+ if lenxp <= lenx: + slopes = np.empty((lenxp - 1), dtype=dtype) + else: + slopes = np.empty(0, dtype=dtype) + + if slopes.size: + for i in range(lenxp - 1): + inv_dx = 1 / (dx[i + 1] - dx[i]) + real = (dy[i + 1].real - dy[i].real) * inv_dx + imag = (dy[i + 1].imag - dy[i].imag) * inv_dx + slopes[i] = real + 1j * imag + + for i in range(lenx): + x_val = dz.flat[i] + + if np.isnan(x_val): + real = x_val + imag = 0.0 + dres.flat[i] = real + 1j * imag + continue + + j = binary_search_with_guess(x_val, dx, lenxp, j) + + if j == -1: + dres.flat[i] = lval + elif j == lenxp: + dres.flat[i] = rval + elif j == lenxp - 1: + dres.flat[i] = dy[j] + elif dx[j] == x_val: + # Avoid potential non-finite interpolation + dres.flat[i] = dy[j] + else: + if slopes.size: + slope = slopes[j] + else: + inv_dx = 1 / (dx[j + 1] - dx[j]) + real = (dy[j + 1].real - dy[j].real) * inv_dx + imag = (dy[j + 1].imag - dy[j].imag) * inv_dx + slope = real + 1j * imag + + # NumPy 1.17 handles NaN correctly - this is a copy of + # innermost part of arr_interp_complex post 1.17: + # https://github.com/numpy/numpy/blob/maintenance/1.17.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501 + # Permanent reference: + # https://github.com/numpy/numpy/blob/91fbe4dde246559fa5b085ebf4bc268e2b89eea8/numpy/core/src/multiarray/compiled_base.c#L798-L812 # noqa: E501 + + # If we get NaN in one direction, try the other + real = slope.real * (x_val - dx[j]) + dy[j].real + if np.isnan(real): + real = slope.real * (x_val - dx[j + 1]) + dy[j + 1].real + if np.isnan(real) and dy[j].real == dy[j + 1].real: + real = dy[j].real + + imag = slope.imag * (x_val - dx[j]) + dy[j].imag + if np.isnan(imag): + imag = slope.imag * (x_val - dx[j + 1]) + dy[j + 1].imag + if np.isnan(imag) and dy[j].imag == dy[j + 1].imag: + imag = dy[j].imag + + dres.flat[i] = real + 1j * imag + + return dres + + +@register_jitable +def np_interp_impl_inner(x, xp, fp, dtype): + # NOTE: Do not refactor... 
see note in np_interp function impl below + # this is a facsimile of arr_interp post 1.16: + # https://github.com/numpy/numpy/blob/maintenance/1.16.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501 + # Permanent reference: + # https://github.com/numpy/numpy/blob/971e2e89d08deeae0139d3011d15646fdac13c92/numpy/core/src/multiarray/compiled_base.c#L473 # noqa: E501 + dz = np.asarray(x, dtype=np.float64) + dx = np.asarray(xp, dtype=np.float64) + dy = np.asarray(fp, dtype=np.float64) + + if len(dx) == 0: + raise ValueError('array of sample points is empty') + + if len(dx) != len(dy): + raise ValueError('fp and xp are not of the same size.') + + if dx.size == 1: + return np.full(dz.shape, fill_value=dy[0], dtype=dtype) + + dres = np.empty(dz.shape, dtype=dtype) + + lenx = dz.size + lenxp = len(dx) + lval = dy[0] + rval = dy[lenxp - 1] + + if lenxp == 1: + xp_val = dx[0] + fp_val = dy[0] + + for i in range(lenx): + x_val = dz.flat[i] + if x_val < xp_val: + dres.flat[i] = lval + elif x_val > xp_val: + dres.flat[i] = rval + else: + dres.flat[i] = fp_val + + else: + j = 0 + + # only pre-calculate slopes if there are relatively few of them. 
+ if lenxp <= lenx: + slopes = (dy[1:] - dy[:-1]) / (dx[1:] - dx[:-1]) + else: + slopes = np.empty(0, dtype=dtype) + + for i in range(lenx): + x_val = dz.flat[i] + + if np.isnan(x_val): + dres.flat[i] = x_val + continue + + j = binary_search_with_guess(x_val, dx, lenxp, j) + + if j == -1: + dres.flat[i] = lval + elif j == lenxp: + dres.flat[i] = rval + elif j == lenxp - 1: + dres.flat[i] = dy[j] + elif dx[j] == x_val: + # Avoid potential non-finite interpolation + dres.flat[i] = dy[j] + else: + if slopes.size: + slope = slopes[j] + else: + slope = (dy[j + 1] - dy[j]) / (dx[j + 1] - dx[j]) + + dres.flat[i] = slope * (x_val - dx[j]) + dy[j] + + # NOTE: this is in np1.17 + # https://github.com/numpy/numpy/blob/maintenance/1.17.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501 + # Permanent reference: + # https://github.com/numpy/numpy/blob/91fbe4dde246559fa5b085ebf4bc268e2b89eea8/numpy/core/src/multiarray/compiled_base.c#L610-L616 # noqa: E501 + # + # If we get nan in one direction, try the other + if np.isnan(dres.flat[i]): + dres.flat[i] = slope * (x_val - dx[j + 1]) + dy[j + 1] # noqa: E501 + if np.isnan(dres.flat[i]) and dy[j] == dy[j + 1]: + dres.flat[i] = dy[j] + + return dres + + +@overload(np.interp) +def np_interp(x, xp, fp): + # Replicating basic interp is relatively simple, but matching the behaviour + # of NumPy for edge cases is really quite hard. After a couple of attempts + # to avoid translation of the C source it was deemed necessary. 
+ + if hasattr(xp, 'ndim') and xp.ndim > 1: + raise TypingError('xp must be 1D') + if hasattr(fp, 'ndim') and fp.ndim > 1: + raise TypingError('fp must be 1D') + + complex_dtype_msg = ( + "Cannot cast array data from complex dtype to float64 dtype" + ) + + xp_dt = determine_dtype(xp) + if np.issubdtype(xp_dt, np.complexfloating): + raise TypingError(complex_dtype_msg) + + fp_dt = determine_dtype(fp) + dtype = np.result_type(fp_dt, np.float64) + + if np.issubdtype(dtype, np.complexfloating): + inner = np_interp_impl_complex_inner + else: + inner = np_interp_impl_inner + + def np_interp_impl(x, xp, fp): + return inner(x, xp, fp, dtype) + + def np_interp_scalar_impl(x, xp, fp): + return inner(x, xp, fp, dtype).flat[0] + + if isinstance(x, types.Number): + if isinstance(x, types.Complex): + raise TypingError(complex_dtype_msg) + return np_interp_scalar_impl + + return np_interp_impl + + +#---------------------------------------------------------------------------- +# Statistics + +@register_jitable +def row_wise_average(a): + assert a.ndim == 2 + + m, n = a.shape + out = np.empty((m, 1), dtype=a.dtype) + + for i in range(m): + out[i, 0] = np.sum(a[i, :]) / n + + return out + + +@register_jitable +def np_cov_impl_inner(X, bias, ddof): + + # determine degrees of freedom + if ddof is None: + if bias: + ddof = 0 + else: + ddof = 1 + + # determine the normalization factor + fact = X.shape[1] - ddof + + # numpy warns if less than 0 and floors at 0 + fact = max(fact, 0.0) + + # de-mean + X -= row_wise_average(X) + + # calculate result - requires blas + c = np.dot(X, np.conj(X.T)) + c *= np.true_divide(1, fact) + return c + + +def _prepare_cov_input_inner(): + pass + + +@overload(_prepare_cov_input_inner) +def _prepare_cov_input_impl(m, y, rowvar, dtype): + if y in (None, types.none): + def _prepare_cov_input_inner(m, y, rowvar, dtype): + m_arr = np.atleast_2d(_asarray(m)) + + if not rowvar: + m_arr = m_arr.T + + return m_arr + else: + def _prepare_cov_input_inner(m, y, 
rowvar, dtype): + m_arr = np.atleast_2d(_asarray(m)) + y_arr = np.atleast_2d(_asarray(y)) + + # transpose if asked to and not a (1, n) vector - this looks + # wrong as you might end up transposing one and not the other, + # but it's what numpy does + if not rowvar: + if m_arr.shape[0] != 1: + m_arr = m_arr.T + if y_arr.shape[0] != 1: + y_arr = y_arr.T + + m_rows, m_cols = m_arr.shape + y_rows, y_cols = y_arr.shape + + if m_cols != y_cols: + raise ValueError("m and y have incompatible dimensions") + + # allocate and fill output array + out = np.empty((m_rows + y_rows, m_cols), dtype=dtype) + out[:m_rows, :] = m_arr + out[-y_rows:, :] = y_arr + + return out + + return _prepare_cov_input_inner + + +@register_jitable +def _handle_m_dim_change(m): + if m.ndim == 2 and m.shape[0] == 1: + msg = ("2D array containing a single row is unsupported due to " + "ambiguity in type inference. To use numpy.cov in this case " + "simply pass the row as a 1D array, i.e. m[0].") + raise RuntimeError(msg) + + +_handle_m_dim_nop = register_jitable(lambda x: x) + + +def determine_dtype(array_like): + array_like_dt = np.float64 + if isinstance(array_like, types.Array): + array_like_dt = as_dtype(array_like.dtype) + elif isinstance(array_like, (types.Number, types.Boolean)): + array_like_dt = as_dtype(array_like) + elif isinstance(array_like, (types.UniTuple, types.Tuple)): + coltypes = set() + for val in array_like: + if hasattr(val, 'count'): + [coltypes.add(v) for v in val] + else: + coltypes.add(val) + if len(coltypes) > 1: + array_like_dt = np.promote_types(*[as_dtype(ty) for ty in coltypes]) + elif len(coltypes) == 1: + array_like_dt = as_dtype(coltypes.pop()) + + return array_like_dt + + +def check_dimensions(array_like, name): + if isinstance(array_like, types.Array): + if array_like.ndim > 2: + raise NumbaTypeError("{0} has more than 2 dimensions".format(name)) + elif isinstance(array_like, types.Sequence): + if isinstance(array_like.key[0], types.Sequence): + if 
isinstance(array_like.key[0].key[0], types.Sequence): + msg = "{0} has more than 2 dimensions".format(name) + raise NumbaTypeError(msg) + + +@register_jitable +def _handle_ddof(ddof): + if not np.isfinite(ddof): + raise ValueError('Cannot convert non-finite ddof to integer') + if ddof - int(ddof) != 0: + raise ValueError('ddof must be integral value') + + +_handle_ddof_nop = register_jitable(lambda x: x) + + +@register_jitable +def _prepare_cov_input(m, y, rowvar, dtype, ddof, _DDOF_HANDLER, + _M_DIM_HANDLER): + _M_DIM_HANDLER(m) + _DDOF_HANDLER(ddof) + return _prepare_cov_input_inner(m, y, rowvar, dtype) + + +def scalar_result_expected(mandatory_input, optional_input): + opt_is_none = optional_input in (None, types.none) + + if isinstance(mandatory_input, types.Array) and mandatory_input.ndim == 1: + return opt_is_none + + if isinstance(mandatory_input, types.BaseTuple): + if all(isinstance(x, (types.Number, types.Boolean)) + for x in mandatory_input.types): + return opt_is_none + else: + if (len(mandatory_input.types) == 1 and + isinstance(mandatory_input.types[0], types.BaseTuple)): + return opt_is_none + + if isinstance(mandatory_input, (types.Number, types.Boolean)): + return opt_is_none + + if isinstance(mandatory_input, types.Sequence): + if (not isinstance(mandatory_input.key[0], types.Sequence) and + opt_is_none): + return True + + return False + + +@register_jitable +def _clip_corr(x): + return np.where(np.fabs(x) > 1, np.sign(x), x) + + +@register_jitable +def _clip_complex(x): + real = _clip_corr(x.real) + imag = _clip_corr(x.imag) + return real + 1j * imag + + +@overload(np.cov) +def np_cov(m, y=None, rowvar=True, bias=False, ddof=None): + + # reject problem if m and / or y are more than 2D + check_dimensions(m, 'm') + check_dimensions(y, 'y') + + # reject problem if ddof invalid (either upfront if type is + # obviously invalid, or later if value found to be non-integral) + if ddof in (None, types.none): + _DDOF_HANDLER = _handle_ddof_nop + else: + if 
isinstance(ddof, (types.Integer, types.Boolean)): + _DDOF_HANDLER = _handle_ddof_nop + elif isinstance(ddof, types.Float): + _DDOF_HANDLER = _handle_ddof + else: + raise TypingError('ddof must be a real numerical scalar type') + + # special case for 2D array input with 1 row of data - select + # handler function which we'll call later when we have access + # to the shape of the input array + _M_DIM_HANDLER = _handle_m_dim_nop + if isinstance(m, types.Array): + _M_DIM_HANDLER = _handle_m_dim_change + + # infer result dtype + m_dt = determine_dtype(m) + y_dt = determine_dtype(y) + dtype = np.result_type(m_dt, y_dt, np.float64) + + def np_cov_impl(m, y=None, rowvar=True, bias=False, ddof=None): + X = _prepare_cov_input(m, y, rowvar, dtype, ddof, _DDOF_HANDLER, + _M_DIM_HANDLER).astype(dtype) + + if np.any(np.array(X.shape) == 0): + return np.full((X.shape[0], X.shape[0]), fill_value=np.nan, + dtype=dtype) + else: + return np_cov_impl_inner(X, bias, ddof) + + def np_cov_impl_single_variable(m, y=None, rowvar=True, bias=False, + ddof=None): + X = _prepare_cov_input(m, y, rowvar, ddof, dtype, _DDOF_HANDLER, + _M_DIM_HANDLER).astype(dtype) + + if np.any(np.array(X.shape) == 0): + variance = np.nan + else: + variance = np_cov_impl_inner(X, bias, ddof).flat[0] + + return np.array(variance) + + if scalar_result_expected(m, y): + return np_cov_impl_single_variable + else: + return np_cov_impl + + +@overload(np.corrcoef) +def np_corrcoef(x, y=None, rowvar=True): + + x_dt = determine_dtype(x) + y_dt = determine_dtype(y) + dtype = np.result_type(x_dt, y_dt, np.float64) + + if dtype == np.complex128: + clip_fn = _clip_complex + else: + clip_fn = _clip_corr + + def np_corrcoef_impl(x, y=None, rowvar=True): + c = np.cov(x, y, rowvar) + d = np.diag(c) + stddev = np.sqrt(d.real) + + for i in range(c.shape[0]): + c[i, :] /= stddev + c[:, i] /= stddev + + return clip_fn(c) + + def np_corrcoef_impl_single_variable(x, y=None, rowvar=True): + c = np.cov(x, y, rowvar) + return c / c + + if 
scalar_result_expected(x, y): + return np_corrcoef_impl_single_variable + else: + return np_corrcoef_impl + + +#---------------------------------------------------------------------------- +# Element-wise computations + + +@overload(np.argwhere) +def np_argwhere(a): + # needs to be much more array-like for the array impl to work, Numba bug + # in one of the underlying function calls? + + use_scalar = isinstance(a, (types.Number, types.Boolean)) + if type_can_asarray(a) and not use_scalar: + def impl(a): + arr = np.asarray(a) + if arr.shape == (): + return np.zeros((0, 1), dtype=types.intp) + return np.transpose(np.vstack(np.nonzero(arr))) + else: + falseish = (0, 0) + trueish = (1, 0) + + def impl(a): + if a is not None and bool(a): + return np.zeros(trueish, dtype=types.intp) + else: + return np.zeros(falseish, dtype=types.intp) + + return impl + + +@overload(np.flatnonzero) +def np_flatnonzero(a): + + if type_can_asarray(a): + def impl(a): + arr = np.asarray(a) + return np.nonzero(np.ravel(arr))[0] + else: + def impl(a): + if a is not None and bool(a): + data = [0] + else: + data = [x for x in range(0)] + return np.array(data, dtype=types.intp) + + return impl + + +@register_jitable +def _fill_diagonal_params(a, wrap): + if a.ndim == 2: + m = a.shape[0] + n = a.shape[1] + step = 1 + n + if wrap: + end = n * m + else: + end = n * min(m, n) + else: + shape = np.array(a.shape) + + if not np.all(np.diff(shape) == 0): + raise ValueError("All dimensions of input must be of equal length") + + step = 1 + (np.cumprod(shape[:-1])).sum() + end = shape.prod() + + return end, step + + +@register_jitable +def _fill_diagonal_scalar(a, val, wrap): + end, step = _fill_diagonal_params(a, wrap) + + for i in range(0, end, step): + a.flat[i] = val + + +@register_jitable +def _fill_diagonal(a, val, wrap): + end, step = _fill_diagonal_params(a, wrap) + ctr = 0 + v_len = len(val) + + for i in range(0, end, step): + a.flat[i] = val[ctr] + ctr += 1 + ctr = ctr % v_len + + 
+@register_jitable +def _check_val_int(a, val): + iinfo = np.iinfo(a.dtype) + v_min = iinfo.min + v_max = iinfo.max + + # check finite values are within bounds + if np.any(~np.isfinite(val)) or np.any(val < v_min) or np.any(val > v_max): + raise ValueError('Unable to safely conform val to a.dtype') + + +@register_jitable +def _check_val_float(a, val): + finfo = np.finfo(a.dtype) + v_min = finfo.min + v_max = finfo.max + + # check finite values are within bounds + finite_vals = val[np.isfinite(val)] + if np.any(finite_vals < v_min) or np.any(finite_vals > v_max): + raise ValueError('Unable to safely conform val to a.dtype') + + +# no check performed, needed for pathway where no check is required +_check_nop = register_jitable(lambda x, y: x) + + +def _asarray(x): + pass + + +@overload(_asarray) +def _asarray_impl(x): + if isinstance(x, types.Array): + return lambda x: x + elif isinstance(x, (types.Sequence, types.Tuple)): + return lambda x: np.array(x) + elif isinstance(x, (types.Number, types.Boolean)): + ty = as_dtype(x) + return lambda x: np.array([x], dtype=ty) + + +@overload(np.fill_diagonal) +def np_fill_diagonal(a, val, wrap=False): + + if a.ndim > 1: + # the following can be simplified after #3088; until then, employ + # a basic mechanism for catching cases where val is of a type/value + # which cannot safely be cast to a.dtype + if isinstance(a.dtype, types.Integer): + checker = _check_val_int + elif isinstance(a.dtype, types.Float): + checker = _check_val_float + else: + checker = _check_nop + + def scalar_impl(a, val, wrap=False): + tmpval = _asarray(val).flatten() + checker(a, tmpval) + _fill_diagonal_scalar(a, val, wrap) + + def non_scalar_impl(a, val, wrap=False): + tmpval = _asarray(val).flatten() + checker(a, tmpval) + _fill_diagonal(a, tmpval, wrap) + + if isinstance(val, (types.Float, types.Integer, types.Boolean)): + return scalar_impl + elif isinstance(val, (types.Tuple, types.Sequence, types.Array)): + return non_scalar_impl + else: + msg = "The 
first argument must be at least 2-D (found %s-D)" % a.ndim + raise TypingError(msg) + + +def _np_round_intrinsic(tp): + # np.round() always rounds half to even + return "llvm.rint.f%d" % (tp.bitwidth,) + + +@intrinsic +def _np_round_float(typingctx, val): + sig = val(val) + + def codegen(context, builder, sig, args): + [val] = args + tp = sig.args[0] + llty = context.get_value_type(tp) + module = builder.module + fnty = llvmlite.ir.FunctionType(llty, [llty]) + fn = cgutils.get_or_insert_function(module, fnty, + _np_round_intrinsic(tp)) + res = builder.call(fn, (val,)) + return impl_ret_untracked(context, builder, sig.return_type, res) + + return sig, codegen + + +@register_jitable +def round_ndigits(x, ndigits): + if math.isinf(x) or math.isnan(x): + return x + + # NOTE: this is CPython's algorithm, but perhaps this is overkill + # when emulating Numpy's behaviour. + if ndigits >= 0: + if ndigits > 22: + # pow1 and pow2 are each safe from overflow, but + # pow1*pow2 ~= pow(10.0, ndigits) might overflow. 
+ pow1 = 10.0 ** (ndigits - 22) + pow2 = 1e22 + else: + pow1 = 10.0 ** ndigits + pow2 = 1.0 + y = (x * pow1) * pow2 + if math.isinf(y): + return x + return (_np_round_float(y) / pow2) / pow1 + + else: + pow1 = 10.0 ** (-ndigits) + y = x / pow1 + return _np_round_float(y) * pow1 + + +@overload(np.around) +@overload(np.round) +def impl_np_round(a, decimals=0, out=None): + if not type_can_asarray(a): + raise TypingError('The argument "a" must be array-like') + + if not (isinstance(out, types.Array) or is_nonelike(out)): + msg = 'The argument "out" must be an array if it is provided' + raise TypingError(msg) + + if isinstance(a, (types.Float, types.Integer, types.Complex)): + if is_nonelike(out): + if isinstance(a, types.Float): + def impl(a, decimals=0, out=None): + if decimals == 0: + return _np_round_float(a) + else: + return round_ndigits(a, decimals) + return impl + elif isinstance(a, types.Integer): + def impl(a, decimals=0, out=None): + if decimals == 0: + return a + else: + return int(round_ndigits(a, decimals)) + return impl + elif isinstance(a, types.Complex): + def impl(a, decimals=0, out=None): + if decimals == 0: + real = _np_round_float(a.real) + imag = _np_round_float(a.imag) + else: + real = round_ndigits(a.real, decimals) + imag = round_ndigits(a.imag, decimals) + return complex(real, imag) + return impl + else: + def impl(a, decimals=0, out=None): + out[0] = np.round(a, decimals) + return out + return impl + elif isinstance(a, types.Array): + if is_nonelike(out): + def impl(a, decimals=0, out=None): + out = np.empty_like(a) + return np.round(a, decimals, out) + return impl + else: + def impl(a, decimals=0, out=None): + if a.shape != out.shape: + raise ValueError("invalid output shape") + for index, val in np.ndenumerate(a): + out[index] = np.round(val, decimals) + return out + return impl + + +if numpy_version < (2, 0): + overload(np.round_)(impl_np_round) + + +@overload(np.sinc) +def impl_np_sinc(x): + if isinstance(x, types.Number): + def impl(x): + 
if x == 0.e0: # to match np impl + x = 1e-20 + x *= np.pi # np sinc is the normalised variant + return np.sin(x) / x + return impl + elif isinstance(x, types.Array): + def impl(x): + out = np.zeros_like(x) + for index, val in np.ndenumerate(x): + out[index] = np.sinc(val) + return out + return impl + else: + raise NumbaTypeError('Argument "x" must be a Number or array-like.') + + +@overload(np.angle) +def ov_np_angle(z, deg=False): + deg_mult = float(180 / np.pi) + + # non-complex scalar values are accepted as well + if isinstance(z, types.Number): + def impl(z, deg=False): + if deg: + return np.arctan2(z.imag, z.real) * deg_mult + else: + return np.arctan2(z.imag, z.real) + return impl + elif isinstance(z, types.Array): + dtype = z.dtype + + if isinstance(dtype, types.Complex): + ret_dtype = dtype.underlying_float + elif isinstance(dtype, types.Float): + ret_dtype = dtype + else: + return + + def impl(z, deg=False): + out = np.zeros_like(z, dtype=ret_dtype) + for index, val in np.ndenumerate(z): + out[index] = np.angle(val, deg) + return out + return impl + else: + raise NumbaTypeError('Argument "z" must be a complex ' + f'or Array[complex]. 
Got {z}') + + +@lower_builtin(np.nonzero, types.Array) +@lower_builtin("array.nonzero", types.Array) +def array_nonzero(context, builder, sig, args): + aryty = sig.args[0] + # Return type is a N-tuple of 1D C-contiguous arrays + retty = sig.return_type + outaryty = retty.dtype + nouts = retty.count + + ary = make_array(aryty)(context, builder, args[0]) + shape = cgutils.unpack_tuple(builder, ary.shape) + strides = cgutils.unpack_tuple(builder, ary.strides) + data = ary.data + layout = aryty.layout + + # First count the number of non-zero elements + zero = context.get_constant(types.intp, 0) + one = context.get_constant(types.intp, 1) + count = cgutils.alloca_once_value(builder, zero) + with cgutils.loop_nest(builder, shape, zero.type) as indices: + ptr = cgutils.get_item_pointer2(context, builder, data, shape, strides, + layout, indices) + val = load_item(context, builder, aryty, ptr) + nz = context.is_true(builder, aryty.dtype, val) + with builder.if_then(nz): + builder.store(builder.add(builder.load(count), one), count) + + # Then allocate output arrays of the right size + out_shape = (builder.load(count),) + outs = [_empty_nd_impl(context, builder, outaryty, out_shape)._getvalue() + for i in range(nouts)] + outarys = [make_array(outaryty)(context, builder, out) for out in outs] + out_datas = [out.data for out in outarys] + + # And fill them up + index = cgutils.alloca_once_value(builder, zero) + with cgutils.loop_nest(builder, shape, zero.type) as indices: + ptr = cgutils.get_item_pointer2(context, builder, data, shape, strides, + layout, indices) + val = load_item(context, builder, aryty, ptr) + nz = context.is_true(builder, aryty.dtype, val) + with builder.if_then(nz): + # Store element indices in output arrays + if not indices: + # For a 0-d array, store 0 in the unique output array + indices = (zero,) + cur = builder.load(index) + for i in range(nouts): + ptr = cgutils.get_item_pointer2(context, builder, out_datas[i], + out_shape, (), + 'C', [cur]) + 
store_item(context, builder, outaryty, indices[i], ptr) + builder.store(builder.add(cur, one), index) + + tup = context.make_tuple(builder, sig.return_type, outs) + return impl_ret_new_ref(context, builder, sig.return_type, tup) + + +def _where_zero_size_array_impl(dtype): + def impl(condition, x, y): + x_ = np.asarray(x).astype(dtype) + y_ = np.asarray(y).astype(dtype) + return x_ if condition else y_ + return impl + + +@register_jitable +def _where_generic_inner_impl(cond, x, y, res): + for idx, c in np.ndenumerate(cond): + res[idx] = x[idx] if c else y[idx] + return res + + +@register_jitable +def _where_fast_inner_impl(cond, x, y, res): + cf = cond.flat + xf = x.flat + yf = y.flat + rf = res.flat + for i in range(cond.size): + rf[i] = xf[i] if cf[i] else yf[i] + return res + + +def _where_generic_impl(dtype, layout): + use_faster_impl = layout in [{'C'}, {'F'}] + + def impl(condition, x, y): + cond1, x1, y1 = np.asarray(condition), np.asarray(x), np.asarray(y) + shape = np.broadcast_shapes(cond1.shape, x1.shape, y1.shape) + cond_ = np.broadcast_to(cond1, shape) + x_ = np.broadcast_to(x1, shape) + y_ = np.broadcast_to(y1, shape) + + if layout == 'F': + res = np.empty(shape[::-1], dtype=dtype).T + else: + res = np.empty(shape, dtype=dtype) + + if use_faster_impl: + return _where_fast_inner_impl(cond_, x_, y_, res) + else: + return _where_generic_inner_impl(cond_, x_, y_, res) + + return impl + + +@overload(np.where) +def ov_np_where(condition): + if not type_can_asarray(condition): + msg = 'The argument "condition" must be array-like' + raise NumbaTypeError(msg) + + def where_cond_none_none(condition): + return np.asarray(condition).nonzero() + return where_cond_none_none + + +@overload(np.where) +def ov_np_where_x_y(condition, x, y): + if not type_can_asarray(condition): + msg = 'The argument "condition" must be array-like' + raise NumbaTypeError(msg) + + # corner case: None is a valid value for np.where: + # >>> np.where([0, 1], None, 2) + # array([None, 2]) + 
# + # >>> np.where([0, 1], 2, None) + # array([2, None]) + # + # >>> np.where([0, 1], None, None) + # array([None, None]) + if is_nonelike(x) or is_nonelike(y): + # skip it for now as np.asarray(None) is not supported + raise NumbaTypeError('Argument "x" or "y" cannot be None') + + for arg, name in zip((x, y), ('x', 'y')): + if not type_can_asarray(arg): + msg = 'The argument "{}" must be array-like if provided' + raise NumbaTypeError(msg.format(name)) + + cond_arr = isinstance(condition, types.Array) + x_arr = isinstance(x, types.Array) + y_arr = isinstance(y, types.Array) + + if cond_arr: + x_dt = determine_dtype(x) + y_dt = determine_dtype(y) + dtype = np.promote_types(x_dt, y_dt) + + # corner case - 0 dim values + def check_0_dim(arg): + return isinstance(arg, types.Number) or ( + isinstance(arg, types.Array) and arg.ndim == 0) + special_0_case = all([check_0_dim(a) for a in (condition, x, y)]) + if special_0_case: + return _where_zero_size_array_impl(dtype) + + layout = condition.layout + if x_arr and y_arr: + if x.layout == y.layout == condition.layout: + layout = x.layout + else: + layout = 'A' + return _where_generic_impl(dtype, layout) + else: + def impl(condition, x, y): + return np.where(np.asarray(condition), np.asarray(x), np.asarray(y)) + return impl + + +@overload(np.real) +def np_real(val): + def np_real_impl(val): + return val.real + + return np_real_impl + + +@overload(np.imag) +def np_imag(val): + def np_imag_impl(val): + return val.imag + + return np_imag_impl + + +#---------------------------------------------------------------------------- +# Misc functions + +@overload(operator.contains) +def np_contains(arr, key): + if not isinstance(arr, types.Array): + return + + def np_contains_impl(arr, key): + for x in np.nditer(arr): + if x == key: + return True + return False + + return np_contains_impl + + +@overload(np.count_nonzero) +def np_count_nonzero(a, axis=None): + if not type_can_asarray(a): + raise TypingError("The argument to 
np.count_nonzero must be array-like") + + if is_nonelike(axis): + def impl(a, axis=None): + arr2 = np.ravel(a) + return np.sum(arr2 != 0) + return impl + else: + def impl(a, axis=None): + arr2 = a.astype(np.bool_) + return np.sum(arr2, axis=axis) + return impl + + +np_delete_handler_isslice = register_jitable(lambda x : x) +np_delete_handler_isarray = register_jitable(lambda x : np.asarray(x)) + + +@overload(np.delete) +def np_delete(arr, obj): + # Implementation based on numpy + # https://github.com/numpy/numpy/blob/af66e487a57bfd4850f4306e3b85d1dac3c70412/numpy/lib/function_base.py#L4065-L4267 # noqa: E501 + + if not isinstance(arr, (types.Array, types.Sequence)): + raise TypingError("arr must be either an Array or a Sequence") + + if isinstance(obj, (types.Array, types.Sequence, types.SliceType)): + if isinstance(obj, (types.SliceType)): + handler = np_delete_handler_isslice + else: + if not isinstance(obj.dtype, types.Integer): + raise TypingError('obj should be of Integer dtype') + handler = np_delete_handler_isarray + + def np_delete_impl(arr, obj): + arr = np.ravel(np.asarray(arr)) + N = arr.size + + keep = np.ones(N, dtype=np.bool_) + obj = handler(obj) + keep[obj] = False + return arr[keep] + return np_delete_impl + + else: # scalar value + if not isinstance(obj, types.Integer): + raise TypingError('obj should be of Integer dtype') + + def np_delete_scalar_impl(arr, obj): + arr = np.ravel(np.asarray(arr)) + N = arr.size + pos = obj + + if (pos < -N or pos >= N): + raise IndexError('obj must be less than the len(arr)') + # NumPy raises IndexError: index 'i' is out of + # bounds for axis 'x' with size 'n' + + if (pos < 0): + pos += N + + return np.concatenate((arr[:pos], arr[pos + 1:])) + return np_delete_scalar_impl + + +@overload(np.diff) +def np_diff_impl(a, n=1): + if not isinstance(a, types.Array) or a.ndim == 0: + return + + def diff_impl(a, n=1): + if n == 0: + return a.copy() + if n < 0: + raise ValueError("diff(): order must be non-negative") + size 
= a.shape[-1] + out_shape = a.shape[:-1] + (max(size - n, 0),) + out = np.empty(out_shape, a.dtype) + if out.size == 0: + return out + + # np.diff() works on each last dimension subarray independently. + # To make things easier, normalize input and output into 2d arrays + a2 = a.reshape((-1, size)) + out2 = out.reshape((-1, out.shape[-1])) + # A scratchpad for subarrays + work = np.empty(size, a.dtype) + + for major in range(a2.shape[0]): + # First iteration: diff a2 into work + for i in range(size - 1): + work[i] = a2[major, i + 1] - a2[major, i] + # Other iterations: diff work into itself + for niter in range(1, n): + for i in range(size - niter - 1): + work[i] = work[i + 1] - work[i] + # Copy final diff into out2 + out2[major] = work[:size - n] + + return out + + return diff_impl + + +@overload(np.array_equal) +def np_array_equal(a1, a2): + + if not (type_can_asarray(a1) and type_can_asarray(a2)): + raise TypingError('Both arguments to "array_equals" must be array-like') + + accepted = (types.Boolean, types.Number) + if isinstance(a1, accepted) and isinstance(a2, accepted): + # special case + def impl(a1, a2): + return a1 == a2 + else: + def impl(a1, a2): + a = np.asarray(a1) + b = np.asarray(a2) + if a.shape == b.shape: + return np.all(a == b) + return False + + return impl + + +@overload(np.intersect1d) +def jit_np_intersect1d(ar1, ar2, assume_unique=False): + # Not implemented to support return_indices + # https://github.com/numpy/numpy/blob/v1.19.0/numpy/lib + # /arraysetops.py#L347-L441 + if not (type_can_asarray(ar1) or type_can_asarray(ar2)): + raise TypingError('intersect1d: first two args must be array-like') + if not isinstance(assume_unique, (types.Boolean, bool)): + raise TypingError('intersect1d: ' + 'argument "assume_unique" must be boolean') + + def np_intersects1d_impl(ar1, ar2, assume_unique=False): + ar1 = np.asarray(ar1) + ar2 = np.asarray(ar2) + + if not assume_unique: + ar1 = np.unique(ar1) + ar2 = np.unique(ar2) + else: + ar1 = ar1.ravel() 
+ ar2 = ar2.ravel() + + aux = np.concatenate((ar1, ar2)) + aux.sort() + mask = aux[1:] == aux[:-1] + int1d = aux[:-1][mask] + return int1d + return np_intersects1d_impl + + +def validate_1d_array_like(func_name, seq): + if isinstance(seq, types.Array): + if seq.ndim != 1: + raise NumbaTypeError("{0}(): input should have dimension 1" + .format(func_name)) + elif not isinstance(seq, types.Sequence): + raise NumbaTypeError("{0}(): input should be an array or sequence" + .format(func_name)) + + +@overload(np.bincount) +def np_bincount(a, weights=None, minlength=0): + validate_1d_array_like("bincount", a) + + if not isinstance(a.dtype, types.Integer): + return + + check_is_integer(minlength, 'minlength') + + if weights not in (None, types.none): + validate_1d_array_like("bincount", weights) + # weights is promoted to double in C impl + # https://github.com/numpy/numpy/blob/maintenance/1.16.x/numpy/core/src/multiarray/compiled_base.c#L93-L95 # noqa: E501 + out_dtype = np.float64 + + @register_jitable + def validate_inputs(a, weights, minlength): + if len(a) != len(weights): + raise ValueError("bincount(): weights and list don't have " + "the same length") + + @register_jitable + def count_item(out, idx, val, weights): + out[val] += weights[idx] + + else: + out_dtype = types.intp + + @register_jitable + def validate_inputs(a, weights, minlength): + pass + + @register_jitable + def count_item(out, idx, val, weights): + out[val] += 1 + + def bincount_impl(a, weights=None, minlength=0): + validate_inputs(a, weights, minlength) + if minlength < 0: + raise ValueError("'minlength' must not be negative") + + n = len(a) + a_max = a[0] if n > 0 else -1 + for i in range(1, n): + if a[i] < 0: + raise ValueError("bincount(): first argument must be " + "non-negative") + a_max = max(a_max, a[i]) + + out_length = max(a_max + 1, minlength) + out = np.zeros(out_length, out_dtype) + for i in range(n): + count_item(out, i, a[i], weights) + return out + + return bincount_impl + + 
# NaN-aware scalar comparators used by the np.searchsorted machinery below.
# `lt_floats` / `lt_complex` are imported from elsewhere in this module;
# presumably they implement NumPy's sort order (NaN last) — TODO confirm
# against the import site.
less_than_float = register_jitable(lt_floats)
less_than_complex = register_jitable(lt_complex)


@register_jitable
def less_than_or_equal_complex(a, b):
    """Return True if complex ``a`` sorts at or before complex ``b``.

    Ordering is lexicographic on (real, imag) with NaN treated as larger
    than any number, mirroring NumPy's sort order for complex values:
    a NaN real part sorts after any finite real part, and within equal
    real parts a NaN imaginary part sorts last.
    """
    if np.isnan(a.real):
        if np.isnan(b.real):
            if np.isnan(a.imag):
                # a is (NaN, NaN): only <= b when b's imag is NaN too
                return np.isnan(b.imag)
            else:
                if np.isnan(b.imag):
                    return True
                else:
                    return a.imag <= b.imag
        else:
            # NaN real sorts after any non-NaN real
            return False

    else:
        if np.isnan(b.real):
            return True
        else:
            if np.isnan(a.imag):
                if np.isnan(b.imag):
                    return a.real <= b.real
                else:
                    return False
            else:
                if np.isnan(b.imag):
                    return True
                else:
                    if a.real < b.real:
                        return True
                    elif a.real == b.real:
                        return a.imag <= b.imag
                    return False


@register_jitable
def _less_than_or_equal(a, b):
    """``a <= b`` under NumPy sort order, dispatching on argument kind.

    For a float ``b`` that is NaN, everything compares <= (NaN sorts last).
    """
    if isinstance(a, complex) or isinstance(b, complex):
        return less_than_or_equal_complex(a, b)

    elif isinstance(b, float):
        if np.isnan(b):
            return True

    return a <= b


@register_jitable
def _less_than(a, b):
    """``a < b`` under NumPy sort order, dispatching on argument kind."""
    if isinstance(a, complex) or isinstance(b, complex):
        return less_than_complex(a, b)

    elif isinstance(b, float):
        return less_than_float(a, b)

    return a < b


@register_jitable
def _less_then_datetime64(a, b):
    """``a < b`` for datetime64/timedelta64 values.

    NaT compares greater than every other value (never less-than anything,
    and everything is less-than NaT), so NaT sorts to the end.
    """
    # Original numpy code is at:
    # https://github.com/numpy/numpy/blob/3dad50936a8dc534a81a545365f69ee9ab162ffe/numpy/_core/src/npysort/npysort_common.h#L334-L346
    if np.isnat(a):
        return 0

    if np.isnat(b):
        return 1

    return a < b


@register_jitable
def _less_then_or_equal_datetime64(a, b):
    # a <= b is exactly not (b < a) under a total order
    return not _less_then_datetime64(b, a)


def _searchsorted(cmp):
    """Build a binary-search kernel parameterised by comparator ``cmp``.

    The returned ``impl(a, key_val, min_idx, max_idx)`` narrows the
    half-open window [min_idx, max_idx) over sorted array ``a`` and
    returns the converged ``(min_idx, max_idx)`` insertion point.
    """
    # a facsimile of:
    # https://github.com/numpy/numpy/blob/4f84d719657eb455a35fcdf9e75b83eb1f97024a/numpy/core/src/npysort/binsearch.cpp#L61 # noqa: E501

    def impl(a, key_val, min_idx, max_idx):
        while min_idx < max_idx:
            # midpoint written this way to avoid overflow
            mid_idx = min_idx + ((max_idx - min_idx) >> 1)
            mid_val = a[mid_idx]
            if cmp(mid_val, key_val):
                min_idx = mid_idx + 1
            else:
                max_idx = mid_idx
        return min_idx, max_idx

    return impl
VALID_SEARCHSORTED_SIDES = frozenset({'left', 'right'})


def make_searchsorted_implementation(np_dtype, side):
    """Select the binary-search kernel and comparator for a dtype/side pair.

    Returns ``(impl, cmp)``: ``impl`` is the jitted search kernel produced
    by ``_searchsorted`` and ``cmp`` the matching scalar comparator
    (strict for side='left', non-strict for side='right'); datetime64/
    timedelta64 dtypes (char codes 'm'/'M') get the NaT-aware comparators.
    """
    assert side in VALID_SEARCHSORTED_SIDES

    if np_dtype.char in 'mM':
        # is datetime
        lt = _less_then_datetime64
        le = _less_then_or_equal_datetime64
    else:
        lt = _less_than
        le = _less_than_or_equal

    if side == 'left':
        _impl = _searchsorted(lt)
        _cmp = lt
    else:
        _impl = _searchsorted(le)
        _cmp = le

    return register_jitable(_impl), register_jitable(_cmp)


@overload(np.searchsorted)
def searchsorted(a, v, side='left'):
    """Numba overload of np.searchsorted for array, sequence and scalar v."""
    side_val = getattr(side, 'literal_value', side)

    if side_val not in VALID_SEARCHSORTED_SIDES:
        # could change this so that side doesn't need to be
        # a compile-time constant
        raise NumbaValueError(f"Invalid value given for 'side': {side_val}")

    if isinstance(v, (types.Array, types.Sequence)):
        v_dt = as_dtype(v.dtype)
    else:
        v_dt = as_dtype(v)

    np_dt = np.promote_types(as_dtype(a.dtype), v_dt)
    _impl, _cmp = make_searchsorted_implementation(np_dt, side_val)

    if isinstance(v, types.Array):
        def impl(a, v, side='left'):
            out = np.empty(v.size, dtype=np.intp)
            last_key_val = v.flat[0]
            min_idx = 0
            max_idx = len(a)

            for i in range(v.size):
                key_val = v.flat[i]

                # Reuse the previous window when consecutive keys are
                # ordered: a key >= the last one cannot land before the
                # previous insertion point.
                if _cmp(last_key_val, key_val):
                    max_idx = len(a)
                else:
                    min_idx = 0
                    if max_idx < len(a):
                        max_idx += 1
                    else:
                        max_idx = len(a)

                last_key_val = key_val
                min_idx, max_idx = _impl(a, key_val, min_idx, max_idx)
                out[i] = min_idx

            return out.reshape(v.shape)
    elif isinstance(v, types.Sequence):
        def impl(a, v, side='left'):
            # promote to an array and recurse into the array overload
            v = np.asarray(v)
            return np.searchsorted(a, v, side=side)
    else:  # presumably `v` is scalar
        def impl(a, v, side='left'):
            r, _ = _impl(a, v, 0, len(a))
            return r
    return impl


@overload(np.digitize)
def np_digitize(x, bins, right=False):
    """Numba overload of np.digitize, implemented via np.searchsorted."""

    if isinstance(x, types.Array) and x.dtype in types.complex_domain:
        raise TypingError('x may not be complex')

    @register_jitable
    def _monotonicity(bins):
        # Returns 1 for monotonically increasing (or constant/empty) bins,
        # -1 for monotonically decreasing, 0 for neither.

        # all bin edges hold the same value
        if len(bins) == 0:
            return 1

        # Skip repeated values at the beginning of the array
        last_value = bins[0]
        i = 1
        while i < len(bins) and bins[i] == last_value:
            i += 1

        # all bin edges hold the same value
        if i == len(bins):
            return 1

        next_value = bins[i]

        if last_value < next_value:
            # Possibly monotonic increasing
            for i in range(i + 1, len(bins)):
                last_value = next_value
                next_value = bins[i]
                if last_value > next_value:
                    return 0
            return 1

        else:
            # last > next, possibly monotonic decreasing
            for i in range(i + 1, len(bins)):
                last_value = next_value
                next_value = bins[i]
                if last_value < next_value:
                    return 0
            return -1

    def digitize_impl(x, bins, right=False):

        mono = _monotonicity(bins)

        if mono == 0:
            raise ValueError(
                "bins must be monotonically increasing or decreasing"
            )

        # this is backwards because the arguments below are swapped
        if right:
            if mono == -1:
                # reverse the bins, and invert the results
                return len(bins) - np.searchsorted(bins[::-1], x, side='left')
            else:
                return np.searchsorted(bins, x, side='left')
        else:
            if mono == -1:
                # reverse the bins, and invert the results
                return len(bins) - np.searchsorted(bins[::-1], x, side='right')
            else:
                return np.searchsorted(bins, x, side='right')

    return digitize_impl


# np.histogram's `range` parameter shadows the builtin; keep an alias so
# implementations below can still iterate with the real range().
_range = range


@overload(np.histogram)
def np_histogram(a, bins=10, range=None):
    """Numba overload of np.histogram; returns ``(hist, bin_edges)``."""
    if isinstance(bins, (int, types.Integer)):
        # With a uniform distribution of bins, use a fast algorithm
        # independent of the number of bins

        if range in (None, types.none):
            inf = float('inf')

            def histogram_impl(a, bins=10, range=None):
                # First pass: find the data extent, then delegate to the
                # explicit-range implementation below.
                bin_min = inf
                bin_max = -inf
                for view in np.nditer(a):
                    v = view.item()
                    if bin_min > v:
                        bin_min = v
                    if bin_max < v:
                        bin_max = v
                return np.histogram(a, bins, (bin_min, bin_max))

        else:
            def histogram_impl(a, bins=10, range=None):
                if bins <= 0:
                    raise ValueError("histogram(): `bins` should be a "
                                     "positive integer")
                bin_min, bin_max = range
                if not bin_min <= bin_max:
                    raise ValueError("histogram(): max must be larger than "
                                     "min in range parameter")

                hist = np.zeros(bins, np.intp)
                if bin_max > bin_min:
                    bin_ratio = bins / (bin_max - bin_min)
                    for view in np.nditer(a):
                        v = view.item()
                        b = math.floor((v - bin_min) * bin_ratio)
                        if 0 <= b < bins:
                            hist[int(b)] += 1
                        elif v == bin_max:
                            # top edge is inclusive
                            hist[bins - 1] += 1

                bins_array = np.linspace(bin_min, bin_max, bins + 1)
                return hist, bins_array

    else:
        # With a custom bins array, use a bisection search

        def histogram_impl(a, bins=10, range=None):
            nbins = len(bins) - 1
            for i in _range(nbins):
                # Note this also catches NaNs
                if not bins[i] <= bins[i + 1]:
                    raise ValueError("histogram(): bins must increase "
                                     "monotonically")

            bin_min = bins[0]
            bin_max = bins[nbins]
            hist = np.zeros(nbins, np.intp)

            if nbins > 0:
                for view in np.nditer(a):
                    v = view.item()
                    if not bin_min <= v <= bin_max:
                        # Value is out of bounds, ignore (also catches NaNs)
                        continue
                    # Bisect in bins[:-1]
                    lo = 0
                    hi = nbins - 1
                    while lo < hi:
                        # Note the `+ 1` is necessary to avoid an infinite
                        # loop where mid = lo => lo = mid
                        mid = (lo + hi + 1) >> 1
                        if v < bins[mid]:
                            hi = mid - 1
                        else:
                            lo = mid
                    hist[lo] += 1

            return hist, bins

    return histogram_impl


# Create np.finfo, np.iinfo and np.MachAr
# machar
_mach_ar_supported = ('ibeta', 'it', 'machep', 'eps', 'negep', 'epsneg',
                      'iexp', 'minexp', 'xmin', 'maxexp', 'xmax', 'irnd',
                      'ngrd', 'epsilon', 'tiny', 'huge', 'precision',
                      'resolution',)
MachAr = namedtuple('MachAr', _mach_ar_supported)

# Do not support MachAr field
# finfo
_finfo_supported = ('eps', 'epsneg', 'iexp', 'machep', 'max', 'maxexp', 'min',
                    'minexp', 'negep', 'nexp', 'nmant', 'precision',
                    'resolution', 'tiny', 'bits',)


finfo = namedtuple('finfo', _finfo_supported)

# iinfo
+_iinfo_supported = ('min', 'max', 'bits',) + +iinfo = namedtuple('iinfo', _iinfo_supported) + + +def generate_xinfo_body(arg, np_func, container, attr): + nbty = getattr(arg, 'dtype', arg) + np_dtype = as_dtype(nbty) + try: + f = np_func(np_dtype) + except ValueError: # This exception instance comes from NumPy + # The np function might not support the dtype + return None + data = tuple([getattr(f, x) for x in attr]) + + @register_jitable + def impl(arg): + return container(*data) + return impl + + +@overload(np.finfo) +def ol_np_finfo(dtype): + fn = generate_xinfo_body(dtype, np.finfo, finfo, _finfo_supported) + + def impl(dtype): + return fn(dtype) + return impl + + +@overload(np.iinfo) +def ol_np_iinfo(int_type): + fn = generate_xinfo_body(int_type, np.iinfo, iinfo, _iinfo_supported) + + def impl(int_type): + return fn(int_type) + return impl + + +def _get_inner_prod(dta, dtb): + # gets an inner product implementation, if both types are float then + # BLAS is used else a local function + + @register_jitable + def _innerprod(a, b): + acc = 0 + for i in range(len(a)): + acc = acc + a[i] * b[i] + return acc + + # no BLAS... 
use local function regardless + if not _HAVE_BLAS: + return _innerprod + + flty = types.real_domain | types.complex_domain + floats = dta in flty and dtb in flty + if not floats: + return _innerprod + else: + a_dt = as_dtype(dta) + b_dt = as_dtype(dtb) + dt = np.promote_types(a_dt, b_dt) + + @register_jitable + def _dot_wrap(a, b): + return np.dot(a.astype(dt), b.astype(dt)) + return _dot_wrap + + +def _assert_1d(a, func_name): + if isinstance(a, types.Array): + if not a.ndim <= 1: + raise TypingError("%s() only supported on 1D arrays " % func_name) + + +def _np_correlate_core(ap1, ap2, mode, direction): + pass + + +@overload(_np_correlate_core) +def _np_correlate_core_impl(ap1, ap2, mode, direction): + a_dt = as_dtype(ap1.dtype) + b_dt = as_dtype(ap2.dtype) + dt = np.promote_types(a_dt, b_dt) + innerprod = _get_inner_prod(ap1.dtype, ap2.dtype) + + def impl(ap1, ap2, mode, direction): + # Implementation loosely based on `_pyarray_correlate` from + # https://github.com/numpy/numpy/blob/3bce2be74f228684ca2895ad02b63953f37e2a9d/numpy/core/src/multiarray/multiarraymodule.c#L1191 # noqa: E501 + # For "mode": + # Convolve uses 'full' by default. + # Correlate uses 'valid' by default. + # For "direction", +1 to write the return values out in order 0->N + # -1 to write them out N->0. + + n1 = len(ap1) + n2 = len(ap2) + + if n1 < n2: + # This should never occur when called by np.convolve because + # _np_correlate.impl swaps arguments based on length. + # The same applies for np.correlate. 
+ raise ValueError("'len(ap1)' must greater than 'len(ap2)'") + + length = n1 + n = n2 + if mode == "valid": + length = length - n + 1 + n_left = 0 + n_right = 0 + elif mode == "full": + n_right = n - 1 + n_left = n - 1 + length = length + n - 1 + elif mode == "same": + n_left = n // 2 + n_right = n - n_left - 1 + else: + raise ValueError( + "Invalid 'mode', " + "valid are 'full', 'same', 'valid'" + ) + + ret = np.zeros(length, dt) + + if direction == 1: + idx = 0 + inc = 1 + elif direction == -1: + idx = length - 1 + inc = -1 + else: + raise ValueError("Invalid direction") + + for i in range(n_left): + k = i + n - n_left + ret[idx] = innerprod(ap1[:k], ap2[-k:]) + idx = idx + inc + + for i in range(n1 - n2 + 1): + ret[idx] = innerprod(ap1[i : i + n2], ap2) + idx = idx + inc + + for i in range(n_right): + k = n - i - 1 + ret[idx] = innerprod(ap1[-k:], ap2[:k]) + idx = idx + inc + + return ret + + return impl + + +@overload(np.correlate) +def _np_correlate(a, v, mode="valid"): + _assert_1d(a, 'np.correlate') + _assert_1d(v, 'np.correlate') + + @register_jitable + def op_conj(x): + return np.conj(x) + + @register_jitable + def op_nop(x): + return x + + if a.dtype in types.complex_domain: + if v.dtype in types.complex_domain: + a_op = op_nop + b_op = op_conj + else: + a_op = op_nop + b_op = op_nop + else: + if v.dtype in types.complex_domain: + a_op = op_nop + b_op = op_conj + else: + a_op = op_conj + b_op = op_nop + + def impl(a, v, mode="valid"): + la = len(a) + lv = len(v) + + if la == 0: + raise ValueError("'a' cannot be empty") + if lv == 0: + raise ValueError("'v' cannot be empty") + + if la < lv: + return _np_correlate_core(b_op(v), a_op(a), mode, -1) + else: + return _np_correlate_core(a_op(a), b_op(v), mode, 1) + + return impl + + +@overload(np.convolve) +def np_convolve(a, v, mode="full"): + _assert_1d(a, 'np.convolve') + _assert_1d(v, 'np.convolve') + + def impl(a, v, mode="full"): + la = len(a) + lv = len(v) + + if la == 0: + raise ValueError("'a' cannot 
be empty") + if lv == 0: + raise ValueError("'v' cannot be empty") + + if la < lv: + return _np_correlate_core(v, a[::-1], mode, 1) + else: + return _np_correlate_core(a, v[::-1], mode, 1) + + return impl + + +@overload(np.asarray) +def np_asarray(a, dtype=None): + + # developer note... keep this function (type_can_asarray) in sync with the + # accepted types implementations below! + if not type_can_asarray(a): + return None + + if isinstance(a, types.Array): + if is_nonelike(dtype) or a.dtype == dtype.dtype: + def impl(a, dtype=None): + return a + else: + def impl(a, dtype=None): + return a.astype(dtype) + elif isinstance(a, (types.Sequence, types.Tuple)): + # Nested lists cannot be unpacked, therefore only single lists are + # permitted and these conform to Sequence and can be unpacked along on + # the same path as Tuple. + if is_nonelike(dtype): + def impl(a, dtype=None): + return np.array(a) + else: + def impl(a, dtype=None): + return np.array(a, dtype) + elif isinstance(a, (types.Number, types.Boolean)): + dt_conv = a if is_nonelike(dtype) else dtype + ty = as_dtype(dt_conv) + + def impl(a, dtype=None): + return np.array(a, ty) + elif isinstance(a, types.containers.ListType): + if not isinstance(a.dtype, (types.Number, types.Boolean)): + raise TypingError( + "asarray support for List is limited " + "to Boolean and Number types") + + target_dtype = a.dtype if is_nonelike(dtype) else dtype + + def impl(a, dtype=None): + l = len(a) + ret = np.empty(l, dtype=target_dtype) + for i, v in enumerate(a): + ret[i] = v + return ret + elif isinstance(a, types.StringLiteral): + arr = np.asarray(a.literal_value) + + def impl(a, dtype=None): + return arr.copy() + else: + impl = None + + return impl + + +if numpy_version < (2, 0): + @overload(np.asfarray) + def np_asfarray(a, dtype=np.float64): + # convert numba dtype types into NumPy dtype + if isinstance(dtype, types.Type): + dtype = as_dtype(dtype) + if not np.issubdtype(dtype, np.inexact): + dx = types.float64 + else: + 
dx = dtype + + def impl(a, dtype=np.float64): + return np.asarray(a, dx) + return impl + + +@overload(np.extract) +def np_extract(condition, arr): + + def np_extract_impl(condition, arr): + cond = np.asarray(condition).flatten() + a = np.asarray(arr) + + if a.size == 0: + raise ValueError('Cannot extract from an empty array') + + # the following looks odd but replicates NumPy... + # https://github.com/numpy/numpy/issues/12859 + if np.any(cond[a.size:]) and cond.size > a.size: + msg = 'condition shape inconsistent with arr shape' + raise ValueError(msg) + # NumPy raises IndexError: index 'm' is out of + # bounds for size 'n' + + max_len = min(a.size, cond.size) + out = [a.flat[idx] for idx in range(max_len) if cond[idx]] + + return np.array(out) + + return np_extract_impl + + +@overload(np.select) +def np_select(condlist, choicelist, default=0): + + def np_select_arr_impl(condlist, choicelist, default=0): + if len(condlist) != len(choicelist): + raise ValueError('list of cases must be same length as list ' + 'of conditions') + out = default * np.ones(choicelist[0].shape, choicelist[0].dtype) + # should use reversed+zip, but reversed is not available + for i in range(len(condlist) - 1, -1, -1): + cond = condlist[i] + choice = choicelist[i] + out = np.where(cond, choice, out) + return out + + # first we check the types of the input parameters + if not isinstance(condlist, (types.List, types.UniTuple)): + raise NumbaTypeError('condlist must be a List or a Tuple') + if not isinstance(choicelist, (types.List, types.UniTuple)): + raise NumbaTypeError('choicelist must be a List or a Tuple') + if not isinstance(default, (int, types.Number, types.Boolean)): + raise NumbaTypeError('default must be a scalar (number or boolean)') + # the types of the parameters have been checked, now we test the types + # of the content of the parameters + # implementation note: if in the future numba's np.where accepts tuples + # as elements of condlist, then the check below should be extended 
to + # accept tuples + if not isinstance(condlist[0], types.Array): + raise NumbaTypeError('items of condlist must be arrays') + if not isinstance(choicelist[0], types.Array): + raise NumbaTypeError('items of choicelist must be arrays') + # the types of the parameters and their contents have been checked, + # now we test the dtypes of the content of parameters + if isinstance(condlist[0], types.Array): + if not isinstance(condlist[0].dtype, types.Boolean): + raise NumbaTypeError('condlist arrays must contain booleans') + if isinstance(condlist[0], types.UniTuple): + if not (isinstance(condlist[0], types.UniTuple) + and isinstance(condlist[0][0], types.Boolean)): + raise NumbaTypeError('condlist tuples must only contain booleans') + # the input types are correct, now we perform checks on the dimensions + if (isinstance(condlist[0], types.Array) and + condlist[0].ndim != choicelist[0].ndim): + raise NumbaTypeError('condlist and choicelist elements must have the ' + 'same number of dimensions') + if isinstance(condlist[0], types.Array) and condlist[0].ndim < 1: + raise NumbaTypeError('condlist arrays must be of at least dimension 1') + + return np_select_arr_impl + + +@overload(np.union1d) +def np_union1d(ar1, ar2): + if not type_can_asarray(ar1) or not type_can_asarray(ar2): + raise TypingError("The arguments to np.union1d must be array-like") + if (('unichr' in ar1.dtype.name or 'unichr' in ar2.dtype.name) and + ar1.dtype.name != ar2.dtype.name): + raise TypingError("For Unicode arrays, arrays must have same dtype") + + def union_impl(ar1, ar2): + a = np.ravel(np.asarray(ar1)) + b = np.ravel(np.asarray(ar2)) + return np.unique(np.concatenate((a, b))) + + return union_impl + + +@overload(np.asarray_chkfinite) +def np_asarray_chkfinite(a, dtype=None): + + msg = "The argument to np.asarray_chkfinite must be array-like" + if not isinstance(a, (types.Array, types.Sequence, types.Tuple)): + raise TypingError(msg) + + if is_nonelike(dtype): + dt = a.dtype + else: + try: + 
dt = as_dtype(dtype) + except NumbaNotImplementedError: + raise TypingError('dtype must be a valid Numpy dtype') + + def impl(a, dtype=None): + a = np.asarray(a, dtype=dt) + for i in np.nditer(a): + if not np.isfinite(i): + raise ValueError("array must not contain infs or NaNs") + return a + + return impl + + +@overload(np.unwrap) +def numpy_unwrap(p, discont=None, axis=-1, period=6.283185307179586): + if not isinstance(axis, (int, types.Integer)): + msg = 'The argument "axis" must be an integer' + raise TypingError(msg) + + if not type_can_asarray(p): + msg = 'The argument "p" must be array-like' + raise TypingError(msg) + + if (not isinstance(discont, (types.Integer, types.Float)) + and not cgutils.is_nonelike(discont)): + msg = 'The argument "discont" must be a scalar' + raise TypingError(msg) + + if not isinstance(period, (float, types.Number)): + msg = 'The argument "period" must be a scalar' + raise TypingError(msg) + + slice1 = (slice(1, None, None),) + if isinstance(period, types.Number): + dtype = np.result_type(as_dtype(p.dtype), as_dtype(period)) + else: + dtype = np.result_type(as_dtype(p.dtype), np.float64) + + integer_input = np.issubdtype(dtype, np.integer) + + def impl(p, discont=None, axis=-1, period=6.283185307179586): + if axis != -1: + msg = 'Value for argument "axis" is not supported' + raise ValueError(msg) + # Flatten to a 2D array, keeping axis -1 + p_init = np.asarray(p).astype(dtype) + init_shape = p_init.shape + last_axis = init_shape[-1] + p_new = p_init.reshape((p_init.size // last_axis, last_axis)) + # Manipulate discont and period + if discont is None: + discont = period / 2 + if integer_input: + interval_high, rem = divmod(period, 2) + boundary_ambiguous = rem == 0 + else: + interval_high = period / 2 + boundary_ambiguous = True + interval_low = -interval_high + + # Work on each row separately + for i in range(p_init.size // last_axis): + row = p_new[i] + dd = np.diff(row) + ddmod = np.mod(dd - interval_low, period) + interval_low + 
if boundary_ambiguous: + ddmod = np.where((ddmod == interval_low) & (dd > 0), + interval_high, ddmod) + ph_correct = ddmod - dd + + ph_correct = np.where(np.array([abs(x) for x in dd]) < discont, 0, + ph_correct) + ph_ravel = np.where(np.array([abs(x) for x in dd]) < discont, 0, + ph_correct) + ph_correct = np.reshape(ph_ravel, ph_correct.shape) + up = np.copy(row) + up[slice1] = row[slice1] + ph_correct.cumsum() + p_new[i] = up + + return p_new.reshape(init_shape) + + return impl + +#---------------------------------------------------------------------------- +# Windowing functions +# - translated from the numpy implementations found in: +# https://github.com/numpy/numpy/blob/v1.16.1/numpy/lib/function_base.py#L2543-L3233 # noqa: E501 +# at commit: f1c4c758e1c24881560dd8ab1e64ae750 +# - and also, for NumPy >= 1.20, translated from implementations in +# https://github.com/numpy/numpy/blob/156cd054e007b05d4ac4829e10a369d19dd2b0b1/numpy/lib/function_base.py#L2655-L3065 # noqa: E501 + + +@register_jitable +def np_bartlett_impl(M): + n = np.arange(1. - M, M, 2) + return np.where(np.less_equal(n, 0), 1 + n / (M - 1), 1 - n / (M - 1)) + + +@register_jitable +def np_blackman_impl(M): + n = np.arange(1. 
- M, M, 2) + return (0.42 + 0.5 * np.cos(np.pi * n / (M - 1)) + + 0.08 * np.cos(2.0 * np.pi * n / (M - 1))) + + +@register_jitable +def np_hamming_impl(M): + n = np.arange(1 - M, M, 2) + return 0.54 + 0.46 * np.cos(np.pi * n / (M - 1)) + + +@register_jitable +def np_hanning_impl(M): + n = np.arange(1 - M, M, 2) + return 0.5 + 0.5 * np.cos(np.pi * n / (M - 1)) + + +def window_generator(func): + def window_overload(M): + if not isinstance(M, types.Integer): + raise TypingError('M must be an integer') + + def window_impl(M): + + if M < 1: + return np.array((), dtype=np.float64) + if M == 1: + return np.ones(1, dtype=np.float64) + return func(M) + + return window_impl + return window_overload + + +overload(np.bartlett)(window_generator(np_bartlett_impl)) +overload(np.blackman)(window_generator(np_blackman_impl)) +overload(np.hamming)(window_generator(np_hamming_impl)) +overload(np.hanning)(window_generator(np_hanning_impl)) + + +_i0A = np.array([ + -4.41534164647933937950E-18, + 3.33079451882223809783E-17, + -2.43127984654795469359E-16, + 1.71539128555513303061E-15, + -1.16853328779934516808E-14, + 7.67618549860493561688E-14, + -4.85644678311192946090E-13, + 2.95505266312963983461E-12, + -1.72682629144155570723E-11, + 9.67580903537323691224E-11, + -5.18979560163526290666E-10, + 2.65982372468238665035E-9, + -1.30002500998624804212E-8, + 6.04699502254191894932E-8, + -2.67079385394061173391E-7, + 1.11738753912010371815E-6, + -4.41673835845875056359E-6, + 1.64484480707288970893E-5, + -5.75419501008210370398E-5, + 1.88502885095841655729E-4, + -5.76375574538582365885E-4, + 1.63947561694133579842E-3, + -4.32430999505057594430E-3, + 1.05464603945949983183E-2, + -2.37374148058994688156E-2, + 4.93052842396707084878E-2, + -9.49010970480476444210E-2, + 1.71620901522208775349E-1, + -3.04682672343198398683E-1, + 6.76795274409476084995E-1, +]) + +_i0B = np.array([ + -7.23318048787475395456E-18, + -4.83050448594418207126E-18, + 4.46562142029675999901E-17, + 3.46122286769746109310E-17, 
+ -2.82762398051658348494E-16, + -3.42548561967721913462E-16, + 1.77256013305652638360E-15, + 3.81168066935262242075E-15, + -9.55484669882830764870E-15, + -4.15056934728722208663E-14, + 1.54008621752140982691E-14, + 3.85277838274214270114E-13, + 7.18012445138366623367E-13, + -1.79417853150680611778E-12, + -1.32158118404477131188E-11, + -3.14991652796324136454E-11, + 1.18891471078464383424E-11, + 4.94060238822496958910E-10, + 3.39623202570838634515E-9, + 2.26666899049817806459E-8, + 2.04891858946906374183E-7, + 2.89137052083475648297E-6, + 6.88975834691682398426E-5, + 3.36911647825569408990E-3, + 8.04490411014108831608E-1, +]) + + +@register_jitable +def _chbevl(x, vals): + b0 = vals[0] + b1 = 0.0 + + for i in range(1, len(vals)): + b2 = b1 + b1 = b0 + b0 = x * b1 - b2 + vals[i] + + return 0.5 * (b0 - b2) + + +@register_jitable +def _i0(x): + if x < 0: + x = -x + if x <= 8.0: + y = (0.5 * x) - 2.0 + return np.exp(x) * _chbevl(y, _i0A) + + return np.exp(x) * _chbevl(32.0 / x - 2.0, _i0B) / np.sqrt(x) + + +@register_jitable +def _i0n(n, alpha, beta): + y = np.empty_like(n, dtype=np.float64) + t = _i0(np.float64(beta)) + for i in range(len(y)): + y[i] = _i0(beta * np.sqrt(1 - ((n[i] - alpha) / alpha)**2.0)) / t + + return y + + +@overload(np.kaiser) +def np_kaiser(M, beta): + if not isinstance(M, types.Integer): + raise TypingError('M must be an integer') + + if not isinstance(beta, (types.Integer, types.Float)): + raise TypingError('beta must be an integer or float') + + def np_kaiser_impl(M, beta): + if M < 1: + return np.array((), dtype=np.float64) + if M == 1: + return np.ones(1, dtype=np.float64) + + n = np.arange(0, M) + alpha = (M - 1) / 2.0 + + return _i0n(n, alpha, beta) + + return np_kaiser_impl + + +@register_jitable +def _cross_operation(a, b, out): + + def _cross_preprocessing(x): + x0 = x[..., 0] + x1 = x[..., 1] + if x.shape[-1] == 3: + x2 = x[..., 2] + else: + x2 = np.multiply(x.dtype.type(0), x0) + return x0, x1, x2 + + a0, a1, a2 = 
_cross_preprocessing(a) + b0, b1, b2 = _cross_preprocessing(b) + + cp0 = np.multiply(a1, b2) - np.multiply(a2, b1) + cp1 = np.multiply(a2, b0) - np.multiply(a0, b2) + cp2 = np.multiply(a0, b1) - np.multiply(a1, b0) + + out[..., 0] = cp0 + out[..., 1] = cp1 + out[..., 2] = cp2 + + +def _cross(a, b): + pass + + +@overload(_cross) +def _cross_impl(a, b): + dtype = np.promote_types(as_dtype(a.dtype), as_dtype(b.dtype)) + if a.ndim == 1 and b.ndim == 1: + def impl(a, b): + cp = np.empty((3,), dtype) + _cross_operation(a, b, cp) + return cp + else: + def impl(a, b): + shape = np.add(a[..., 0], b[..., 0]).shape + cp = np.empty(shape + (3,), dtype) + _cross_operation(a, b, cp) + return cp + return impl + + +@overload(np.cross) +def np_cross(a, b): + if not type_can_asarray(a) or not type_can_asarray(b): + raise TypingError("Inputs must be array-like.") + + def impl(a, b): + a_ = np.asarray(a) + b_ = np.asarray(b) + if a_.shape[-1] not in (2, 3) or b_.shape[-1] not in (2, 3): + raise ValueError(( + "Incompatible dimensions for cross product\n" + "(dimension must be 2 or 3)" + )) + + if a_.shape[-1] == 3 or b_.shape[-1] == 3: + return _cross(a_, b_) + else: + raise ValueError(( + "Dimensions for both inputs is 2.\n" + "Please replace your numpy.cross(a, b) call with " + "a call to `cross2d(a, b)` from `numba.np.extensions`." + )) + return impl + + +@register_jitable +def _cross2d_operation(a, b): + + def _cross_preprocessing(x): + x0 = x[..., 0] + x1 = x[..., 1] + return x0, x1 + + a0, a1 = _cross_preprocessing(a) + b0, b1 = _cross_preprocessing(b) + + cp = np.multiply(a0, b1) - np.multiply(a1, b0) + # If ndim of a and b is 1, cp is a scalar. + # In this case np.cross returns a 0-D array, containing the scalar. + # np.asarray is used to reconcile this case, without introducing + # overhead in the case where cp is an actual N-D array. 
+ # (recall that np.asarray does not copy existing arrays) + return np.asarray(cp) + + +def cross2d(a, b): + pass + + +@overload(cross2d) +def cross2d_impl(a, b): + if not type_can_asarray(a) or not type_can_asarray(b): + raise TypingError("Inputs must be array-like.") + + def impl(a, b): + a_ = np.asarray(a) + b_ = np.asarray(b) + if a_.shape[-1] != 2 or b_.shape[-1] != 2: + raise ValueError(( + "Incompatible dimensions for 2D cross product\n" + "(dimension must be 2 for both inputs)" + )) + return _cross2d_operation(a_, b_) + + return impl + + +@overload(np.trim_zeros) +def np_trim_zeros(filt, trim='fb'): + if not isinstance(filt, types.Array): + raise NumbaTypeError('The first argument must be an array') + + if filt.ndim > 1: + raise NumbaTypeError('array must be 1D') + + if not isinstance(trim, (str, types.UnicodeType)): + raise NumbaTypeError('The second argument must be a string') + + trim_escapes = numpy_version >= (2, 2) + + def impl(filt, trim='fb'): + a_ = np.asarray(filt) + first = 0 + trim = trim.lower() + if 'f' in trim: + for i in a_: + if i == 0 or (trim_escapes and i == ''): + first = first + 1 + else: + break + last = len(filt) + if 'b' in trim: + for i in a_[::-1]: + if i == 0 or (trim_escapes and i == ''): + last = last - 1 + else: + break + return a_[first:last] + + return impl + + +@overload(np.setxor1d) +def jit_np_setxor1d(ar1, ar2, assume_unique=False): + if not (type_can_asarray(ar1) or type_can_asarray(ar2)): + raise TypingError('setxor1d: first two args must be array-like') + if not (isinstance(assume_unique, (types.Boolean, bool))): + raise TypingError('setxor1d: Argument "assume_unique" must be boolean') + + # https://github.com/numpy/numpy/blob/03b62604eead0f7d279a5a4c094743eb29647368/numpy/lib/arraysetops.py#L477 # noqa: E501 + def np_setxor1d_impl(ar1, ar2, assume_unique=False): + a = np.asarray(ar1) + b = np.asarray(ar2) + + if not assume_unique: + a = np.unique(a) + b = np.unique(b) + else: + a = a.ravel() + b = b.ravel() + + # 
Implementation very similar to np_intersect1d_impl: + # We want union minus the intersect + aux = np.concatenate((a, b)) + aux.sort() + + flag = np.empty(aux.shape[0] + 1, dtype=np.bool_) + flag[0] = True + flag[-1] = True + flag[1:-1] = aux[1:] != aux[:-1] + return aux[flag[1:] & flag[:-1]] + + return np_setxor1d_impl + + +@overload(np.setdiff1d) +def jit_np_setdiff1d(ar1, ar2, assume_unique=False): + if not (type_can_asarray(ar1) or type_can_asarray(ar2)): + raise TypingError('setdiff1d: first two args must be array-like') + if not (isinstance(assume_unique, (types.Boolean, bool))): + raise TypingError('setdiff1d: Argument "assume_unique" must be boolean') + + # https://github.com/numpy/numpy/blob/03b62604eead0f7d279a5a4c094743eb29647368/numpy/lib/arraysetops.py#L940 # noqa: E501 + def np_setdiff1d_impl(ar1, ar2, assume_unique=False): + ar1 = np.asarray(ar1) + ar2 = np.asarray(ar2) + if assume_unique: + ar1 = ar1.ravel() + ar2 = ar2.ravel() + else: + ar1 = np.unique(ar1) + ar2 = np.unique(ar2) + return ar1[np.in1d(ar1, ar2, assume_unique=True, invert=True)] + + return np_setdiff1d_impl + + +@overload(np.in1d) +def jit_np_in1d(ar1, ar2, assume_unique=False, invert=False): + if not (type_can_asarray(ar1) or type_can_asarray(ar2)): + raise TypingError('in1d: first two args must be array-like') + if not isinstance(assume_unique, (types.Boolean, bool)): + raise TypingError('in1d: Argument "assume_unique" must be boolean') + if not isinstance(invert, (types.Boolean, bool)): + raise TypingError('in1d: Argument "invert" must be boolean') + + def np_in1d_impl(ar1, ar2, assume_unique=False, invert=False): + # https://github.com/numpy/numpy/blob/03b62604eead0f7d279a5a4c094743eb29647368/numpy/lib/arraysetops.py#L525 # noqa: E501 + + # Ravel both arrays, behavior for the first array could be different + ar1 = np.asarray(ar1).ravel() + ar2 = np.asarray(ar2).ravel() + + # This code is run when it would make the code significantly faster + # Sorting is also not guaranteed to 
work on objects but numba does + # not support object arrays. + if len(ar2) < 10 * len(ar1) ** 0.145: + if invert: + mask = np.ones(len(ar1), dtype=np.bool_) + for a in ar2: + mask &= (ar1 != a) + else: + mask = np.zeros(len(ar1), dtype=np.bool_) + for a in ar2: + mask |= (ar1 == a) + return mask + + # Otherwise use sorting + if not assume_unique: + # Equivalent to ar1, inv_idx = np.unique(ar1, return_inverse=True) + # https://github.com/numpy/numpy/blob/03b62604eead0f7d279a5a4c094743eb29647368/numpy/lib/arraysetops.py#L358C8-L358C8 # noqa: E501 + order1 = np.argsort(ar1) + aux = ar1[order1] + mask = np.empty(aux.shape, dtype=np.bool_) + mask[:1] = True + mask[1:] = aux[1:] != aux[:-1] + ar1 = aux[mask] + imask = np.cumsum(mask) - 1 + inv_idx = np.empty(mask.shape, dtype=np.intp) + inv_idx[order1] = imask + ar2 = np.unique(ar2) + + ar = np.concatenate((ar1, ar2)) + # We need this to be a stable sort, so always use 'mergesort' + # here. The values from the first array should always come before + # the values from the second array. 
+ order = ar.argsort(kind='mergesort') + sar = ar[order] + flag = np.empty(sar.size, np.bool_) + if invert: + flag[:-1] = (sar[1:] != sar[:-1]) + else: + flag[:-1] = (sar[1:] == sar[:-1]) + flag[-1:] = invert + ret = np.empty(ar.shape, dtype=np.bool_) + ret[order] = flag + + # return ret[:len(ar1)] + if assume_unique: + return ret[:len(ar1)] + else: + return ret[inv_idx] + + return np_in1d_impl + + +@overload(np.isin) +def jit_np_isin(element, test_elements, assume_unique=False, invert=False): + if not (type_can_asarray(element) or type_can_asarray(test_elements)): + raise TypingError('isin: first two args must be array-like') + if not (isinstance(assume_unique, (types.Boolean, bool))): + raise TypingError('isin: Argument "assume_unique" must be boolean') + if not (isinstance(invert, (types.Boolean, bool))): + raise TypingError('isin: Argument "invert" must be boolean') + + # https://github.com/numpy/numpy/blob/03b62604eead0f7d279a5a4c094743eb29647368/numpy/lib/arraysetops.py#L889 # noqa: E501 + def np_isin_impl(element, test_elements, assume_unique=False, invert=False): + + element = np.asarray(element) + return np.in1d(element, test_elements, assume_unique=assume_unique, + invert=invert).reshape(element.shape) + + return np_isin_impl diff --git a/venv/lib/python3.10/site-packages/numba/np/polynomial/__init__.py b/venv/lib/python3.10/site-packages/numba/np/polynomial/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/numba/np/polynomial/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/polynomial/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce62018536c020bf94ccfe26fd7adf016454a648 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/polynomial/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/np/polynomial/__pycache__/polynomial_core.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/polynomial/__pycache__/polynomial_core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6cd7ab32e68e4adf912c2a452cccb9d32729fdf8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/polynomial/__pycache__/polynomial_core.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/polynomial/__pycache__/polynomial_functions.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/polynomial/__pycache__/polynomial_functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bae5d2648b737c9bd9f5cb9aa70b4ab298da845e Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/polynomial/__pycache__/polynomial_functions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/polynomial/polynomial_core.py b/venv/lib/python3.10/site-packages/numba/np/polynomial/polynomial_core.py new file mode 100644 index 0000000000000000000000000000000000000000..16448fb6c2d348e876144e5f0339e16b35283e3c --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/polynomial/polynomial_core.py @@ -0,0 +1,223 @@ +from numba.extending import (models, register_model, type_callable, + unbox, NativeValue, make_attribute_wrapper, box, + lower_builtin) +from numba.core import types, cgutils +import warnings +from numba.core.errors import NumbaExperimentalFeatureWarning, NumbaValueError +from numpy.polynomial.polynomial import Polynomial +from contextlib import ExitStack +import numpy as np +from llvmlite import ir + + +@register_model(types.PolynomialType) +class PolynomialModel(models.StructModel): + def __init__(self, dmm, fe_type): + members = [ + ('coef', fe_type.coef), + ('domain', fe_type.domain), + ('window', fe_type.window) + # Introduced in NumPy 1.24, maybe leave it out for now + # ('symbol', 
types.string) + ] + super(PolynomialModel, self).__init__(dmm, fe_type, members) + + +@type_callable(Polynomial) +def type_polynomial(context): + def typer(coef, domain=None, window=None): + default_domain = types.Array(types.int64, 1, 'C') + double_domain = types.Array(types.double, 1, 'C') + default_window = types.Array(types.int64, 1, 'C') + double_window = types.Array(types.double, 1, 'C') + double_coef = types.Array(types.double, 1, 'C') + + warnings.warn("Polynomial class is experimental", + category=NumbaExperimentalFeatureWarning) + + if isinstance(coef, types.Array) and \ + all([a is None for a in (domain, window)]): + if coef.ndim == 1: + # If Polynomial(coef) is called, coef is cast to double dtype, + # and domain and window are set to equal [-1, 1], i.e. have + # integer dtype + return types.PolynomialType(double_coef, + default_domain, + default_window, + 1) + else: + msg = 'Coefficient array is not 1-d' + raise NumbaValueError(msg) + elif all([isinstance(a, types.Array) for a in (coef, domain, window)]): + if coef.ndim == 1: + if all([a.ndim == 1 for a in (domain, window)]): + # If Polynomial(coef, domain, window) is called, then coef, + # domain and window are cast to double dtype + return types.PolynomialType(double_coef, + double_domain, + double_window, + 3) + else: + msg = 'Coefficient array is not 1-d' + raise NumbaValueError(msg) + return typer + + +make_attribute_wrapper(types.PolynomialType, 'coef', 'coef') +make_attribute_wrapper(types.PolynomialType, 'domain', 'domain') +make_attribute_wrapper(types.PolynomialType, 'window', 'window') +# Introduced in NumPy 1.24, maybe leave it out for now +# make_attribute_wrapper(types.PolynomialType, 'symbol', 'symbol') + + +@lower_builtin(Polynomial, types.Array) +def impl_polynomial1(context, builder, sig, args): + + def to_double(arr): + return np.asarray(arr, dtype=np.double) + + def const_impl(): + return np.asarray([-1, 1]) + + typ = sig.return_type + polynomial = 
cgutils.create_struct_proxy(typ)(context, builder) + sig_coef = sig.args[0].copy(dtype=types.double)(sig.args[0]) + coef_cast = context.compile_internal(builder, to_double, sig_coef, args) + sig_domain = sig.args[0].copy(dtype=types.intp)() + sig_window = sig.args[0].copy(dtype=types.intp)() + domain_cast = context.compile_internal(builder, const_impl, sig_domain, ()) + window_cast = context.compile_internal(builder, const_impl, sig_window, ()) + polynomial.coef = coef_cast + polynomial.domain = domain_cast + polynomial.window = window_cast + + return polynomial._getvalue() + + +@lower_builtin(Polynomial, types.Array, types.Array, types.Array) +def impl_polynomial3(context, builder, sig, args): + + def to_double(coef): + return np.asarray(coef, dtype=np.double) + + typ = sig.return_type + polynomial = cgutils.create_struct_proxy(typ)(context, builder) + + coef_sig = sig.args[0].copy(dtype=types.double)(sig.args[0]) + domain_sig = sig.args[1].copy(dtype=types.double)(sig.args[1]) + window_sig = sig.args[2].copy(dtype=types.double)(sig.args[2]) + coef_cast = context.compile_internal(builder, + to_double, coef_sig, + (args[0],)) + domain_cast = context.compile_internal(builder, + to_double, domain_sig, + (args[1],)) + window_cast = context.compile_internal(builder, + to_double, window_sig, + (args[2],)) + + domain_helper = context.make_helper(builder, + domain_sig.return_type, + value=domain_cast) + window_helper = context.make_helper(builder, + window_sig.return_type, + value=window_cast) + + i64 = ir.IntType(64) + two = i64(2) + + s1 = builder.extract_value(domain_helper.shape, 0) + s2 = builder.extract_value(window_helper.shape, 0) + pred1 = builder.icmp_signed('!=', s1, two) + pred2 = builder.icmp_signed('!=', s2, two) + + with cgutils.if_unlikely(builder, pred1): + context.call_conv.return_user_exc( + builder, ValueError, + ("Domain has wrong number of elements.",)) + + with cgutils.if_unlikely(builder, pred2): + context.call_conv.return_user_exc( + builder, 
ValueError, + ("Window has wrong number of elements.",)) + + polynomial.coef = coef_cast + polynomial.domain = domain_helper._getvalue() + polynomial.window = window_helper._getvalue() + + return polynomial._getvalue() + + +@unbox(types.PolynomialType) +def unbox_polynomial(typ, obj, c): + """ + Convert a Polynomial object to a native polynomial structure. + """ + is_error_ptr = cgutils.alloca_once_value(c.builder, cgutils.false_bit) + polynomial = cgutils.create_struct_proxy(typ)(c.context, c.builder) + with ExitStack() as stack: + natives = [] + for name in ("coef", "domain", "window"): + attr = c.pyapi.object_getattr_string(obj, name) + with cgutils.early_exit_if_null(c.builder, stack, attr): + c.builder.store(cgutils.true_bit, is_error_ptr) + t = getattr(typ, name) + native = c.unbox(t, attr) + c.pyapi.decref(attr) + with cgutils.early_exit_if(c.builder, stack, native.is_error): + c.builder.store(cgutils.true_bit, is_error_ptr) + natives.append(native) + + polynomial.coef = natives[0] + polynomial.domain = natives[1] + polynomial.window = natives[2] + + return NativeValue(polynomial._getvalue(), + is_error=c.builder.load(is_error_ptr)) + + +@box(types.PolynomialType) +def box_polynomial(typ, val, c): + """ + Convert a native polynomial structure to a Polynomial object. 
+ """ + ret_ptr = cgutils.alloca_once(c.builder, c.pyapi.pyobj) + fail_obj = c.pyapi.get_null_object() + + with ExitStack() as stack: + polynomial = cgutils.create_struct_proxy(typ)(c.context, c.builder, + value=val) + coef_obj = c.box(typ.coef, polynomial.coef) + with cgutils.early_exit_if_null(c.builder, stack, coef_obj): + c.builder.store(fail_obj, ret_ptr) + + domain_obj = c.box(typ.domain, polynomial.domain) + with cgutils.early_exit_if_null(c.builder, stack, domain_obj): + c.builder.store(fail_obj, ret_ptr) + + window_obj = c.box(typ.window, polynomial.window) + with cgutils.early_exit_if_null(c.builder, stack, window_obj): + c.builder.store(fail_obj, ret_ptr) + + class_obj = c.pyapi.unserialize(c.pyapi.serialize_object(Polynomial)) + with cgutils.early_exit_if_null(c.builder, stack, class_obj): + c.pyapi.decref(coef_obj) + c.pyapi.decref(domain_obj) + c.pyapi.decref(window_obj) + c.builder.store(fail_obj, ret_ptr) + + if typ.n_args == 1: + res1 = c.pyapi.call_function_objargs(class_obj, (coef_obj,)) + c.builder.store(res1, ret_ptr) + else: + res3 = c.pyapi.call_function_objargs(class_obj, (coef_obj, + domain_obj, + window_obj)) + c.builder.store(res3, ret_ptr) + + c.pyapi.decref(coef_obj) + c.pyapi.decref(domain_obj) + c.pyapi.decref(window_obj) + c.pyapi.decref(class_obj) + + return c.builder.load(ret_ptr) diff --git a/venv/lib/python3.10/site-packages/numba/np/polynomial/polynomial_functions.py b/venv/lib/python3.10/site-packages/numba/np/polynomial/polynomial_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..4a13f8cf0d1bb54744e74dfac8485cea8e9cbeb1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/polynomial/polynomial_functions.py @@ -0,0 +1,375 @@ +""" +Implementation of operations involving polynomials. 
+""" + + +import numpy as np +from numpy.polynomial import polynomial as poly +from numpy.polynomial import polyutils as pu + +from numba import literal_unroll +from numba.core import types, errors +from numba.core.extending import overload +from numba.np.numpy_support import type_can_asarray, as_dtype, from_dtype + + +@overload(np.roots) +def roots_impl(p): + + # cast int vectors to float cf. numpy, this is a bit dicey as + # the roots could be complex which will fail anyway + ty = getattr(p, 'dtype', p) + if isinstance(ty, types.Integer): + cast_t = np.float64 + else: + cast_t = as_dtype(ty) + + def roots_impl(p): + # impl based on numpy: + # https://github.com/numpy/numpy/blob/master/numpy/lib/polynomial.py + + if len(p.shape) != 1: + raise ValueError("Input must be a 1d array.") + + non_zero = np.nonzero(p)[0] + + if len(non_zero) == 0: + return np.zeros(0, dtype=cast_t) + + tz = len(p) - non_zero[-1] - 1 + + # pull out the coeffs selecting between possible zero pads + p = p[int(non_zero[0]):int(non_zero[-1]) + 1] + + n = len(p) + if n > 1: + # construct companion matrix, ensure fortran order + # to give to eigvals, write to upper diag and then + # transpose. 
+ A = np.diag(np.ones((n - 2,), cast_t), 1).T + A[0, :] = -p[1:] / p[0] # normalize + roots = np.linalg.eigvals(A) + else: + roots = np.zeros(0, dtype=cast_t) + + # add in additional zeros on the end if needed + if tz > 0: + return np.hstack((roots, np.zeros(tz, dtype=cast_t))) + else: + return roots + + return roots_impl + + +@overload(pu.trimseq) +def polyutils_trimseq(seq): + if not type_can_asarray(seq): + msg = 'The argument "seq" must be array-like' + raise errors.TypingError(msg) + + if isinstance(seq, types.BaseTuple): + msg = 'Unsupported type %r for argument "seq"' + raise errors.TypingError(msg % (seq)) + + if np.ndim(seq) > 1: + msg = 'Coefficient array is not 1-d' + raise errors.NumbaValueError(msg) + + def impl(seq): + if len(seq) == 0: + return seq + else: + for i in range(len(seq) - 1, -1, -1): + if seq[i] != 0: + break + return seq[:i + 1] + + return impl + + +@overload(pu.as_series) +def polyutils_as_series(alist, trim=True): + if not type_can_asarray(alist): + msg = 'The argument "alist" must be array-like' + raise errors.TypingError(msg) + + if not isinstance(trim, (bool, types.Boolean)): + msg = 'The argument "trim" must be boolean' + raise errors.TypingError(msg) + + res_dtype = np.float64 + + tuple_input = isinstance(alist, types.BaseTuple) + list_input = isinstance(alist, types.List) + if tuple_input: + if np.any(np.array([np.ndim(a) > 1 for a in alist])): + raise errors.NumbaValueError("Coefficient array is not 1-d") + + res_dtype = _poly_result_dtype(*alist) + + elif list_input: + dt = as_dtype(_get_list_type(alist)) + res_dtype = np.result_type(dt, np.float64) + + else: + if np.ndim(alist) <= 2: + res_dtype = np.result_type(res_dtype, as_dtype(alist.dtype)) + else: + # If total dimension has ndim > 2, then coeff arrays are not 1D + raise errors.NumbaValueError("Coefficient array is not 1-d") + + def impl(alist, trim=True): + if tuple_input: + arrays = [] + for item in literal_unroll(alist): + 
arrays.append(np.atleast_1d(np.asarray(item)).astype(res_dtype)) + + elif list_input: + arrays = [np.atleast_1d(np.asarray(a)).astype(res_dtype) + for a in alist] + + else: + alist_arr = np.asarray(alist) + arrays = [np.atleast_1d(np.asarray(a)).astype(res_dtype) + for a in alist_arr] + + if min([a.size for a in arrays]) == 0: + raise ValueError("Coefficient array is empty") + + if trim: + arrays = [pu.trimseq(a) for a in arrays] + + ret = arrays + return ret + + return impl + + +def _get_list_type(l): + # A helper function that takes a list (possibly nested) and returns its + # dtype. Returns a Numba type. + dt = l.dtype + if (not isinstance(dt, types.Number)) and type_can_asarray(dt): + return _get_list_type(dt) + else: + return dt + + +def _poly_result_dtype(*args): + # A helper function that takes a tuple of inputs and returns their result + # dtype. Used for poly functions. Returns a NumPy dtype. + res_dtype = np.float64 + for item in args: + if isinstance(item, types.BaseTuple): + s1 = item.types + elif isinstance(item, types.List): + s1 = [_get_list_type(item)] + elif isinstance(item, types.Number): + s1 = [item] + elif isinstance(item, types.Array): + s1 = [item.dtype] + else: + msg = 'Input dtype must be scalar' + raise errors.TypingError(msg) + + try: + l = [as_dtype(t) for t in s1] + l.append(res_dtype) + res_dtype = (np.result_type(*l)) + except errors.NumbaNotImplementedError: + msg = 'Input dtype must be scalar.' 
+ raise errors.TypingError(msg) + + return from_dtype(res_dtype) + + +@overload(poly.polyadd) +def numpy_polyadd(c1, c2): + if not type_can_asarray(c1): + msg = 'The argument "c1" must be array-like' + raise errors.TypingError(msg) + + if not type_can_asarray(c2): + msg = 'The argument "c2" must be array-like' + raise errors.TypingError(msg) + + def impl(c1, c2): + arr1, arr2 = pu.as_series((c1, c2)) + diff = len(arr2) - len(arr1) + if diff > 0: + zr = np.zeros(diff) + arr1 = np.concatenate((arr1, zr)) + if diff < 0: + zr = np.zeros(-diff) + arr2 = np.concatenate((arr2, zr)) + val = arr1 + arr2 + return pu.trimseq(val) + + return impl + + +@overload(poly.polysub) +def numpy_polysub(c1, c2): + if not type_can_asarray(c1): + msg = 'The argument "c1" must be array-like' + raise errors.TypingError(msg) + + if not type_can_asarray(c2): + msg = 'The argument "c2" must be array-like' + raise errors.TypingError(msg) + + def impl(c1, c2): + arr1, arr2 = pu.as_series((c1, c2)) + diff = len(arr2) - len(arr1) + if diff > 0: + zr = np.zeros(diff) + arr1 = np.concatenate((arr1, zr)) + if diff < 0: + zr = np.zeros(-diff) + arr2 = np.concatenate((arr2, zr)) + val = arr1 - arr2 + return pu.trimseq(val) + + return impl + + +@overload(poly.polymul) +def numpy_polymul(c1, c2): + if not type_can_asarray(c1): + msg = 'The argument "c1" must be array-like' + raise errors.TypingError(msg) + + if not type_can_asarray(c2): + msg = 'The argument "c2" must be array-like' + raise errors.TypingError(msg) + + def impl(c1, c2): + arr1, arr2 = pu.as_series((c1, c2)) + val = np.convolve(arr1, arr2) + return pu.trimseq(val) + + return impl + + +@overload(poly.polyval, prefer_literal=True) +def poly_polyval(x, c, tensor=True): + if not type_can_asarray(x): + msg = 'The argument "x" must be array-like' + raise errors.TypingError(msg) + + if not type_can_asarray(c): + msg = 'The argument "c" must be array-like' + raise errors.TypingError(msg) + + if not isinstance(tensor, (bool, types.BooleanLiteral)): 
+ msg = 'The argument "tensor" must be boolean' + raise errors.RequireLiteralValue(msg) + + res_dtype = _poly_result_dtype(c, x) + + # Simulate new_shape = (1,) * np.ndim(x) in the general case + # If x is a number, new_shape is not used + # If x is a tuple or a list, then it's 1d hence new_shape=(1,) + x_nd_array = not isinstance(x, types.Number) + new_shape = (1,) + if isinstance(x, types.Array): + # If x is a np.array, then take its dimension + new_shape = (1,) * np.ndim(x) + + if isinstance(tensor, bool): + tensor_arg = tensor + else: + tensor_arg = tensor.literal_value + + def impl(x, c, tensor=True): + arr = np.asarray(c).astype(res_dtype) + inputs = np.asarray(x).astype(res_dtype) + if x_nd_array and tensor_arg: + arr = arr.reshape(arr.shape + new_shape) + + l = len(arr) + y = arr[l - 1] + inputs * 0 + + for i in range(l - 1, 0, -1): + y = arr[i - 1] + y * inputs + + return y + + return impl + + +@overload(poly.polyint) +def poly_polyint(c, m=1): + + if not type_can_asarray(c): + msg = 'The argument "c" must be array-like' + raise errors.TypingError(msg) + + if not isinstance(m, (int, types.Integer)): + msg = 'The argument "m" must be an integer' + raise errors.TypingError(msg) + + res_dtype = as_dtype(_poly_result_dtype(c)) + + if not np.issubdtype(res_dtype, np.number): + msg = f'Input dtype must be scalar. 
Found {res_dtype} instead' + raise errors.TypingError(msg) + + is1D = ((np.ndim(c) == 1) or + (isinstance(c, (types.List, types.BaseTuple)) + and isinstance(c.dtype, types.Number))) + + def impl(c, m=1): + c = np.asarray(c).astype(res_dtype) + cdt = c.dtype + for i in range(m): + n = len(c) + + tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt) + tmp[0] = c[0] * 0 + tmp[1] = c[0] + for j in range(1, n): + tmp[j + 1] = c[j] / (j + 1) + c = tmp + if is1D: + return pu.trimseq(c) + else: + return c + + return impl + + +@overload(poly.polydiv) +def numpy_polydiv(c1, c2): + if not type_can_asarray(c1): + msg = 'The argument "c1" must be array-like' + raise errors.TypingError(msg) + + if not type_can_asarray(c2): + msg = 'The argument "c2" must be array-like' + raise errors.TypingError(msg) + + def impl(c1, c2): + arr1, arr2 = pu.as_series((c1, c2)) + if arr2[-1] == 0: + raise ZeroDivisionError() + + l1 = len(arr1) + l2 = len(arr2) + if l1 < l2: + return arr1[:1] * 0, arr1 + elif l2 == 1: + return arr1 / arr2[-1], arr1[:1] * 0 + else: + dlen = l1 - l2 + scl = arr2[-1] + arr2 = arr2[:-1] / scl + i = dlen + j = l1 - 1 + while i >= 0: + arr1[i:j] -= arr2 * arr1[j] + i -= 1 + j -= 1 + return arr1[j + 1:] / scl, pu.trimseq(arr1[:j + 1]) + + return impl diff --git a/venv/lib/python3.10/site-packages/numba/np/random/__init__.py b/venv/lib/python3.10/site-packages/numba/np/random/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7a8418ac1ee38dd1dee9b915682a73958bd773c Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/_constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/_constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c77b05c8f22ec26dbee081966c5c7508e2cd6aa Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/_constants.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/distributions.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/distributions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09083756b32be6a847c15f533af13150589297b8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/distributions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/generator_core.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/generator_core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2eee1545d74f57eca8a403a489f3482fc69eae5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/generator_core.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/generator_methods.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/generator_methods.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4027a1418d122c65de7bab912fbe98471d962122 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/generator_methods.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/new_distributions.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/new_distributions.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..0768e1f970a20a15c5d90e795560bbc0793465c1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/new_distributions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/new_random_methods.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/new_random_methods.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70facbfe13662f661f751197684e3f9ccb15c3b4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/new_random_methods.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/old_distributions.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/old_distributions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2019b54248b66f3bda584627a7fcec3b88f74ed3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/old_distributions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/old_random_methods.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/old_random_methods.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40a5f9b5276c8c667b7226f772036f9cf703e7b5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/old_random_methods.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/random_methods.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/random_methods.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0f81290f6b4e78bf3940c90bb25149da169f610 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/numba/np/random/__pycache__/random_methods.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/random/_constants.py b/venv/lib/python3.10/site-packages/numba/np/random/_constants.py new file mode 100644 index 0000000000000000000000000000000000000000..7676ab87f4d1020abb659b1c9d9168094b51bbd4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/random/_constants.py @@ -0,0 +1,1229 @@ +import numpy as np +import ctypes + +# These constants are directly obtained from: +# https://github.com/numpy/numpy/blob/caccd283941b0bade7b71056138ded5379b1625f/numpy/random/src/distributions/ziggurat_constants.h + +ki_double = np.array([ + 0x000EF33D8025EF6A, 0x0000000000000000, 0x000C08BE98FBC6A8, + 0x000DA354FABD8142, 0x000E51F67EC1EEEA, 0x000EB255E9D3F77E, + 0x000EEF4B817ECAB9, 0x000F19470AFA44AA, 0x000F37ED61FFCB18, + 0x000F4F469561255C, 0x000F61A5E41BA396, 0x000F707A755396A4, + 0x000F7CB2EC28449A, 0x000F86F10C6357D3, 0x000F8FA6578325DE, + 0x000F9724C74DD0DA, 0x000F9DA907DBF509, 0x000FA360F581FA74, + 0x000FA86FDE5B4BF8, 0x000FACF160D354DC, 0x000FB0FB6718B90F, + 0x000FB49F8D5374C6, 0x000FB7EC2366FE77, 0x000FBAECE9A1E50E, + 0x000FBDAB9D040BED, 0x000FC03060FF6C57, 0x000FC2821037A248, + 0x000FC4A67AE25BD1, 0x000FC6A2977AEE31, 0x000FC87AA92896A4, + 0x000FCA325E4BDE85, 0x000FCBCCE902231A, 0x000FCD4D12F839C4, + 0x000FCEB54D8FEC99, 0x000FD007BF1DC930, 0x000FD1464DD6C4E6, + 0x000FD272A8E2F450, 0x000FD38E4FF0C91E, 0x000FD49A9990B478, + 0x000FD598B8920F53, 0x000FD689C08E99EC, 0x000FD76EA9C8E832, + 0x000FD848547B08E8, 0x000FD9178BAD2C8C, 0x000FD9DD07A7ADD2, + 0x000FDA9970105E8C, 0x000FDB4D5DC02E20, 0x000FDBF95C5BFCD0, + 0x000FDC9DEBB99A7D, 0x000FDD3B8118729D, 0x000FDDD288342F90, + 0x000FDE6364369F64, 0x000FDEEE708D514E, 0x000FDF7401A6B42E, + 0x000FDFF46599ED40, 0x000FE06FE4BC24F2, 0x000FE0E6C225A258, + 0x000FE1593C28B84C, 0x000FE1C78CBC3F99, 0x000FE231E9DB1CAA, + 0x000FE29885DA1B91, 0x000FE2FB8FB54186, 
0x000FE35B33558D4A, + 0x000FE3B799D0002A, 0x000FE410E99EAD7F, 0x000FE46746D47734, + 0x000FE4BAD34C095C, 0x000FE50BAED29524, 0x000FE559F74EBC78, + 0x000FE5A5C8E41212, 0x000FE5EF3E138689, 0x000FE6366FD91078, + 0x000FE67B75C6D578, 0x000FE6BE661E11AA, 0x000FE6FF55E5F4F2, + 0x000FE73E5900A702, 0x000FE77B823E9E39, 0x000FE7B6E37070A2, + 0x000FE7F08D774243, 0x000FE8289053F08C, 0x000FE85EFB35173A, + 0x000FE893DC840864, 0x000FE8C741F0CEBC, 0x000FE8F9387D4EF6, + 0x000FE929CC879B1D, 0x000FE95909D388EA, 0x000FE986FB939AA2, + 0x000FE9B3AC714866, 0x000FE9DF2694B6D5, 0x000FEA0973ABE67C, + 0x000FEA329CF166A4, 0x000FEA5AAB32952C, 0x000FEA81A6D5741A, + 0x000FEAA797DE1CF0, 0x000FEACC85F3D920, 0x000FEAF07865E63C, + 0x000FEB13762FEC13, 0x000FEB3585FE2A4A, 0x000FEB56AE3162B4, + 0x000FEB76F4E284FA, 0x000FEB965FE62014, 0x000FEBB4F4CF9D7C, + 0x000FEBD2B8F449D0, 0x000FEBEFB16E2E3E, 0x000FEC0BE31EBDE8, + 0x000FEC2752B15A15, 0x000FEC42049DAFD3, 0x000FEC5BFD29F196, + 0x000FEC75406CEEF4, 0x000FEC8DD2500CB4, 0x000FECA5B6911F12, + 0x000FECBCF0C427FE, 0x000FECD38454FB15, 0x000FECE97488C8B3, + 0x000FECFEC47F91B7, 0x000FED1377358528, 0x000FED278F844903, + 0x000FED3B10242F4C, 0x000FED4DFBAD586E, 0x000FED605498C3DD, + 0x000FED721D414FE8, 0x000FED8357E4A982, 0x000FED9406A42CC8, + 0x000FEDA42B85B704, 0x000FEDB3C8746AB4, 0x000FEDC2DF416652, + 0x000FEDD171A46E52, 0x000FEDDF813C8AD3, 0x000FEDED0F909980, + 0x000FEDFA1E0FD414, 0x000FEE06AE124BC4, 0x000FEE12C0D95A06, + 0x000FEE1E579006E0, 0x000FEE29734B6524, 0x000FEE34150AE4BC, + 0x000FEE3E3DB89B3C, 0x000FEE47EE2982F4, 0x000FEE51271DB086, + 0x000FEE59E9407F41, 0x000FEE623528B42E, 0x000FEE6A0B5897F1, + 0x000FEE716C3E077A, 0x000FEE7858327B82, 0x000FEE7ECF7B06BA, + 0x000FEE84D2484AB2, 0x000FEE8A60B66343, 0x000FEE8F7ACCC851, + 0x000FEE94207E25DA, 0x000FEE9851A829EA, 0x000FEE9C0E13485C, + 0x000FEE9F557273F4, 0x000FEEA22762CCAE, 0x000FEEA4836B42AC, + 0x000FEEA668FC2D71, 0x000FEEA7D76ED6FA, 0x000FEEA8CE04FA0A, + 0x000FEEA94BE8333B, 0x000FEEA950296410, 
0x000FEEA8D9C0075E, + 0x000FEEA7E7897654, 0x000FEEA678481D24, 0x000FEEA48AA29E83, + 0x000FEEA21D22E4DA, 0x000FEE9F2E352024, 0x000FEE9BBC26AF2E, + 0x000FEE97C524F2E4, 0x000FEE93473C0A3A, 0x000FEE8E40557516, + 0x000FEE88AE369C7A, 0x000FEE828E7F3DFD, 0x000FEE7BDEA7B888, + 0x000FEE749BFF37FF, 0x000FEE6CC3A9BD5E, 0x000FEE64529E007E, + 0x000FEE5B45A32888, 0x000FEE51994E57B6, 0x000FEE474A0006CF, + 0x000FEE3C53E12C50, 0x000FEE30B2E02AD8, 0x000FEE2462AD8205, + 0x000FEE175EB83C5A, 0x000FEE09A22A1447, 0x000FEDFB27E349CC, + 0x000FEDEBEA76216C, 0x000FEDDBE422047E, 0x000FEDCB0ECE39D3, + 0x000FEDB964042CF4, 0x000FEDA6DCE938C9, 0x000FED937237E98D, + 0x000FED7F1C38A836, 0x000FED69D2B9C02B, 0x000FED538D06AE00, + 0x000FED3C41DEA422, 0x000FED23E76A2FD8, 0x000FED0A732FE644, + 0x000FECEFDA07FE34, 0x000FECD4100EB7B8, 0x000FECB708956EB4, + 0x000FEC98B61230C1, 0x000FEC790A0DA978, 0x000FEC57F50F31FE, + 0x000FEC356686C962, 0x000FEC114CB4B335, 0x000FEBEB948E6FD0, + 0x000FEBC429A0B692, 0x000FEB9AF5EE0CDC, 0x000FEB6FE1C98542, + 0x000FEB42D3AD1F9E, 0x000FEB13B00B2D4B, 0x000FEAE2591A02E9, + 0x000FEAAEAE992257, 0x000FEA788D8EE326, 0x000FEA3FCFFD73E5, + 0x000FEA044C8DD9F6, 0x000FE9C5D62F563B, 0x000FE9843BA947A4, + 0x000FE93F471D4728, 0x000FE8F6BD76C5D6, 0x000FE8AA5DC4E8E6, + 0x000FE859E07AB1EA, 0x000FE804F690A940, 0x000FE7AB488233C0, + 0x000FE74C751F6AA5, 0x000FE6E8102AA202, 0x000FE67DA0B6ABD8, + 0x000FE60C9F38307E, 0x000FE5947338F742, 0x000FE51470977280, + 0x000FE48BD436F458, 0x000FE3F9BFFD1E37, 0x000FE35D35EEB19C, + 0x000FE2B5122FE4FE, 0x000FE20003995557, 0x000FE13C82788314, + 0x000FE068C4EE67B0, 0x000FDF82B02B71AA, 0x000FDE87C57EFEAA, + 0x000FDD7509C63BFD, 0x000FDC46E529BF13, 0x000FDAF8F82E0282, + 0x000FD985E1B2BA75, 0x000FD7E6EF48CF04, 0x000FD613ADBD650B, + 0x000FD40149E2F012, 0x000FD1A1A7B4C7AC, 0x000FCEE204761F9E, + 0x000FCBA8D85E11B2, 0x000FC7D26ECD2D22, 0x000FC32B2F1E22ED, + 0x000FBD6581C0B83A, 0x000FB606C4005434, 0x000FAC40582A2874, + 0x000F9E971E014598, 0x000F89FA48A41DFC, 
0x000F66C5F7F0302C, + 0x000F1A5A4B331C4A], dtype=np.uint64) + +wi_double = np.array([ + 8.68362706080130616677e-16, 4.77933017572773682428e-17, + 6.35435241740526230246e-17, 7.45487048124769627714e-17, + 8.32936681579309972857e-17, 9.06806040505948228243e-17, + 9.71486007656776183958e-17, 1.02947503142410192108e-16, + 1.08234302884476839838e-16, 1.13114701961090307945e-16, + 1.17663594570229211411e-16, 1.21936172787143633280e-16, + 1.25974399146370927864e-16, 1.29810998862640315416e-16, + 1.33472037368241227547e-16, 1.36978648425712032797e-16, + 1.40348230012423820659e-16, 1.43595294520569430270e-16, + 1.46732087423644219083e-16, 1.49769046683910367425e-16, + 1.52715150035961979750e-16, 1.55578181694607639484e-16, + 1.58364940092908853989e-16, 1.61081401752749279325e-16, + 1.63732852039698532012e-16, 1.66323990584208352778e-16, + 1.68859017086765964015e-16, 1.71341701765596607184e-16, + 1.73775443658648593310e-16, 1.76163319230009959832e-16, + 1.78508123169767272927e-16, 1.80812402857991522674e-16, + 1.83078487648267501776e-16, 1.85308513886180189386e-16, + 1.87504446393738816849e-16, 1.89668097007747596212e-16, + 1.91801140648386198029e-16, 1.93905129306251037069e-16, + 1.95981504266288244037e-16, 1.98031606831281739736e-16, + 2.00056687762733300198e-16, 2.02057915620716538808e-16, + 2.04036384154802118313e-16, 2.05993118874037063144e-16, + 2.07929082904140197311e-16, 2.09845182223703516690e-16, + 2.11742270357603418769e-16, 2.13621152594498681022e-16, + 2.15482589785814580926e-16, 2.17327301775643674990e-16, + 2.19155970504272708519e-16, 2.20969242822353175995e-16, + 2.22767733047895534948e-16, 2.24552025294143552381e-16, + 2.26322675592856786566e-16, 2.28080213834501706782e-16, + 2.29825145544246839061e-16, 2.31557953510408037008e-16, + 2.33279099280043561128e-16, 2.34989024534709550938e-16, + 2.36688152357916037468e-16, 2.38376888404542434981e-16, + 2.40055621981350627349e-16, 2.41724727046750252175e-16, + 2.43384563137110286400e-16, 2.45035476226149539878e-16, 
+ 2.46677799523270498158e-16, 2.48311854216108767769e-16, + 2.49937950162045242375e-16, 2.51556386532965786439e-16, + 2.53167452417135826983e-16, 2.54771427381694417303e-16, + 2.56368581998939683749e-16, 2.57959178339286723500e-16, + 2.59543470433517070146e-16, 2.61121704706701939097e-16, + 2.62694120385972564623e-16, 2.64260949884118951286e-16, + 2.65822419160830680292e-16, 2.67378748063236329361e-16, + 2.68930150647261591777e-16, 2.70476835481199518794e-16, + 2.72019005932773206655e-16, 2.73556860440867908686e-16, + 2.75090592773016664571e-16, 2.76620392269639032183e-16, + 2.78146444075954410103e-16, 2.79668929362423005309e-16, + 2.81188025534502074329e-16, 2.82703906432447923059e-16, + 2.84216742521840606520e-16, 2.85726701075460149289e-16, + 2.87233946347097994381e-16, 2.88738639737848191815e-16, + 2.90240939955384233230e-16, 2.91741003166694553259e-16, + 2.93238983144718163965e-16, 2.94735031409293489611e-16, + 2.96229297362806647792e-16, 2.97721928420902891115e-16, + 2.99213070138601307081e-16, 3.00702866332133102993e-16, + 3.02191459196806151971e-16, 3.03678989421180184427e-16, + 3.05165596297821922381e-16, 3.06651417830895451744e-16, + 3.08136590840829717032e-16, 3.09621251066292253306e-16, + 3.11105533263689296831e-16, 3.12589571304399892784e-16, + 3.14073498269944617203e-16, 3.15557446545280064031e-16, + 3.17041547910402852545e-16, 3.18525933630440648871e-16, + 3.20010734544401137886e-16, 3.21496081152744704901e-16, + 3.22982103703941557538e-16, 3.24468932280169778077e-16, + 3.25956696882307838340e-16, 3.27445527514370671802e-16, + 3.28935554267536967851e-16, 3.30426907403912838589e-16, + 3.31919717440175233652e-16, 3.33414115231237245918e-16, + 3.34910232054077845412e-16, 3.36408199691876507948e-16, + 3.37908150518594979994e-16, 3.39410217584148914282e-16, + 3.40914534700312603713e-16, 3.42421236527501816058e-16, + 3.43930458662583133920e-16, 3.45442337727858401604e-16, + 3.46957011461378353333e-16, 3.48474618808741370700e-16, + 
3.49995300016538099813e-16, 3.51519196727607440975e-16, + 3.53046452078274009054e-16, 3.54577210797743572160e-16, + 3.56111619309838843415e-16, 3.57649825837265051035e-16, + 3.59191980508602994994e-16, 3.60738235468235137839e-16, + 3.62288744989419151904e-16, 3.63843665590734438546e-16, + 3.65403156156136995766e-16, 3.66967378058870090021e-16, + 3.68536495289491401456e-16, 3.70110674588289834952e-16, + 3.71690085582382297792e-16, 3.73274900927794352614e-16, + 3.74865296456848868882e-16, 3.76461451331202869131e-16, + 3.78063548200896037651e-16, 3.79671773369794425924e-16, + 3.81286316967837738238e-16, 3.82907373130524317507e-16, + 3.84535140186095955858e-16, 3.86169820850914927119e-16, + 3.87811622433558721164e-16, 3.89460757048192620674e-16, + 3.91117441837820542060e-16, 3.92781899208054153270e-16, + 3.94454357072087711446e-16, 3.96135049107613542983e-16, + 3.97824215026468259474e-16, 3.99522100857856502444e-16, + 4.01228959246062907451e-16, 4.02945049763632792393e-16, + 4.04670639241074995115e-16, 4.06406002114225038723e-16, + 4.08151420790493873480e-16, 4.09907186035326643447e-16, + 4.11673597380302570170e-16, 4.13450963554423599878e-16, + 4.15239602940268833891e-16, 4.17039844056831587498e-16, + 4.18852026071011229572e-16, 4.20676499339901510978e-16, + 4.22513625986204937320e-16, 4.24363780509307796137e-16, + 4.26227350434779809917e-16, 4.28104737005311666397e-16, + 4.29996355916383230161e-16, 4.31902638100262944617e-16, + 4.33824030562279080411e-16, 4.35760997273684900553e-16, + 4.37714020125858747008e-16, 4.39683599951052137423e-16, + 4.41670257615420348435e-16, 4.43674535190656726604e-16, + 4.45696997211204306674e-16, 4.47738232024753387312e-16, + 4.49798853244554968009e-16, 4.51879501313005876278e-16, + 4.53980845187003400947e-16, 4.56103584156742206384e-16, + 4.58248449810956667052e-16, 4.60416208163115281428e-16, + 4.62607661954784567754e-16, 4.64823653154320737780e-16, + 4.67065065671263059081e-16, 4.69332828309332890697e-16, + 4.71627917983835129766e-16, 
4.73951363232586715165e-16, + 4.76304248053313737663e-16, 4.78687716104872284247e-16, + 4.81102975314741720538e-16, 4.83551302941152515162e-16, + 4.86034051145081195402e-16, 4.88552653135360343280e-16, + 4.91108629959526955862e-16, 4.93703598024033454728e-16, + 4.96339277440398725619e-16, 4.99017501309182245754e-16, + 5.01740226071808946011e-16, 5.04509543081872748637e-16, + 5.07327691573354207058e-16, 5.10197073234156184149e-16, + 5.13120268630678373200e-16, 5.16100055774322824569e-16, + 5.19139431175769859873e-16, 5.22241633800023428760e-16, + 5.25410172417759732697e-16, 5.28648856950494511482e-16, + 5.31961834533840037535e-16, 5.35353631181649688145e-16, + 5.38829200133405320160e-16, 5.42393978220171234073e-16, + 5.46053951907478041166e-16, 5.49815735089281410703e-16, + 5.53686661246787600374e-16, 5.57674893292657647836e-16, + 5.61789555355541665830e-16, 5.66040892008242216739e-16, + 5.70440462129138908417e-16, 5.75001376891989523684e-16, + 5.79738594572459365014e-16, 5.84669289345547900201e-16, + 5.89813317647789942685e-16, 5.95193814964144415532e-16, + 6.00837969627190832234e-16, 6.06778040933344851394e-16, + 6.13052720872528159123e-16, 6.19708989458162555387e-16, + 6.26804696330128439415e-16, 6.34412240712750598627e-16, + 6.42623965954805540945e-16, 6.51560331734499356881e-16, + 6.61382788509766415145e-16, 6.72315046250558662913e-16, + 6.84680341756425875856e-16, 6.98971833638761995415e-16, + 7.15999493483066421560e-16, 7.37242430179879890722e-16, + 7.65893637080557275482e-16, 8.11384933765648418565e-16], + dtype=np.float64) + +fi_double = np.array([ + 1.00000000000000000000e+00, 9.77101701267671596263e-01, + 9.59879091800106665211e-01, 9.45198953442299649730e-01, + 9.32060075959230460718e-01, 9.19991505039347012840e-01, + 9.08726440052130879366e-01, 8.98095921898343418910e-01, + 8.87984660755833377088e-01, 8.78309655808917399966e-01, + 8.69008688036857046555e-01, 8.60033621196331532488e-01, + 8.51346258458677951353e-01, 8.42915653112204177333e-01, + 
8.34716292986883434679e-01, 8.26726833946221373317e-01, + 8.18929191603702366642e-01, 8.11307874312656274185e-01, + 8.03849483170964274059e-01, 7.96542330422958966274e-01, + 7.89376143566024590648e-01, 7.82341832654802504798e-01, + 7.75431304981187174974e-01, 7.68637315798486264740e-01, + 7.61953346836795386565e-01, 7.55373506507096115214e-01, + 7.48892447219156820459e-01, 7.42505296340151055290e-01, + 7.36207598126862650112e-01, 7.29995264561476231435e-01, + 7.23864533468630222401e-01, 7.17811932630721960535e-01, + 7.11834248878248421200e-01, 7.05928501332754310127e-01, + 7.00091918136511615067e-01, 6.94321916126116711609e-01, + 6.88616083004671808432e-01, 6.82972161644994857355e-01, + 6.77388036218773526009e-01, 6.71861719897082099173e-01, + 6.66391343908750100056e-01, 6.60975147776663107813e-01, + 6.55611470579697264149e-01, 6.50298743110816701574e-01, + 6.45035480820822293424e-01, 6.39820277453056585060e-01, + 6.34651799287623608059e-01, 6.29528779924836690007e-01, + 6.24450015547026504592e-01, 6.19414360605834324325e-01, + 6.14420723888913888899e-01, 6.09468064925773433949e-01, + 6.04555390697467776029e-01, 5.99681752619125263415e-01, + 5.94846243767987448159e-01, 5.90047996332826008015e-01, + 5.85286179263371453274e-01, 5.80559996100790898232e-01, + 5.75868682972353718164e-01, 5.71211506735253227163e-01, + 5.66587763256164445025e-01, 5.61996775814524340831e-01, + 5.57437893618765945014e-01, 5.52910490425832290562e-01, + 5.48413963255265812791e-01, 5.43947731190026262382e-01, + 5.39511234256952132426e-01, 5.35103932380457614215e-01, + 5.30725304403662057062e-01, 5.26374847171684479008e-01, + 5.22052074672321841931e-01, 5.17756517229756352272e-01, + 5.13487720747326958914e-01, 5.09245245995747941592e-01, + 5.05028667943468123624e-01, 5.00837575126148681903e-01, + 4.96671569052489714213e-01, 4.92530263643868537748e-01, + 4.88413284705458028423e-01, 4.84320269426683325253e-01, + 4.80250865909046753544e-01, 4.76204732719505863248e-01, + 4.72181538467730199660e-01, 
4.68180961405693596422e-01, + 4.64202689048174355069e-01, 4.60246417812842867345e-01, + 4.56311852678716434184e-01, 4.52398706861848520777e-01, + 4.48506701507203064949e-01, 4.44635565395739396077e-01, + 4.40785034665803987508e-01, 4.36954852547985550526e-01, + 4.33144769112652261445e-01, 4.29354541029441427735e-01, + 4.25583931338021970170e-01, 4.21832709229495894654e-01, + 4.18100649837848226120e-01, 4.14387534040891125642e-01, + 4.10693148270188157500e-01, 4.07017284329473372217e-01, + 4.03359739221114510510e-01, 3.99720314980197222177e-01, + 3.96098818515832451492e-01, 3.92495061459315619512e-01, + 3.88908860018788715696e-01, 3.85340034840077283462e-01, + 3.81788410873393657674e-01, 3.78253817245619183840e-01, + 3.74736087137891138443e-01, 3.71235057668239498696e-01, + 3.67750569779032587814e-01, 3.64282468129004055601e-01, + 3.60830600989648031529e-01, 3.57394820145780500731e-01, + 3.53974980800076777232e-01, 3.50570941481406106455e-01, + 3.47182563956793643900e-01, 3.43809713146850715049e-01, + 3.40452257044521866547e-01, 3.37110066637006045021e-01, + 3.33783015830718454708e-01, 3.30470981379163586400e-01, + 3.27173842813601400970e-01, 3.23891482376391093290e-01, + 3.20623784956905355514e-01, 3.17370638029913609834e-01, + 3.14131931596337177215e-01, 3.10907558126286509559e-01, + 3.07697412504292056035e-01, 3.04501391976649993243e-01, + 3.01319396100803049698e-01, 2.98151326696685481377e-01, + 2.94997087799961810184e-01, 2.91856585617095209972e-01, + 2.88729728482182923521e-01, 2.85616426815501756042e-01, + 2.82516593083707578948e-01, 2.79430141761637940157e-01, + 2.76356989295668320494e-01, 2.73297054068577072172e-01, + 2.70250256365875463072e-01, 2.67216518343561471038e-01, + 2.64195763997261190426e-01, 2.61187919132721213522e-01, + 2.58192911337619235290e-01, 2.55210669954661961700e-01, + 2.52241126055942177508e-01, 2.49284212418528522415e-01, + 2.46339863501263828249e-01, 2.43408015422750312329e-01, + 2.40488605940500588254e-01, 2.37581574431238090606e-01, 
+ 2.34686861872330010392e-01, 2.31804410824338724684e-01, + 2.28934165414680340644e-01, 2.26076071322380278694e-01, + 2.23230075763917484855e-01, 2.20396127480151998723e-01, + 2.17574176724331130872e-01, 2.14764175251173583536e-01, + 2.11966076307030182324e-01, 2.09179834621125076977e-01, + 2.06405406397880797353e-01, 2.03642749310334908452e-01, + 2.00891822494656591136e-01, 1.98152586545775138971e-01, + 1.95425003514134304483e-01, 1.92709036903589175926e-01, + 1.90004651670464985713e-01, 1.87311814223800304768e-01, + 1.84630492426799269756e-01, 1.81960655599522513892e-01, + 1.79302274522847582272e-01, 1.76655321443734858455e-01, + 1.74019770081838553999e-01, 1.71395595637505754327e-01, + 1.68782774801211288285e-01, 1.66181285764481906364e-01, + 1.63591108232365584074e-01, 1.61012223437511009516e-01, + 1.58444614155924284882e-01, 1.55888264724479197465e-01, + 1.53343161060262855866e-01, 1.50809290681845675763e-01, + 1.48286642732574552861e-01, 1.45775208005994028060e-01, + 1.43274978973513461566e-01, 1.40785949814444699690e-01, + 1.38308116448550733057e-01, 1.35841476571253755301e-01, + 1.33386029691669155683e-01, 1.30941777173644358090e-01, + 1.28508722279999570981e-01, 1.26086870220185887081e-01, + 1.23676228201596571932e-01, 1.21276805484790306533e-01, + 1.18888613442910059947e-01, 1.16511665625610869035e-01, + 1.14145977827838487895e-01, 1.11791568163838089811e-01, + 1.09448457146811797824e-01, 1.07116667774683801961e-01, + 1.04796225622487068629e-01, 1.02487158941935246892e-01, + 1.00189498768810017482e-01, 9.79032790388624646338e-02, + 9.56285367130089991594e-02, 9.33653119126910124859e-02, + 9.11136480663737591268e-02, 8.88735920682758862021e-02, + 8.66451944505580717859e-02, 8.44285095703534715916e-02, + 8.22235958132029043366e-02, 8.00305158146630696292e-02, + 7.78493367020961224423e-02, 7.56801303589271778804e-02, + 7.35229737139813238622e-02, 7.13779490588904025339e-02, + 6.92451443970067553879e-02, 6.71246538277884968737e-02, + 
6.50165779712428976156e-02, 6.29210244377581412456e-02, + 6.08381083495398780614e-02, 5.87679529209337372930e-02, + 5.67106901062029017391e-02, 5.46664613248889208474e-02, + 5.26354182767921896513e-02, 5.06177238609477817000e-02, + 4.86135532158685421122e-02, 4.66230949019303814174e-02, + 4.46465522512944634759e-02, 4.26841449164744590750e-02, + 4.07361106559409394401e-02, 3.88027074045261474722e-02, + 3.68842156885673053135e-02, 3.49809414617161251737e-02, + 3.30932194585785779961e-02, 3.12214171919203004046e-02, + 2.93659397581333588001e-02, 2.75272356696031131329e-02, + 2.57058040085489103443e-02, 2.39022033057958785407e-02, + 2.21170627073088502113e-02, 2.03510962300445102935e-02, + 1.86051212757246224594e-02, 1.68800831525431419000e-02, + 1.51770883079353092332e-02, 1.34974506017398673818e-02, + 1.18427578579078790488e-02, 1.02149714397014590439e-02, + 8.61658276939872638800e-03, 7.05087547137322242369e-03, + 5.52240329925099155545e-03, 4.03797259336302356153e-03, + 2.60907274610215926189e-03, 1.26028593049859797236e-03], + dtype=np.float64) + +ki_float = np.array([ + 0x007799EC, 0x00000000, 0x006045F5, 0x006D1AA8, 0x00728FB4, + 0x007592AF, 0x00777A5C, 0x0078CA38, 0x0079BF6B, 0x007A7A35, + 0x007B0D2F, 0x007B83D4, 0x007BE597, 0x007C3788, 0x007C7D33, + 0x007CB926, 0x007CED48, 0x007D1B08, 0x007D437F, 0x007D678B, + 0x007D87DB, 0x007DA4FC, 0x007DBF61, 0x007DD767, 0x007DED5D, + 0x007E0183, 0x007E1411, 0x007E2534, 0x007E3515, 0x007E43D5, + 0x007E5193, 0x007E5E67, 0x007E6A69, 0x007E75AA, 0x007E803E, + 0x007E8A32, 0x007E9395, 0x007E9C72, 0x007EA4D5, 0x007EACC6, + 0x007EB44E, 0x007EBB75, 0x007EC243, 0x007EC8BC, 0x007ECEE8, + 0x007ED4CC, 0x007EDA6B, 0x007EDFCB, 0x007EE4EF, 0x007EE9DC, + 0x007EEE94, 0x007EF31B, 0x007EF774, 0x007EFBA0, 0x007EFFA3, + 0x007F037F, 0x007F0736, 0x007F0ACA, 0x007F0E3C, 0x007F118F, + 0x007F14C4, 0x007F17DC, 0x007F1ADA, 0x007F1DBD, 0x007F2087, + 0x007F233A, 0x007F25D7, 0x007F285D, 0x007F2AD0, 0x007F2D2E, + 0x007F2F7A, 0x007F31B3, 0x007F33DC, 
0x007F35F3, 0x007F37FB, + 0x007F39F3, 0x007F3BDC, 0x007F3DB7, 0x007F3F84, 0x007F4145, + 0x007F42F8, 0x007F449F, 0x007F463A, 0x007F47CA, 0x007F494E, + 0x007F4AC8, 0x007F4C38, 0x007F4D9D, 0x007F4EF9, 0x007F504C, + 0x007F5195, 0x007F52D5, 0x007F540D, 0x007F553D, 0x007F5664, + 0x007F5784, 0x007F589C, 0x007F59AC, 0x007F5AB5, 0x007F5BB8, + 0x007F5CB3, 0x007F5DA8, 0x007F5E96, 0x007F5F7E, 0x007F605F, + 0x007F613B, 0x007F6210, 0x007F62E0, 0x007F63AA, 0x007F646F, + 0x007F652E, 0x007F65E8, 0x007F669C, 0x007F674C, 0x007F67F6, + 0x007F689C, 0x007F693C, 0x007F69D9, 0x007F6A70, 0x007F6B03, + 0x007F6B91, 0x007F6C1B, 0x007F6CA0, 0x007F6D21, 0x007F6D9E, + 0x007F6E17, 0x007F6E8C, 0x007F6EFC, 0x007F6F68, 0x007F6FD1, + 0x007F7035, 0x007F7096, 0x007F70F3, 0x007F714C, 0x007F71A1, + 0x007F71F2, 0x007F723F, 0x007F7289, 0x007F72CF, 0x007F7312, + 0x007F7350, 0x007F738B, 0x007F73C3, 0x007F73F6, 0x007F7427, + 0x007F7453, 0x007F747C, 0x007F74A1, 0x007F74C3, 0x007F74E0, + 0x007F74FB, 0x007F7511, 0x007F7524, 0x007F7533, 0x007F753F, + 0x007F7546, 0x007F754A, 0x007F754B, 0x007F7547, 0x007F753F, + 0x007F7534, 0x007F7524, 0x007F7511, 0x007F74F9, 0x007F74DE, + 0x007F74BE, 0x007F749A, 0x007F7472, 0x007F7445, 0x007F7414, + 0x007F73DF, 0x007F73A5, 0x007F7366, 0x007F7323, 0x007F72DA, + 0x007F728D, 0x007F723A, 0x007F71E3, 0x007F7186, 0x007F7123, + 0x007F70BB, 0x007F704D, 0x007F6FD9, 0x007F6F5F, 0x007F6EDF, + 0x007F6E58, 0x007F6DCB, 0x007F6D37, 0x007F6C9C, 0x007F6BF9, + 0x007F6B4F, 0x007F6A9C, 0x007F69E2, 0x007F691F, 0x007F6854, + 0x007F677F, 0x007F66A1, 0x007F65B8, 0x007F64C6, 0x007F63C8, + 0x007F62C0, 0x007F61AB, 0x007F608A, 0x007F5F5D, 0x007F5E21, + 0x007F5CD8, 0x007F5B7F, 0x007F5A17, 0x007F589E, 0x007F5713, + 0x007F5575, 0x007F53C4, 0x007F51FE, 0x007F5022, 0x007F4E2F, + 0x007F4C22, 0x007F49FA, 0x007F47B6, 0x007F4553, 0x007F42CF, + 0x007F4028, 0x007F3D5A, 0x007F3A64, 0x007F3741, 0x007F33ED, + 0x007F3065, 0x007F2CA4, 0x007F28A4, 0x007F245F, 0x007F1FCE, + 0x007F1AEA, 0x007F15A9, 0x007F1000, 0x007F09E4, 
0x007F0346, + 0x007EFC16, 0x007EF43E, 0x007EEBA8, 0x007EE237, 0x007ED7C8, + 0x007ECC2F, 0x007EBF37, 0x007EB09D, 0x007EA00A, 0x007E8D0D, + 0x007E7710, 0x007E5D47, 0x007E3E93, 0x007E1959, 0x007DEB2C, + 0x007DB036, 0x007D6203, 0x007CF4B9, 0x007C4FD2, 0x007B3630, + 0x0078D2D2], dtype=np.uint32) + +wi_float = np.array([ + 4.66198677960027669255e-07, 2.56588335019207033255e-08, + 3.41146697750176784592e-08, 4.00230311410932959821e-08, + 4.47179475877737745459e-08, 4.86837785973537366722e-08, + 5.21562578925932412861e-08, 5.52695199001886257153e-08, + 5.81078488992733116465e-08, 6.07279932024587421409e-08, + 6.31701613261172047795e-08, 6.54639842900233842742e-08, + 6.76319905583641815324e-08, 6.96917493470166688656e-08, + 7.16572544283857476692e-08, 7.35398519048393832969e-08, + 7.53488822443557479279e-08, 7.70921367281667127885e-08, + 7.87761895947956022626e-08, 8.04066446825615346857e-08, + 8.19883218760237408659e-08, 8.35254002936857088917e-08, + 8.50215298165053411740e-08, 8.64799190652369040985e-08, + 8.79034055989140110861e-08, 8.92945125124233511541e-08, + 9.06554945027956262312e-08, 9.19883756905278607229e-08, + 9.32949809202232869780e-08, 9.45769618559625849039e-08, + 9.58358188855612866442e-08, 9.70729196232813152662e-08, + 9.82895146313061088986e-08, 9.94867508514382224721e-08, + 1.00665683139461669691e-07, 1.01827284217853923044e-07, + 1.02972453302539369464e-07, 1.04102023612124921572e-07, + 1.05216768930574060431e-07, 1.06317409364335657741e-07, + 1.07404616410877866490e-07, 1.08479017436113134283e-07, + 1.09541199642370962438e-07, 1.10591713595628691212e-07, + 1.11631076370069356306e-07, 1.12659774359245895023e-07, + 1.13678265795837113569e-07, 1.14686983015899673063e-07, + 1.15686334498432158725e-07, 1.16676706706789039179e-07, + 1.17658465754873988919e-07, 1.18631958917986203582e-07, + 1.19597516005596215528e-07, 1.20555450611113917226e-07, + 1.21506061251817163689e-07, 1.22449632410483948386e-07, + 1.23386435488872536840e-07, 1.24316729681986364321e-07, 
+ 1.25240762781015530062e-07, 1.26158771911939892267e-07, + 1.27070984215989333455e-07, 1.27977617477468922011e-07, + 1.28878880703854958297e-07, 1.29774974662539874521e-07, + 1.30666092378141980504e-07, 1.31552419593887221722e-07, + 1.32434135200211397569e-07, 1.33311411633413359243e-07, + 1.34184415246907777059e-07, 1.35053306657377859830e-07, + 1.35918241067904315860e-07, 1.36779368569952053923e-07, + 1.37636834425917531047e-07, 1.38490779333783508675e-07, + 1.39341339675287344817e-07, 1.40188647748881762555e-07, + 1.41032831988654882776e-07, 1.41874017170273235693e-07, + 1.42712324604921442006e-07, 1.43547872322127921816e-07, + 1.44380775242292721080e-07, 1.45211145339665544509e-07, + 1.46039091796461362146e-07, 1.46864721148745476208e-07, + 1.47688137424670065700e-07, 1.48509442275598857119e-07, + 1.49328735100614641423e-07, 1.50146113164867617390e-07, + 1.50961671712187416111e-07, 1.51775504072350982845e-07, + 1.52587701763369746341e-07, 1.53398354589133671168e-07, + 1.54207550732725568797e-07, 1.55015376845697999657e-07, + 1.55821918133584372604e-07, 1.56627258437898192833e-07, + 1.57431480314857468671e-07, 1.58234665111056041043e-07, + 1.59036893036289199880e-07, 1.59838243233728855017e-07, + 1.60638793847630850137e-07, 1.61438622088746393909e-07, + 1.62237804297600106296e-07, 1.63036416005787357730e-07, + 1.63834531995435479082e-07, 1.64632226356965902954e-07, + 1.65429572545287097020e-07, 1.66226643434541294491e-07, + 1.67023511371523209274e-07, 1.67820248227882200051e-07, + 1.68616925451215588827e-07, 1.69413614115155757272e-07, + 1.70210384968549673733e-07, 1.71007308483826142122e-07, + 1.71804454904642543391e-07, 1.72601894292900061024e-07, + 1.73399696575213681990e-07, 1.74197931588920988271e-07, + 1.74996669127712165834e-07, 1.75795978986961275677e-07, + 1.76595931008838063924e-07, 1.77396595127278238022e-07, + 1.78198041412889183130e-07, 1.79000340117867431104e-07, + 1.79803561721004406185e-07, 1.80607776972855859813e-07, + 
1.81413056941151359868e-07, 1.82219473056520464354e-07, + 1.83027097158612474240e-07, 1.83836001542687613069e-07, + 1.84646259006759307383e-07, 1.85457942899367347876e-07, + 1.86271127168064649331e-07, 1.87085886408701333260e-07, + 1.87902295915592424729e-07, 1.88720431732658022414e-07, + 1.89540370705627262627e-07, 1.90362190535400839128e-07, + 1.91185969832669990437e-07, 1.92011788173893651535e-07, + 1.92839726158739913768e-07, 1.93669865469102145482e-07, + 1.94502288929804890433e-07, 1.95337080571120616772e-07, + 1.96174325693223683314e-07, 1.97014110932714374919e-07, + 1.97856524331352952716e-07, 1.98701655407150388211e-07, + 1.99549595227971635348e-07, 2.00400436487814600236e-07, + 2.01254273585938820883e-07, 2.02111202709026498408e-07, + 2.02971321916571014951e-07, 2.03834731229698846698e-07, + 2.04701532723644121196e-07, 2.05571830624108885378e-07, + 2.06445731407757185541e-07, 2.07323343907107312957e-07, + 2.08204779420104330037e-07, 2.09090151824673600213e-07, + 2.09979577698577670508e-07, 2.10873176444920111011e-07, + 2.11771070423665379388e-07, 2.12673385089569268965e-07, + 2.13580249136944118603e-07, 2.14491794651713402832e-07, + 2.15408157271244625533e-07, 2.16329476352486921685e-07, + 2.17255895148978920488e-07, 2.18187560997337924713e-07, + 2.19124625513888206785e-07, 2.20067244802139479285e-07, + 2.21015579671883851683e-07, 2.21969795870742159701e-07, + 2.22930064329060010376e-07, 2.23896561419128954210e-07, + 2.24869469229791575583e-07, 2.25848975857580322189e-07, + 2.26835275715640744118e-07, 2.27828569861799901001e-07, + 2.28829066347263833069e-07, 2.29836980587561823183e-07, + 2.30852535757505260518e-07, 2.31875963212094114516e-07, + 2.32907502935486642699e-07, 2.33947404020352726160e-07, + 2.34995925180156140289e-07, 2.36053335297164516378e-07, + 2.37119914009265667728e-07, 2.38195952338983970691e-07, + 2.39281753368440712742e-07, 2.40377632964396957621e-07, + 2.41483920557958384709e-07, 2.42600959984018662258e-07, + 2.43729110386077326413e-07, 
2.44868747192698939290e-07, + 2.46020263172594533433e-07, 2.47184069576113545901e-07, + 2.48360597371852893654e-07, 2.49550298588131851232e-07, + 2.50753647770270890721e-07, 2.51971143565970967140e-07, + 2.53203310452642767375e-07, 2.54450700622322097890e-07, + 2.55713896041856770961e-07, 2.56993510708419870887e-07, + 2.58290193123138874550e-07, 2.59604629008804833146e-07, + 2.60937544301314385690e-07, 2.62289708448800566945e-07, + 2.63661938057441759882e-07, 2.65055100928844238758e-07, + 2.66470120540847889467e-07, 2.67907981031821866252e-07, + 2.69369732758258246335e-07, 2.70856498507068313229e-07, + 2.72369480457841388042e-07, 2.73909968006952220135e-07, + 2.75479346585437289399e-07, 2.77079107626811561009e-07, + 2.78710859870496796972e-07, 2.80376342222588603820e-07, + 2.82077438439999912690e-07, 2.83816193958769527230e-07, + 2.85594835255375795814e-07, 2.87415792215003905739e-07, + 2.89281724087851835900e-07, 2.91195549750371467233e-07, + 2.93160483161771875581e-07, 2.95180075129332912389e-07, + 2.97258262785797916083e-07, 2.99399428561531794298e-07, + 3.01608470935804138388e-07, 3.03890889921758510417e-07, + 3.06252891144972267537e-07, 3.08701513613258141075e-07, + 3.11244787989714509378e-07, 3.13891934589336184321e-07, + 3.16653613755314681314e-07, 3.19542246256559459667e-07, + 3.22572428717978242099e-07, 3.25761480217458181578e-07, + 3.29130173358915628534e-07, 3.32703730345002116955e-07, + 3.36513208964639108346e-07, 3.40597478255417943913e-07, + 3.45006114675213401550e-07, 3.49803789521323211592e-07, + 3.55077180848341416206e-07, 3.60946392031859609868e-07, + 3.67584959507244041831e-07, 3.75257645787954431030e-07, + 3.84399301057791926300e-07, 3.95804015855768440983e-07, + 4.11186015434435801956e-07, 4.35608969373823260746e-07], + dtype=np.float32) + +fi_float = np.array([ + 1.00000000000000000000e+00, 9.77101701267671596263e-01, + 9.59879091800106665211e-01, 9.45198953442299649730e-01, + 9.32060075959230460718e-01, 9.19991505039347012840e-01, + 
9.08726440052130879366e-01, 8.98095921898343418910e-01, + 8.87984660755833377088e-01, 8.78309655808917399966e-01, + 8.69008688036857046555e-01, 8.60033621196331532488e-01, + 8.51346258458677951353e-01, 8.42915653112204177333e-01, + 8.34716292986883434679e-01, 8.26726833946221373317e-01, + 8.18929191603702366642e-01, 8.11307874312656274185e-01, + 8.03849483170964274059e-01, 7.96542330422958966274e-01, + 7.89376143566024590648e-01, 7.82341832654802504798e-01, + 7.75431304981187174974e-01, 7.68637315798486264740e-01, + 7.61953346836795386565e-01, 7.55373506507096115214e-01, + 7.48892447219156820459e-01, 7.42505296340151055290e-01, + 7.36207598126862650112e-01, 7.29995264561476231435e-01, + 7.23864533468630222401e-01, 7.17811932630721960535e-01, + 7.11834248878248421200e-01, 7.05928501332754310127e-01, + 7.00091918136511615067e-01, 6.94321916126116711609e-01, + 6.88616083004671808432e-01, 6.82972161644994857355e-01, + 6.77388036218773526009e-01, 6.71861719897082099173e-01, + 6.66391343908750100056e-01, 6.60975147776663107813e-01, + 6.55611470579697264149e-01, 6.50298743110816701574e-01, + 6.45035480820822293424e-01, 6.39820277453056585060e-01, + 6.34651799287623608059e-01, 6.29528779924836690007e-01, + 6.24450015547026504592e-01, 6.19414360605834324325e-01, + 6.14420723888913888899e-01, 6.09468064925773433949e-01, + 6.04555390697467776029e-01, 5.99681752619125263415e-01, + 5.94846243767987448159e-01, 5.90047996332826008015e-01, + 5.85286179263371453274e-01, 5.80559996100790898232e-01, + 5.75868682972353718164e-01, 5.71211506735253227163e-01, + 5.66587763256164445025e-01, 5.61996775814524340831e-01, + 5.57437893618765945014e-01, 5.52910490425832290562e-01, + 5.48413963255265812791e-01, 5.43947731190026262382e-01, + 5.39511234256952132426e-01, 5.35103932380457614215e-01, + 5.30725304403662057062e-01, 5.26374847171684479008e-01, + 5.22052074672321841931e-01, 5.17756517229756352272e-01, + 5.13487720747326958914e-01, 5.09245245995747941592e-01, + 5.05028667943468123624e-01, 
5.00837575126148681903e-01, + 4.96671569052489714213e-01, 4.92530263643868537748e-01, + 4.88413284705458028423e-01, 4.84320269426683325253e-01, + 4.80250865909046753544e-01, 4.76204732719505863248e-01, + 4.72181538467730199660e-01, 4.68180961405693596422e-01, + 4.64202689048174355069e-01, 4.60246417812842867345e-01, + 4.56311852678716434184e-01, 4.52398706861848520777e-01, + 4.48506701507203064949e-01, 4.44635565395739396077e-01, + 4.40785034665803987508e-01, 4.36954852547985550526e-01, + 4.33144769112652261445e-01, 4.29354541029441427735e-01, + 4.25583931338021970170e-01, 4.21832709229495894654e-01, + 4.18100649837848226120e-01, 4.14387534040891125642e-01, + 4.10693148270188157500e-01, 4.07017284329473372217e-01, + 4.03359739221114510510e-01, 3.99720314980197222177e-01, + 3.96098818515832451492e-01, 3.92495061459315619512e-01, + 3.88908860018788715696e-01, 3.85340034840077283462e-01, + 3.81788410873393657674e-01, 3.78253817245619183840e-01, + 3.74736087137891138443e-01, 3.71235057668239498696e-01, + 3.67750569779032587814e-01, 3.64282468129004055601e-01, + 3.60830600989648031529e-01, 3.57394820145780500731e-01, + 3.53974980800076777232e-01, 3.50570941481406106455e-01, + 3.47182563956793643900e-01, 3.43809713146850715049e-01, + 3.40452257044521866547e-01, 3.37110066637006045021e-01, + 3.33783015830718454708e-01, 3.30470981379163586400e-01, + 3.27173842813601400970e-01, 3.23891482376391093290e-01, + 3.20623784956905355514e-01, 3.17370638029913609834e-01, + 3.14131931596337177215e-01, 3.10907558126286509559e-01, + 3.07697412504292056035e-01, 3.04501391976649993243e-01, + 3.01319396100803049698e-01, 2.98151326696685481377e-01, + 2.94997087799961810184e-01, 2.91856585617095209972e-01, + 2.88729728482182923521e-01, 2.85616426815501756042e-01, + 2.82516593083707578948e-01, 2.79430141761637940157e-01, + 2.76356989295668320494e-01, 2.73297054068577072172e-01, + 2.70250256365875463072e-01, 2.67216518343561471038e-01, + 2.64195763997261190426e-01, 2.61187919132721213522e-01, 
+ 2.58192911337619235290e-01, 2.55210669954661961700e-01, + 2.52241126055942177508e-01, 2.49284212418528522415e-01, + 2.46339863501263828249e-01, 2.43408015422750312329e-01, + 2.40488605940500588254e-01, 2.37581574431238090606e-01, + 2.34686861872330010392e-01, 2.31804410824338724684e-01, + 2.28934165414680340644e-01, 2.26076071322380278694e-01, + 2.23230075763917484855e-01, 2.20396127480151998723e-01, + 2.17574176724331130872e-01, 2.14764175251173583536e-01, + 2.11966076307030182324e-01, 2.09179834621125076977e-01, + 2.06405406397880797353e-01, 2.03642749310334908452e-01, + 2.00891822494656591136e-01, 1.98152586545775138971e-01, + 1.95425003514134304483e-01, 1.92709036903589175926e-01, + 1.90004651670464985713e-01, 1.87311814223800304768e-01, + 1.84630492426799269756e-01, 1.81960655599522513892e-01, + 1.79302274522847582272e-01, 1.76655321443734858455e-01, + 1.74019770081838553999e-01, 1.71395595637505754327e-01, + 1.68782774801211288285e-01, 1.66181285764481906364e-01, + 1.63591108232365584074e-01, 1.61012223437511009516e-01, + 1.58444614155924284882e-01, 1.55888264724479197465e-01, + 1.53343161060262855866e-01, 1.50809290681845675763e-01, + 1.48286642732574552861e-01, 1.45775208005994028060e-01, + 1.43274978973513461566e-01, 1.40785949814444699690e-01, + 1.38308116448550733057e-01, 1.35841476571253755301e-01, + 1.33386029691669155683e-01, 1.30941777173644358090e-01, + 1.28508722279999570981e-01, 1.26086870220185887081e-01, + 1.23676228201596571932e-01, 1.21276805484790306533e-01, + 1.18888613442910059947e-01, 1.16511665625610869035e-01, + 1.14145977827838487895e-01, 1.11791568163838089811e-01, + 1.09448457146811797824e-01, 1.07116667774683801961e-01, + 1.04796225622487068629e-01, 1.02487158941935246892e-01, + 1.00189498768810017482e-01, 9.79032790388624646338e-02, + 9.56285367130089991594e-02, 9.33653119126910124859e-02, + 9.11136480663737591268e-02, 8.88735920682758862021e-02, + 8.66451944505580717859e-02, 8.44285095703534715916e-02, + 
8.22235958132029043366e-02, 8.00305158146630696292e-02, + 7.78493367020961224423e-02, 7.56801303589271778804e-02, + 7.35229737139813238622e-02, 7.13779490588904025339e-02, + 6.92451443970067553879e-02, 6.71246538277884968737e-02, + 6.50165779712428976156e-02, 6.29210244377581412456e-02, + 6.08381083495398780614e-02, 5.87679529209337372930e-02, + 5.67106901062029017391e-02, 5.46664613248889208474e-02, + 5.26354182767921896513e-02, 5.06177238609477817000e-02, + 4.86135532158685421122e-02, 4.66230949019303814174e-02, + 4.46465522512944634759e-02, 4.26841449164744590750e-02, + 4.07361106559409394401e-02, 3.88027074045261474722e-02, + 3.68842156885673053135e-02, 3.49809414617161251737e-02, + 3.30932194585785779961e-02, 3.12214171919203004046e-02, + 2.93659397581333588001e-02, 2.75272356696031131329e-02, + 2.57058040085489103443e-02, 2.39022033057958785407e-02, + 2.21170627073088502113e-02, 2.03510962300445102935e-02, + 1.86051212757246224594e-02, 1.68800831525431419000e-02, + 1.51770883079353092332e-02, 1.34974506017398673818e-02, + 1.18427578579078790488e-02, 1.02149714397014590439e-02, + 8.61658276939872638800e-03, 7.05087547137322242369e-03, + 5.52240329925099155545e-03, 4.03797259336302356153e-03, + 2.60907274610215926189e-03, 1.26028593049859797236e-03], + dtype=np.float32) + +ke_double = np.array([ + 0x001C5214272497C6, 0x0000000000000000, 0x00137D5BD79C317E, + 0x00186EF58E3F3C10, 0x001A9BB7320EB0AE, 0x001BD127F719447C, + 0x001C951D0F88651A, 0x001D1BFE2D5C3972, 0x001D7E5BD56B18B2, + 0x001DC934DD172C70, 0x001E0409DFAC9DC8, 0x001E337B71D47836, + 0x001E5A8B177CB7A2, 0x001E7B42096F046C, 0x001E970DAF08AE3E, + 0x001EAEF5B14EF09E, 0x001EC3BD07B46556, 0x001ED5F6F08799CE, + 0x001EE614AE6E5688, 0x001EF46ECA361CD0, 0x001F014B76DDD4A4, + 0x001F0CE313A796B6, 0x001F176369F1F77A, 0x001F20F20C452570, + 0x001F29AE1951A874, 0x001F31B18FB95532, 0x001F39125157C106, + 0x001F3FE2EB6E694C, 0x001F463332D788FA, 0x001F4C10BF1D3A0E, + 0x001F51874C5C3322, 0x001F56A109C3ECC0, 
0x001F5B66D9099996, + 0x001F5FE08210D08C, 0x001F6414DD445772, 0x001F6809F6859678, + 0x001F6BC52A2B02E6, 0x001F6F4B3D32E4F4, 0x001F72A07190F13A, + 0x001F75C8974D09D6, 0x001F78C71B045CC0, 0x001F7B9F12413FF4, + 0x001F7E5346079F8A, 0x001F80E63BE21138, 0x001F835A3DAD9162, + 0x001F85B16056B912, 0x001F87ED89B24262, 0x001F8A10759374FA, + 0x001F8C1BBA3D39AC, 0x001F8E10CC45D04A, 0x001F8FF102013E16, + 0x001F91BD968358E0, 0x001F9377AC47AFD8, 0x001F95204F8B64DA, + 0x001F96B878633892, 0x001F98410C968892, 0x001F99BAE146BA80, + 0x001F9B26BC697F00, 0x001F9C85561B717A, 0x001F9DD759CFD802, + 0x001F9F1D6761A1CE, 0x001FA058140936C0, 0x001FA187EB3A3338, + 0x001FA2AD6F6BC4FC, 0x001FA3C91ACE0682, 0x001FA4DB5FEE6AA2, + 0x001FA5E4AA4D097C, 0x001FA6E55EE46782, 0x001FA7DDDCA51EC4, + 0x001FA8CE7CE6A874, 0x001FA9B793CE5FEE, 0x001FAA9970ADB858, + 0x001FAB745E588232, 0x001FAC48A3740584, 0x001FAD1682BF9FE8, + 0x001FADDE3B5782C0, 0x001FAEA008F21D6C, 0x001FAF5C2418B07E, + 0x001FB012C25B7A12, 0x001FB0C41681DFF4, 0x001FB17050B6F1FA, + 0x001FB2179EB2963A, 0x001FB2BA2BDFA84A, 0x001FB358217F4E18, + 0x001FB3F1A6C9BE0C, 0x001FB486E10CACD6, 0x001FB517F3C793FC, + 0x001FB5A500C5FDAA, 0x001FB62E2837FE58, 0x001FB6B388C9010A, + 0x001FB7353FB50798, 0x001FB7B368DC7DA8, 0x001FB82E1ED6BA08, + 0x001FB8A57B0347F6, 0x001FB919959A0F74, 0x001FB98A85BA7204, + 0x001FB9F861796F26, 0x001FBA633DEEE286, 0x001FBACB2F41EC16, + 0x001FBB3048B49144, 0x001FBB929CAEA4E2, 0x001FBBF23CC8029E, + 0x001FBC4F39D22994, 0x001FBCA9A3E140D4, 0x001FBD018A548F9E, + 0x001FBD56FBDE729C, 0x001FBDAA068BD66A, 0x001FBDFAB7CB3F40, + 0x001FBE491C7364DE, 0x001FBE9540C9695E, 0x001FBEDF3086B128, + 0x001FBF26F6DE6174, 0x001FBF6C9E828AE2, 0x001FBFB031A904C4, + 0x001FBFF1BA0FFDB0, 0x001FC03141024588, 0x001FC06ECF5B54B2, + 0x001FC0AA6D8B1426, 0x001FC0E42399698A, 0x001FC11BF9298A64, + 0x001FC151F57D1942, 0x001FC1861F770F4A, 0x001FC1B87D9E74B4, + 0x001FC1E91620EA42, 0x001FC217EED505DE, 0x001FC2450D3C83FE, + 0x001FC27076864FC2, 0x001FC29A2F90630E, 
0x001FC2C23CE98046, + 0x001FC2E8A2D2C6B4, 0x001FC30D654122EC, 0x001FC33087DE9C0E, + 0x001FC3520E0B7EC6, 0x001FC371FADF66F8, 0x001FC390512A2886, + 0x001FC3AD137497FA, 0x001FC3C844013348, 0x001FC3E1E4CCAB40, + 0x001FC3F9F78E4DA8, 0x001FC4107DB85060, 0x001FC4257877FD68, + 0x001FC438E8B5BFC6, 0x001FC44ACF15112A, 0x001FC45B2BF447E8, + 0x001FC469FF6C4504, 0x001FC477495001B2, 0x001FC483092BFBB8, + 0x001FC48D3E457FF6, 0x001FC495E799D21A, 0x001FC49D03DD30B0, + 0x001FC4A29179B432, 0x001FC4A68E8E07FC, 0x001FC4A8F8EBFB8C, + 0x001FC4A9CE16EA9E, 0x001FC4A90B41FA34, 0x001FC4A6AD4E28A0, + 0x001FC4A2B0C82E74, 0x001FC49D11E62DE2, 0x001FC495CC852DF4, + 0x001FC48CDC265EC0, 0x001FC4823BEC237A, 0x001FC475E696DEE6, + 0x001FC467D6817E82, 0x001FC458059DC036, 0x001FC4466D702E20, + 0x001FC433070BCB98, 0x001FC41DCB0D6E0E, 0x001FC406B196BBF6, + 0x001FC3EDB248CB62, 0x001FC3D2C43E593C, 0x001FC3B5DE0591B4, + 0x001FC396F599614C, 0x001FC376005A4592, 0x001FC352F3069370, + 0x001FC32DC1B22818, 0x001FC3065FBD7888, 0x001FC2DCBFCBF262, + 0x001FC2B0D3B99F9E, 0x001FC2828C8FFCF0, 0x001FC251DA79F164, + 0x001FC21EACB6D39E, 0x001FC1E8F18C6756, 0x001FC1B09637BB3C, + 0x001FC17586DCCD10, 0x001FC137AE74D6B6, 0x001FC0F6F6BB2414, + 0x001FC0B348184DA4, 0x001FC06C898BAFF0, 0x001FC022A092F364, + 0x001FBFD5710F72B8, 0x001FBF84DD29488E, 0x001FBF30C52FC60A, + 0x001FBED907770CC6, 0x001FBE7D80327DDA, 0x001FBE1E094BA614, + 0x001FBDBA7A354408, 0x001FBD52A7B9F826, 0x001FBCE663C6201A, + 0x001FBC757D2C4DE4, 0x001FBBFFBF63B7AA, 0x001FBB84F23FE6A2, + 0x001FBB04D9A0D18C, 0x001FBA7F351A70AC, 0x001FB9F3BF92B618, + 0x001FB9622ED4ABFC, 0x001FB8CA33174A16, 0x001FB82B76765B54, + 0x001FB7859C5B895C, 0x001FB6D840D55594, 0x001FB622F7D96942, + 0x001FB5654C6F37E0, 0x001FB49EBFBF69D2, 0x001FB3CEC803E746, + 0x001FB2F4CF539C3E, 0x001FB21032442852, 0x001FB1203E5A9604, + 0x001FB0243042E1C2, 0x001FAF1B31C479A6, 0x001FAE045767E104, + 0x001FACDE9DBF2D72, 0x001FABA8E640060A, 0x001FAA61F399FF28, + 0x001FA908656F66A2, 0x001FA79AB3508D3C, 
0x001FA61726D1F214, + 0x001FA47BD48BEA00, 0x001FA2C693C5C094, 0x001FA0F4F47DF314, + 0x001F9F04336BBE0A, 0x001F9CF12B79F9BC, 0x001F9AB84415ABC4, + 0x001F98555B782FB8, 0x001F95C3ABD03F78, 0x001F92FDA9CEF1F2, + 0x001F8FFCDA9AE41C, 0x001F8CB99E7385F8, 0x001F892AEC479606, + 0x001F8545F904DB8E, 0x001F80FDC336039A, 0x001F7C427839E926, + 0x001F7700A3582ACC, 0x001F71200F1A241C, 0x001F6A8234B7352A, + 0x001F630000A8E266, 0x001F5A66904FE3C4, 0x001F50724ECE1172, + 0x001F44C7665C6FDA, 0x001F36E5A38A59A2, 0x001F26143450340A, + 0x001F113E047B0414, 0x001EF6AEFA57CBE6, 0x001ED38CA188151E, + 0x001EA2A61E122DB0, 0x001E5961C78B267C, 0x001DDDF62BAC0BB0, + 0x001CDB4DD9E4E8C0], dtype=np.uint64) + +we_double = np.array([ + 9.655740063209182975e-16, 7.089014243955414331e-18, + 1.163941249669122378e-17, 1.524391512353216015e-17, + 1.833284885723743916e-17, 2.108965109464486630e-17, + 2.361128077843138196e-17, 2.595595772310893952e-17, + 2.816173554197752338e-17, 3.025504130321382330e-17, + 3.225508254836375280e-17, 3.417632340185027033e-17, + 3.602996978734452488e-17, 3.782490776869649048e-17, + 3.956832198097553231e-17, 4.126611778175946428e-17, + 4.292321808442525631e-17, 4.454377743282371417e-17, + 4.613133981483185932e-17, 4.768895725264635940e-17, + 4.921928043727962847e-17, 5.072462904503147014e-17, + 5.220704702792671737e-17, 5.366834661718192181e-17, + 5.511014372835094717e-17, 5.653388673239667134e-17, + 5.794088004852766616e-17, 5.933230365208943081e-17, + 6.070922932847179572e-17, 6.207263431163193485e-17, + 6.342341280303076511e-17, 6.476238575956142121e-17, + 6.609030925769405241e-17, 6.740788167872722244e-17, + 6.871574991183812442e-17, 7.001451473403929616e-17, + 7.130473549660643409e-17, 7.258693422414648352e-17, + 7.386159921381791997e-17, 7.512918820723728089e-17, + 7.639013119550825792e-17, 7.764483290797848102e-17, + 7.889367502729790548e-17, 8.013701816675454434e-17, + 8.137520364041762206e-17, 8.260855505210038174e-17, + 8.383737972539139383e-17, 
8.506196999385323132e-17, + 8.628260436784112996e-17, 8.749954859216182511e-17, + 8.871305660690252281e-17, 8.992337142215357066e-17, + 9.113072591597909173e-17, 9.233534356381788123e-17, + 9.353743910649128938e-17, 9.473721916312949566e-17, + 9.593488279457997317e-17, 9.713062202221521206e-17, + 9.832462230649511362e-17, 9.951706298915071878e-17, + 1.007081177024294931e-16, 1.018979547484694078e-16, + 1.030867374515421954e-16, 1.042746244856188556e-16, + 1.054617701794576406e-16, 1.066483248011914702e-16, + 1.078344348241948498e-16, 1.090202431758350473e-16, + 1.102058894705578110e-16, 1.113915102286197502e-16, + 1.125772390816567488e-16, 1.137632069661684705e-16, + 1.149495423059009298e-16, 1.161363711840218308e-16, + 1.173238175059045788e-16, 1.185120031532669434e-16, + 1.197010481303465158e-16, 1.208910707027385520e-16, + 1.220821875294706151e-16, 1.232745137888415193e-16, + 1.244681632985112523e-16, 1.256632486302898513e-16, + 1.268598812200397542e-16, 1.280581714730749379e-16, + 1.292582288654119552e-16, 1.304601620412028847e-16, + 1.316640789066572582e-16, 1.328700867207380889e-16, + 1.340782921828999433e-16, 1.352888015181175458e-16, + 1.365017205594397770e-16, 1.377171548282880964e-16, + 1.389352096127063919e-16, 1.401559900437571538e-16, + 1.413796011702485188e-16, 1.426061480319665444e-16, + 1.438357357315790180e-16, 1.450684695053687684e-16, + 1.463044547929475721e-16, 1.475437973060951633e-16, + 1.487866030968626066e-16, 1.500329786250736949e-16, + 1.512830308253539427e-16, 1.525368671738125550e-16, + 1.537945957544996933e-16, 1.550563253257577148e-16, + 1.563221653865837505e-16, 1.575922262431176140e-16, + 1.588666190753684151e-16, 1.601454560042916733e-16, + 1.614288501593278662e-16, 1.627169157465130500e-16, + 1.640097681172717950e-16, 1.653075238380036909e-16, + 1.666103007605742067e-16, 1.679182180938228863e-16, + 1.692313964762022267e-16, 1.705499580496629830e-16, + 1.718740265349031656e-16, 1.732037273081008369e-16, + 1.745391874792533975e-16, 
1.758805359722491379e-16, + 1.772279036068006489e-16, 1.785814231823732619e-16, + 1.799412295642463721e-16, 1.813074597718501559e-16, + 1.826802530695252266e-16, 1.840597510598587828e-16, + 1.854460977797569461e-16, 1.868394397994192684e-16, + 1.882399263243892051e-16, 1.896477093008616722e-16, + 1.910629435244376536e-16, 1.924857867525243818e-16, + 1.939163998205899420e-16, 1.953549467624909132e-16, + 1.968015949351037382e-16, 1.982565151475019047e-16, + 1.997198817949342081e-16, 2.011918729978734671e-16, + 2.026726707464198289e-16, 2.041624610503588774e-16, + 2.056614340951917875e-16, 2.071697844044737034e-16, + 2.086877110088159721e-16, 2.102154176219292789e-16, + 2.117531128241075913e-16, 2.133010102535779087e-16, + 2.148593288061663316e-16, 2.164282928437604723e-16, + 2.180081324120784027e-16, 2.195990834682870728e-16, + 2.212013881190495942e-16, 2.228152948696180545e-16, + 2.244410588846308588e-16, 2.260789422613173739e-16, + 2.277292143158621037e-16, 2.293921518837311354e-16, + 2.310680396348213318e-16, 2.327571704043534613e-16, + 2.344598455404957859e-16, 2.361763752697773994e-16, + 2.379070790814276700e-16, 2.396522861318623520e-16, + 2.414123356706293277e-16, 2.431875774892255956e-16, + 2.449783723943070217e-16, 2.467850927069288738e-16, + 2.486081227895851719e-16, 2.504478596029557040e-16, + 2.523047132944217013e-16, 2.541791078205812227e-16, + 2.560714816061770759e-16, 2.579822882420530896e-16, + 2.599119972249746917e-16, 2.618610947423924219e-16, + 2.638300845054942823e-16, 2.658194886341845120e-16, + 2.678298485979525166e-16, 2.698617262169488933e-16, + 2.719157047279818500e-16, 2.739923899205814823e-16, + 2.760924113487617126e-16, 2.782164236246436081e-16, + 2.803651078006983464e-16, 2.825391728480253184e-16, + 2.847393572388174091e-16, 2.869664306419817679e-16, + 2.892211957417995598e-16, 2.915044901905293183e-16, + 2.938171887070028633e-16, 2.961602053345465687e-16, + 2.985344958730045276e-16, 3.009410605012618141e-16, + 3.033809466085003416e-16, 
3.058552518544860874e-16, + 3.083651274815310004e-16, 3.109117819034266344e-16, + 3.134964845996663118e-16, 3.161205703467105734e-16, + 3.187854438219713117e-16, 3.214925846206797361e-16, + 3.242435527309451638e-16, 3.270399945182240440e-16, + 3.298836492772283149e-16, 3.327763564171671408e-16, + 3.357200633553244075e-16, 3.387168342045505162e-16, + 3.417688593525636996e-16, 3.448784660453423890e-16, + 3.480481301037442286e-16, 3.512804889222979418e-16, + 3.545783559224791863e-16, 3.579447366604276541e-16, + 3.613828468219060593e-16, 3.648961323764542545e-16, + 3.684882922095621322e-16, 3.721633036080207290e-16, + 3.759254510416256532e-16, 3.797793587668874387e-16, + 3.837300278789213687e-16, 3.877828785607895292e-16, + 3.919437984311428867e-16, 3.962191980786774996e-16, + 4.006160751056541688e-16, 4.051420882956573177e-16, + 4.098056438903062509e-16, 4.146159964290904582e-16, + 4.195833672073398926e-16, 4.247190841824385048e-16, + 4.300357481667470702e-16, 4.355474314693952008e-16, + 4.412699169036069903e-16, 4.472209874259932285e-16, + 4.534207798565834480e-16, 4.598922204905932469e-16, + 4.666615664711475780e-16, 4.737590853262492027e-16, + 4.812199172829237933e-16, 4.890851827392209900e-16, + 4.974034236191939753e-16, 5.062325072144159699e-16, + 5.156421828878082953e-16, 5.257175802022274839e-16, + 5.365640977112021618e-16, 5.483144034258703912e-16, + 5.611387454675159622e-16, 5.752606481503331688e-16, + 5.909817641652102998e-16, 6.087231416180907671e-16, + 6.290979034877557049e-16, 6.530492053564040799e-16, + 6.821393079028928626e-16, 7.192444966089361564e-16, + 7.706095350032096755e-16, 8.545517038584027421e-16], + dtype=np.float64) + +fe_double = np.array([ + 1.000000000000000000e+00, 9.381436808621747003e-01, + 9.004699299257464817e-01, 8.717043323812035949e-01, + 8.477855006239896074e-01, 8.269932966430503241e-01, + 8.084216515230083777e-01, 7.915276369724956185e-01, + 7.759568520401155522e-01, 7.614633888498962833e-01, + 7.478686219851951034e-01, 
7.350380924314234843e-01, + 7.228676595935720206e-01, 7.112747608050760117e-01, + 7.001926550827881623e-01, 6.895664961170779872e-01, + 6.793505722647653622e-01, 6.695063167319247333e-01, + 6.600008410789997004e-01, 6.508058334145710999e-01, + 6.418967164272660897e-01, 6.332519942143660652e-01, + 6.248527387036659775e-01, 6.166821809152076561e-01, + 6.087253820796220127e-01, 6.009689663652322267e-01, + 5.934009016917334289e-01, 5.860103184772680329e-01, + 5.787873586028450257e-01, 5.717230486648258170e-01, + 5.648091929124001709e-01, 5.580382822625874484e-01, + 5.514034165406412891e-01, 5.448982376724396115e-01, + 5.385168720028619127e-01, 5.322538802630433219e-01, + 5.261042139836197284e-01, 5.200631773682335979e-01, + 5.141263938147485613e-01, 5.082897764106428795e-01, + 5.025495018413477233e-01, 4.969019872415495476e-01, + 4.913438695940325340e-01, 4.858719873418849144e-01, + 4.804833639304542103e-01, 4.751751930373773747e-01, + 4.699448252839599771e-01, 4.647897562504261781e-01, + 4.597076156421376902e-01, 4.546961574746155033e-01, + 4.497532511627549967e-01, 4.448768734145485126e-01, + 4.400651008423538957e-01, 4.353161032156365740e-01, + 4.306281372884588343e-01, 4.259995411430343437e-01, + 4.214287289976165751e-01, 4.169141864330028757e-01, + 4.124544659971611793e-01, 4.080481831520323954e-01, + 4.036940125305302773e-01, 3.993906844752310725e-01, + 3.951369818332901573e-01, 3.909317369847971069e-01, + 3.867738290841376547e-01, 3.826621814960098344e-01, + 3.785957594095807899e-01, 3.745735676159021588e-01, + 3.705946484351460013e-01, 3.666580797815141568e-01, + 3.627629733548177748e-01, 3.589084729487497794e-01, + 3.550937528667874599e-01, 3.513180164374833381e-01, + 3.475804946216369817e-01, 3.438804447045024082e-01, + 3.402171490667800224e-01, 3.365899140286776059e-01, + 3.329980687618089852e-01, 3.294409642641363267e-01, + 3.259179723935561879e-01, 3.224284849560891675e-01, + 3.189719128449572394e-01, 3.155476852271289490e-01, + 3.121552487741795501e-01, 
3.087940669345601852e-01, + 3.054636192445902565e-01, 3.021634006756935276e-01, + 2.988929210155817917e-01, 2.956517042812611962e-01, + 2.924392881618925744e-01, 2.892552234896777485e-01, + 2.860990737370768255e-01, 2.829704145387807457e-01, + 2.798688332369729248e-01, 2.767939284485173568e-01, + 2.737453096528029706e-01, 2.707225967990600224e-01, + 2.677254199320447947e-01, 2.647534188350622042e-01, + 2.618062426893629779e-01, 2.588835497490162285e-01, + 2.559850070304153791e-01, 2.531102900156294577e-01, + 2.502590823688622956e-01, 2.474310756653276266e-01, + 2.446259691318921070e-01, 2.418434693988772144e-01, + 2.390832902624491774e-01, 2.363451524570596429e-01, + 2.336287834374333461e-01, 2.309339171696274118e-01, + 2.282602939307167011e-01, 2.256076601166840667e-01, + 2.229757680581201940e-01, 2.203643758433594946e-01, + 2.177732471487005272e-01, 2.152021510753786837e-01, + 2.126508619929782795e-01, 2.101191593889882581e-01, + 2.076068277242220372e-01, 2.051136562938377095e-01, + 2.026394390937090173e-01, 2.001839746919112650e-01, + 1.977470661050988732e-01, 1.953285206795632167e-01, + 1.929281499767713515e-01, 1.905457696631953912e-01, + 1.881811994042543179e-01, 1.858342627621971110e-01, + 1.835047870977674633e-01, 1.811926034754962889e-01, + 1.788975465724783054e-01, 1.766194545904948843e-01, + 1.743581691713534942e-01, 1.721135353153200598e-01, + 1.698854013025276610e-01, 1.676736186172501919e-01, + 1.654780418749360049e-01, 1.632985287519018169e-01, + 1.611349399175920349e-01, 1.589871389693142123e-01, + 1.568549923693652315e-01, 1.547383693844680830e-01, + 1.526371420274428570e-01, 1.505511850010398944e-01, + 1.484803756438667910e-01, 1.464245938783449441e-01, + 1.443837221606347754e-01, 1.423576454324722018e-01, + 1.403462510748624548e-01, 1.383494288635802039e-01, + 1.363670709264288572e-01, 1.343990717022136294e-01, + 1.324453279013875218e-01, 1.305057384683307731e-01, + 1.285802045452281717e-01, 1.266686294375106714e-01, + 1.247709185808309612e-01, 
1.228869795095451356e-01, + 1.210167218266748335e-01, 1.191600571753276827e-01, + 1.173168992115555670e-01, 1.154871635786335338e-01, + 1.136707678827443141e-01, 1.118676316700562973e-01, + 1.100776764051853845e-01, 1.083008254510337970e-01, + 1.065370040500016602e-01, 1.047861393065701724e-01, + 1.030481601712577161e-01, 1.013229974259536315e-01, + 9.961058367063713170e-02, 9.791085331149219917e-02, + 9.622374255043279756e-02, 9.454918937605585882e-02, + 9.288713355604354127e-02, 9.123751663104015530e-02, + 8.960028191003285847e-02, 8.797537446727021759e-02, + 8.636274114075691288e-02, 8.476233053236811865e-02, + 8.317409300963238272e-02, 8.159798070923741931e-02, + 8.003394754231990538e-02, 7.848194920160642130e-02, + 7.694194317048050347e-02, 7.541388873405840965e-02, + 7.389774699236474620e-02, 7.239348087570873780e-02, + 7.090105516237182881e-02, 6.942043649872875477e-02, + 6.795159342193660135e-02, 6.649449638533977414e-02, + 6.504911778675374900e-02, 6.361543199980733421e-02, + 6.219341540854099459e-02, 6.078304644547963265e-02, + 5.938430563342026597e-02, 5.799717563120065922e-02, + 5.662164128374287675e-02, 5.525768967669703741e-02, + 5.390531019604608703e-02, 5.256449459307169225e-02, + 5.123523705512628146e-02, 4.991753428270637172e-02, + 4.861138557337949667e-02, 4.731679291318154762e-02, + 4.603376107617516977e-02, 4.476229773294328196e-02, + 4.350241356888818328e-02, 4.225412241331623353e-02, + 4.101744138041481941e-02, 3.979239102337412542e-02, + 3.857899550307485742e-02, 3.737728277295936097e-02, + 3.618728478193142251e-02, 3.500903769739741045e-02, + 3.384258215087432992e-02, 3.268796350895953468e-02, + 3.154523217289360859e-02, 3.041444391046660423e-02, + 2.929566022463739317e-02, 2.818894876397863569e-02, + 2.709438378095579969e-02, 2.601204664513421735e-02, + 2.494202641973178314e-02, 2.388442051155817078e-02, + 2.283933540638524023e-02, 2.180688750428358066e-02, + 2.078720407257811723e-02, 1.978042433800974303e-02, + 1.878670074469603046e-02, 
1.780620041091136169e-02, + 1.683910682603994777e-02, 1.588562183997316302e-02, + 1.494596801169114850e-02, 1.402039140318193759e-02, + 1.310916493125499106e-02, 1.221259242625538123e-02, + 1.133101359783459695e-02, 1.046481018102997894e-02, + 9.614413642502209895e-03, 8.780314985808975251e-03, + 7.963077438017040002e-03, 7.163353183634983863e-03, + 6.381905937319179087e-03, 5.619642207205483020e-03, + 4.877655983542392333e-03, 4.157295120833795314e-03, + 3.460264777836904049e-03, 2.788798793574076128e-03, + 2.145967743718906265e-03, 1.536299780301572356e-03, + 9.672692823271745359e-04, 4.541343538414967652e-04], + dtype=np.float64) + +ke_float = np.array([ + 0x00714851, 0x00000000, 0x004DF56F, 0x0061BBD6, 0x006A6EDD, + 0x006F44A0, 0x00725474, 0x00746FF9, 0x0075F96F, 0x007724D3, + 0x00781027, 0x0078CDEE, 0x00796A2C, 0x0079ED08, 0x007A5C37, + 0x007ABBD7, 0x007B0EF4, 0x007B57DC, 0x007B9853, 0x007BD1BB, + 0x007C052E, 0x007C338C, 0x007C5D8E, 0x007C83C8, 0x007CA6B8, + 0x007CC6C6, 0x007CE449, 0x007CFF8C, 0x007D18CD, 0x007D3043, + 0x007D461D, 0x007D5A84, 0x007D6D9B, 0x007D7F82, 0x007D9053, + 0x007DA028, 0x007DAF15, 0x007DBD2D, 0x007DCA82, 0x007DD722, + 0x007DE31C, 0x007DEE7C, 0x007DF94D, 0x007E0399, 0x007E0D69, + 0x007E16C6, 0x007E1FB6, 0x007E2842, 0x007E306F, 0x007E3843, + 0x007E3FC4, 0x007E46F6, 0x007E4DDF, 0x007E5481, 0x007E5AE2, + 0x007E6104, 0x007E66EC, 0x007E6C9B, 0x007E7215, 0x007E775D, + 0x007E7C76, 0x007E8160, 0x007E8620, 0x007E8AB6, 0x007E8F24, + 0x007E936D, 0x007E9793, 0x007E9B95, 0x007E9F77, 0x007EA33A, + 0x007EA6DE, 0x007EAA66, 0x007EADD1, 0x007EB123, 0x007EB45A, + 0x007EB779, 0x007EBA80, 0x007EBD71, 0x007EC04B, 0x007EC310, + 0x007EC5C1, 0x007EC85E, 0x007ECAE9, 0x007ECD61, 0x007ECFC7, + 0x007ED21C, 0x007ED460, 0x007ED694, 0x007ED8B9, 0x007EDACE, + 0x007EDCD5, 0x007EDECE, 0x007EE0B8, 0x007EE296, 0x007EE466, + 0x007EE62A, 0x007EE7E2, 0x007EE98D, 0x007EEB2D, 0x007EECC1, + 0x007EEE4A, 0x007EEFC9, 0x007EF13D, 0x007EF2A7, 0x007EF406, + 0x007EF55C, 0x007EF6A8, 
0x007EF7EB, 0x007EF924, 0x007EFA55, + 0x007EFB7D, 0x007EFC9C, 0x007EFDB2, 0x007EFEC1, 0x007EFFC7, + 0x007F00C5, 0x007F01BB, 0x007F02AA, 0x007F0391, 0x007F0470, + 0x007F0548, 0x007F0618, 0x007F06E2, 0x007F07A4, 0x007F0860, + 0x007F0914, 0x007F09C2, 0x007F0A69, 0x007F0B09, 0x007F0BA3, + 0x007F0C36, 0x007F0CC2, 0x007F0D48, 0x007F0DC8, 0x007F0E41, + 0x007F0EB4, 0x007F0F21, 0x007F0F88, 0x007F0FE8, 0x007F1042, + 0x007F1096, 0x007F10E4, 0x007F112B, 0x007F116D, 0x007F11A8, + 0x007F11DD, 0x007F120C, 0x007F1235, 0x007F1258, 0x007F1274, + 0x007F128A, 0x007F129A, 0x007F12A4, 0x007F12A7, 0x007F12A4, + 0x007F129B, 0x007F128B, 0x007F1274, 0x007F1257, 0x007F1233, + 0x007F1209, 0x007F11D8, 0x007F119F, 0x007F1160, 0x007F111A, + 0x007F10CC, 0x007F1077, 0x007F101B, 0x007F0FB7, 0x007F0F4B, + 0x007F0ED7, 0x007F0E5C, 0x007F0DD8, 0x007F0D4C, 0x007F0CB7, + 0x007F0C19, 0x007F0B73, 0x007F0AC3, 0x007F0A0A, 0x007F0947, + 0x007F087B, 0x007F07A4, 0x007F06C2, 0x007F05D6, 0x007F04DF, + 0x007F03DC, 0x007F02CD, 0x007F01B2, 0x007F008B, 0x007EFF56, + 0x007EFE13, 0x007EFCC3, 0x007EFB64, 0x007EF9F6, 0x007EF878, + 0x007EF6EA, 0x007EF54B, 0x007EF39A, 0x007EF1D6, 0x007EEFFF, + 0x007EEE14, 0x007EEC13, 0x007EE9FD, 0x007EE7CF, 0x007EE589, + 0x007EE329, 0x007EE0AE, 0x007EDE16, 0x007EDB61, 0x007ED88C, + 0x007ED595, 0x007ED27B, 0x007ECF3B, 0x007ECBD3, 0x007EC841, + 0x007EC481, 0x007EC091, 0x007EBC6D, 0x007EB811, 0x007EB37A, + 0x007EAEA4, 0x007EA988, 0x007EA422, 0x007E9E6B, 0x007E985D, + 0x007E91EF, 0x007E8B1A, 0x007E83D4, 0x007E7C11, 0x007E73C5, + 0x007E6AE1, 0x007E6155, 0x007E570F, 0x007E4BF7, 0x007E3FF3, + 0x007E32E6, 0x007E24AC, 0x007E1518, 0x007E03F7, 0x007DF10A, + 0x007DDC03, 0x007DC480, 0x007DAA09, 0x007D8C00, 0x007D699A, + 0x007D41C9, 0x007D131E, 0x007CDB97, 0x007C9851, 0x007C44F8, + 0x007BDABC, 0x007B4E33, 0x007A8A98, 0x00796587, 0x007777D9, + 0x00736D37, ], dtype=np.uint32) + +we_float = np.array([ + 1.03677719e-06, 7.61177108e-09, 1.24977240e-08, 1.63680292e-08, + 1.96847466e-08, 2.26448404e-08, 
2.53524197e-08, 2.78699974e-08, + 3.02384333e-08, 3.24861032e-08, 3.46336312e-08, 3.66965478e-08, + 3.86868855e-08, 4.06141855e-08, 4.24861622e-08, 4.43091566e-08, + 4.60884545e-08, 4.78285168e-08, 4.95331490e-08, 5.12056279e-08, + 5.28488000e-08, 5.44651557e-08, 5.60568899e-08, 5.76259484e-08, + 5.91740662e-08, 6.07027987e-08, 6.22135462e-08, 6.37075759e-08, + 6.51860386e-08, 6.66499836e-08, 6.81003709e-08, 6.95380822e-08, + 7.09639292e-08, 7.23786618e-08, 7.37829746e-08, 7.51775128e-08, + 7.65628768e-08, 7.79396272e-08, 7.93082883e-08, 8.06693516e-08, + 8.20232788e-08, 8.33705045e-08, 8.47114385e-08, 8.60464681e-08, + 8.73759596e-08, 8.87002606e-08, 9.00197010e-08, 9.13345948e-08, + 9.26452410e-08, 9.39519249e-08, 9.52549192e-08, 9.65544849e-08, + 9.78508719e-08, 9.91443202e-08, 1.00435060e-07, 1.01723315e-07, + 1.03009296e-07, 1.04293211e-07, 1.05575259e-07, 1.06855633e-07, + 1.08134518e-07, 1.09412096e-07, 1.10688542e-07, 1.11964025e-07, + 1.13238713e-07, 1.14512767e-07, 1.15786343e-07, 1.17059595e-07, + 1.18332673e-07, 1.19605723e-07, 1.20878890e-07, 1.22152313e-07, + 1.23426131e-07, 1.24700479e-07, 1.25975490e-07, 1.27251294e-07, + 1.28528022e-07, 1.29805799e-07, 1.31084751e-07, 1.32365001e-07, + 1.33646673e-07, 1.34929886e-07, 1.36214760e-07, 1.37501415e-07, + 1.38789966e-07, 1.40080532e-07, 1.41373228e-07, 1.42668169e-07, + 1.43965470e-07, 1.45265245e-07, 1.46567606e-07, 1.47872669e-07, + 1.49180545e-07, 1.50491348e-07, 1.51805191e-07, 1.53122186e-07, + 1.54442445e-07, 1.55766083e-07, 1.57093212e-07, 1.58423946e-07, + 1.59758399e-07, 1.61096684e-07, 1.62438917e-07, 1.63785214e-07, + 1.65135690e-07, 1.66490462e-07, 1.67849647e-07, 1.69213364e-07, + 1.70581733e-07, 1.71954874e-07, 1.73332908e-07, 1.74715958e-07, + 1.76104148e-07, 1.77497602e-07, 1.78896448e-07, 1.80300814e-07, + 1.81710828e-07, 1.83126623e-07, 1.84548331e-07, 1.85976086e-07, + 1.87410026e-07, 1.88850288e-07, 1.90297012e-07, 1.91750343e-07, + 1.93210424e-07, 1.94677403e-07, 1.96151428e-07, 
1.97632653e-07, + 1.99121231e-07, 2.00617321e-07, 2.02121082e-07, 2.03632677e-07, + 2.05152273e-07, 2.06680040e-07, 2.08216149e-07, 2.09760777e-07, + 2.11314104e-07, 2.12876312e-07, 2.14447590e-07, 2.16028129e-07, + 2.17618123e-07, 2.19217773e-07, 2.20827283e-07, 2.22446862e-07, + 2.24076723e-07, 2.25717086e-07, 2.27368174e-07, 2.29030216e-07, + 2.30703448e-07, 2.32388110e-07, 2.34084450e-07, 2.35792720e-07, + 2.37513182e-07, 2.39246101e-07, 2.40991752e-07, 2.42750416e-07, + 2.44522382e-07, 2.46307948e-07, 2.48107418e-07, 2.49921109e-07, + 2.51749342e-07, 2.53592452e-07, 2.55450781e-07, 2.57324683e-07, + 2.59214522e-07, 2.61120673e-07, 2.63043524e-07, 2.64983476e-07, + 2.66940939e-07, 2.68916342e-07, 2.70910123e-07, 2.72922739e-07, + 2.74954660e-07, 2.77006373e-07, 2.79078382e-07, 2.81171210e-07, + 2.83285396e-07, 2.85421503e-07, 2.87580110e-07, 2.89761822e-07, + 2.91967265e-07, 2.94197089e-07, 2.96451969e-07, 2.98732610e-07, + 3.01039742e-07, 3.03374127e-07, 3.05736557e-07, 3.08127859e-07, + 3.10548894e-07, 3.13000563e-07, 3.15483804e-07, 3.17999599e-07, + 3.20548974e-07, 3.23133003e-07, 3.25752811e-07, 3.28409576e-07, + 3.31104534e-07, 3.33838984e-07, 3.36614287e-07, 3.39431878e-07, + 3.42293264e-07, 3.45200034e-07, 3.48153864e-07, 3.51156520e-07, + 3.54209871e-07, 3.57315892e-07, 3.60476673e-07, 3.63694431e-07, + 3.66971518e-07, 3.70310433e-07, 3.73713834e-07, 3.77184553e-07, + 3.80725611e-07, 3.84340234e-07, 3.88031877e-07, 3.91804239e-07, + 3.95661291e-07, 3.99607304e-07, 4.03646879e-07, 4.07784981e-07, + 4.12026980e-07, 4.16378695e-07, 4.20846449e-07, 4.25437124e-07, + 4.30158235e-07, 4.35018005e-07, 4.40025460e-07, 4.45190536e-07, + 4.50524210e-07, 4.56038644e-07, 4.61747369e-07, 4.67665494e-07, + 4.73809965e-07, 4.80199879e-07, 4.86856855e-07, 4.93805512e-07, + 5.01074042e-07, 5.08694944e-07, 5.16705952e-07, 5.25151216e-07, + 5.34082859e-07, 5.43563016e-07, 5.53666578e-07, 5.64484953e-07, + 5.76131313e-07, 5.88748108e-07, 6.02518140e-07, 6.17681418e-07, + 
6.34561837e-07, 6.53611496e-07, 6.75488730e-07, 7.01206245e-07, + 7.32441505e-07, 7.72282898e-07, 8.27435688e-07, 9.17567905e-07,] + , dtype=np.float32) + +fe_float = np.array([ + 1.00000000e+00, 9.38143681e-01, 9.00469930e-01, 8.71704332e-01, + 8.47785501e-01, 8.26993297e-01, 8.08421652e-01, 7.91527637e-01, + 7.75956852e-01, 7.61463389e-01, 7.47868622e-01, 7.35038092e-01, + 7.22867660e-01, 7.11274761e-01, 7.00192655e-01, 6.89566496e-01, + 6.79350572e-01, 6.69506317e-01, 6.60000841e-01, 6.50805833e-01, + 6.41896716e-01, 6.33251994e-01, 6.24852739e-01, 6.16682181e-01, + 6.08725382e-01, 6.00968966e-01, 5.93400902e-01, 5.86010318e-01, + 5.78787359e-01, 5.71723049e-01, 5.64809193e-01, 5.58038282e-01, + 5.51403417e-01, 5.44898238e-01, 5.38516872e-01, 5.32253880e-01, + 5.26104214e-01, 5.20063177e-01, 5.14126394e-01, 5.08289776e-01, + 5.02549502e-01, 4.96901987e-01, 4.91343870e-01, 4.85871987e-01, + 4.80483364e-01, 4.75175193e-01, 4.69944825e-01, 4.64789756e-01, + 4.59707616e-01, 4.54696157e-01, 4.49753251e-01, 4.44876873e-01, + 4.40065101e-01, 4.35316103e-01, 4.30628137e-01, 4.25999541e-01, + 4.21428729e-01, 4.16914186e-01, 4.12454466e-01, 4.08048183e-01, + 4.03694013e-01, 3.99390684e-01, 3.95136982e-01, 3.90931737e-01, + 3.86773829e-01, 3.82662181e-01, 3.78595759e-01, 3.74573568e-01, + 3.70594648e-01, 3.66658080e-01, 3.62762973e-01, 3.58908473e-01, + 3.55093753e-01, 3.51318016e-01, 3.47580495e-01, 3.43880445e-01, + 3.40217149e-01, 3.36589914e-01, 3.32998069e-01, 3.29440964e-01, + 3.25917972e-01, 3.22428485e-01, 3.18971913e-01, 3.15547685e-01, + 3.12155249e-01, 3.08794067e-01, 3.05463619e-01, 3.02163401e-01, + 2.98892921e-01, 2.95651704e-01, 2.92439288e-01, 2.89255223e-01, + 2.86099074e-01, 2.82970415e-01, 2.79868833e-01, 2.76793928e-01, + 2.73745310e-01, 2.70722597e-01, 2.67725420e-01, 2.64753419e-01, + 2.61806243e-01, 2.58883550e-01, 2.55985007e-01, 2.53110290e-01, + 2.50259082e-01, 2.47431076e-01, 2.44625969e-01, 2.41843469e-01, + 2.39083290e-01, 2.36345152e-01, 
2.33628783e-01, 2.30933917e-01, + 2.28260294e-01, 2.25607660e-01, 2.22975768e-01, 2.20364376e-01, + 2.17773247e-01, 2.15202151e-01, 2.12650862e-01, 2.10119159e-01, + 2.07606828e-01, 2.05113656e-01, 2.02639439e-01, 2.00183975e-01, + 1.97747066e-01, 1.95328521e-01, 1.92928150e-01, 1.90545770e-01, + 1.88181199e-01, 1.85834263e-01, 1.83504787e-01, 1.81192603e-01, + 1.78897547e-01, 1.76619455e-01, 1.74358169e-01, 1.72113535e-01, + 1.69885401e-01, 1.67673619e-01, 1.65478042e-01, 1.63298529e-01, + 1.61134940e-01, 1.58987139e-01, 1.56854992e-01, 1.54738369e-01, + 1.52637142e-01, 1.50551185e-01, 1.48480376e-01, 1.46424594e-01, + 1.44383722e-01, 1.42357645e-01, 1.40346251e-01, 1.38349429e-01, + 1.36367071e-01, 1.34399072e-01, 1.32445328e-01, 1.30505738e-01, + 1.28580205e-01, 1.26668629e-01, 1.24770919e-01, 1.22886980e-01, + 1.21016722e-01, 1.19160057e-01, 1.17316899e-01, 1.15487164e-01, + 1.13670768e-01, 1.11867632e-01, 1.10077676e-01, 1.08300825e-01, + 1.06537004e-01, 1.04786139e-01, 1.03048160e-01, 1.01322997e-01, + 9.96105837e-02, 9.79108533e-02, 9.62237426e-02, 9.45491894e-02, + 9.28871336e-02, 9.12375166e-02, 8.96002819e-02, 8.79753745e-02, + 8.63627411e-02, 8.47623305e-02, 8.31740930e-02, 8.15979807e-02, + 8.00339475e-02, 7.84819492e-02, 7.69419432e-02, 7.54138887e-02, + 7.38977470e-02, 7.23934809e-02, 7.09010552e-02, 6.94204365e-02, + 6.79515934e-02, 6.64944964e-02, 6.50491178e-02, 6.36154320e-02, + 6.21934154e-02, 6.07830464e-02, 5.93843056e-02, 5.79971756e-02, + 5.66216413e-02, 5.52576897e-02, 5.39053102e-02, 5.25644946e-02, + 5.12352371e-02, 4.99175343e-02, 4.86113856e-02, 4.73167929e-02, + 4.60337611e-02, 4.47622977e-02, 4.35024136e-02, 4.22541224e-02, + 4.10174414e-02, 3.97923910e-02, 3.85789955e-02, 3.73772828e-02, + 3.61872848e-02, 3.50090377e-02, 3.38425822e-02, 3.26879635e-02, + 3.15452322e-02, 3.04144439e-02, 2.92956602e-02, 2.81889488e-02, + 2.70943838e-02, 2.60120466e-02, 2.49420264e-02, 2.38844205e-02, + 2.28393354e-02, 2.18068875e-02, 2.07872041e-02, 
1.97804243e-02, + 1.87867007e-02, 1.78062004e-02, 1.68391068e-02, 1.58856218e-02, + 1.49459680e-02, 1.40203914e-02, 1.31091649e-02, 1.22125924e-02, + 1.13310136e-02, 1.04648102e-02, 9.61441364e-03, 8.78031499e-03, + 7.96307744e-03, 7.16335318e-03, 6.38190594e-03, 5.61964221e-03, + 4.87765598e-03, 4.15729512e-03, 3.46026478e-03, 2.78879879e-03, + 2.14596774e-03, 1.53629978e-03, 9.67269282e-04, 4.54134354e-04,] + , dtype=np.float32) + + +ziggurat_nor_r = 3.6541528853610087963519472518 +ziggurat_nor_inv_r = 0.27366123732975827203338247596 +ziggurat_exp_r = 7.6971174701310497140446280481 + +ziggurat_nor_r_f = np.float32(3.6541528853610087963519472518) +ziggurat_nor_inv_r_f = np.float32(0.27366123732975827203338247596) +ziggurat_exp_r_f = np.float32(7.6971174701310497140446280481) + +M_PI = 3.14159265358979323846 +INT64_MAX = 9223372036854775807 +UINT8_MAX = 255 +UINT16_MAX = 65535 +UINT32_MAX = 4294967295 +UINT64_MAX = 18446744073709551615 +LONG_MAX = (1 << ( 8 * ctypes.sizeof(ctypes.c_long) - 1)) - 1 + +LS2PI = 0.91893853320467267 +TWELFTH = 0.083333333333333333333333 diff --git a/venv/lib/python3.10/site-packages/numba/np/random/distributions.py b/venv/lib/python3.10/site-packages/numba/np/random/distributions.py new file mode 100644 index 0000000000000000000000000000000000000000..6d52c79dd55ce942ea0390d60c6cc792d16c9fd7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/random/distributions.py @@ -0,0 +1,12 @@ +import sys +from numba.core.utils import _RedirectSubpackage +from numba.core import config + +if config.USE_LEGACY_TYPE_SYSTEM: + sys.modules[__name__] = \ + _RedirectSubpackage(locals(), + "numba.np.random.old_distributions") +else: + sys.modules[__name__] = \ + _RedirectSubpackage(locals(), + "numba.np.random.new_distributions") diff --git a/venv/lib/python3.10/site-packages/numba/np/random/generator_core.py b/venv/lib/python3.10/site-packages/numba/np/random/generator_core.py new file mode 100644 index 
0000000000000000000000000000000000000000..42977faadbccbcd01a35613d6daf43ec7d85bed2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/random/generator_core.py @@ -0,0 +1,132 @@ +""" +Core Implementations for Generator/BitGenerator Models. +""" + +from llvmlite import ir +from numba.core import cgutils, types, config +from numba.core.extending import (intrinsic, make_attribute_wrapper, models, + overload, register_jitable, + register_model) + + +@register_model(types.NumPyRandomBitGeneratorType) +class NumPyRngBitGeneratorModel(models.StructModel): + def __init__(self, dmm, fe_type): + members = [ + ('parent', types.pyobject), + ('state_address', types.uintp), + ('state', types.uintp), + ('fnptr_next_uint64', types.uintp), + ('fnptr_next_uint32', types.uintp), + ('fnptr_next_double', types.uintp), + ('bit_generator', types.uintp), + ] + super(NumPyRngBitGeneratorModel, self).__init__(dmm, fe_type, members) + + +_bit_gen_type = types.NumPyRandomBitGeneratorType('bit_generator') + + +@register_model(types.NumPyRandomGeneratorType) +class NumPyRandomGeneratorTypeModel(models.StructModel): + def __init__(self, dmm, fe_type): + members = [ + ('bit_generator', _bit_gen_type), + ('meminfo', types.MemInfoPointer(types.voidptr)), + ('parent', types.pyobject) + ] + super( + NumPyRandomGeneratorTypeModel, + self).__init__( + dmm, + fe_type, + members) + + +# The Generator instances have a bit_generator attr +make_attribute_wrapper( + types.NumPyRandomGeneratorType, + 'bit_generator', + 'bit_generator') + + +def _generate_next_binding(overloadable_function, return_type): + """ + Generate the overloads for "next_(some type)" functions. 
+ """ + @intrinsic + def intrin_NumPyRandomBitGeneratorType_next_ty(tyctx, inst): + sig = return_type(inst) + + def codegen(cgctx, builder, sig, llargs): + name = overloadable_function.__name__ + struct_ptr = cgutils.create_struct_proxy(inst)(cgctx, builder, + value=llargs[0]) + + # Get the 'state' and 'fnptr_next_(type)' members of the struct + state = struct_ptr.state + next_double_addr = getattr(struct_ptr, f'fnptr_{name}') + + # LLVM IR types needed + ll_void_ptr_t = cgctx.get_value_type(types.voidptr) + ll_return_t = cgctx.get_value_type(return_type) + ll_uintp_t = cgctx.get_value_type(types.uintp) + + # Convert the stored Generator function address to a pointer + next_fn_fnptr = builder.inttoptr( + next_double_addr, ll_void_ptr_t) + # Add the function to the module + fnty = ir.FunctionType(ll_return_t, (ll_uintp_t,)) + next_fn = cgutils.get_or_insert_function( + builder.module, fnty, name) + # Bit cast the function pointer to the function type + fnptr_as_fntype = builder.bitcast(next_fn_fnptr, next_fn.type) + # call it with the "state" address as the arg + ret = builder.call(fnptr_as_fntype, (state,)) + return ret + return sig, codegen + + @overload(overloadable_function) + def ol_next_ty(bitgen): + if isinstance(bitgen, types.NumPyRandomBitGeneratorType): + def impl(bitgen): + return intrin_NumPyRandomBitGeneratorType_next_ty(bitgen) + return impl + + +# Some function stubs for "next(some type)", these will be overloaded +def next_double(bitgen): + return bitgen.ctypes.next_double(bitgen.ctypes.state) + + +def next_uint32(bitgen): + return bitgen.ctypes.next_uint32(bitgen.ctypes.state) + + +def next_uint64(bitgen): + return bitgen.ctypes.next_uint64(bitgen.ctypes.state) + + +if config.USE_LEGACY_TYPE_SYSTEM: + _generate_next_binding(next_double, types.double) + _generate_next_binding(next_uint32, types.uint32) + _generate_next_binding(next_uint64, types.uint64) + + # See: https://github.com/numpy/numpy/pull/20314 + @register_jitable + def next_float(bitgen): 
+ return types.float32(types.float32(next_uint32(bitgen) >> 8) + * types.float32(1.0) + / types.float32(16777216.0)) + +else: + _generate_next_binding(next_double, types.np_double) + _generate_next_binding(next_uint32, types.np_uint32) + _generate_next_binding(next_uint64, types.np_uint64) + + # See: https://github.com/numpy/numpy/pull/20314 + @register_jitable + def next_float(bitgen): + return types.np_float32(types.np_float32(next_uint32(bitgen) >> 8) + * types.np_float32(1.0) + / types.np_float32(16777216.0)) diff --git a/venv/lib/python3.10/site-packages/numba/np/random/generator_methods.py b/venv/lib/python3.10/site-packages/numba/np/random/generator_methods.py new file mode 100644 index 0000000000000000000000000000000000000000..03968e8699b49e9a6478be188fcf9738a1ab9330 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/random/generator_methods.py @@ -0,0 +1,971 @@ +""" +Implementation of method overloads for Generator objects. +""" + +import numpy as np +from numba.core import types +from numba.core.extending import overload_method, register_jitable +from numba.np.numpy_support import as_dtype, from_dtype +from numba.np.random.generator_core import next_float, next_double +from numba.np.numpy_support import is_nonelike +from numba.core.errors import TypingError +from numba.core.types.containers import Tuple, UniTuple +from numba.np.random.distributions import \ + (random_standard_exponential_inv_f, random_standard_exponential_inv, + random_standard_exponential, random_standard_normal_f, + random_standard_gamma, random_standard_normal, random_uniform, + random_standard_exponential_f, random_standard_gamma_f, random_normal, + random_exponential, random_gamma, random_beta, random_power, + random_f,random_chisquare,random_standard_cauchy,random_pareto, + random_weibull, random_laplace, random_logistic, + random_lognormal, random_rayleigh, random_standard_t, random_wald, + random_geometric, random_zipf, random_triangular, + random_poisson, 
random_negative_binomial, random_logseries, + random_noncentral_chisquare, random_noncentral_f, random_binomial) +from numba.np.random import random_methods + + +def _get_proper_func(func_32, func_64, dtype, dist_name="the given"): + """ + Most of the standard NumPy distributions that accept dtype argument + only support either np.float32 or np.float64 as dtypes. + + This is a helper function that helps Numba select the proper underlying + implementation according to provided dtype. + """ + if isinstance(dtype, types.Omitted): + dtype = dtype.value + + np_dt = dtype + if isinstance(dtype, type): + nb_dt = from_dtype(np.dtype(dtype)) + elif isinstance(dtype, types.NumberClass): + nb_dt = dtype + np_dt = as_dtype(nb_dt) + + if np_dt not in [np.float32, np.float64]: + raise TypingError("Argument dtype is not one of the" + + " expected type(s): " + + " np.float32 or np.float64") + + if np_dt == np.float32: + next_func = func_32 + else: + next_func = func_64 + + return next_func, nb_dt + + +def check_size(size): + if not any([isinstance(size, UniTuple) and + isinstance(size.dtype, types.Integer), + isinstance(size, Tuple) and size.count == 0, + isinstance(size, types.Integer)]): + raise TypingError("Argument size is not one of the" + + " expected type(s): " + + " an integer, an empty tuple or a tuple of integers") + + +def check_types(obj, type_list, arg_name): + """ + Check if given object is one of the provided types. 
+ If not raises an TypeError + """ + if isinstance(obj, types.Omitted): + obj = obj.value + + if not isinstance(type_list, (list, tuple)): + type_list = [type_list] + + if not any([isinstance(obj, _type) for _type in type_list]): + raise TypingError(f"Argument {arg_name} is not one of the" + + f" expected type(s): {type_list}") + + +# Overload the Generator().integers() +@overload_method(types.NumPyRandomGeneratorType, 'integers') +def NumPyRandomGeneratorType_integers(inst, low, high, size=None, + dtype=np.int64, endpoint=False): + check_types(low, [types.Integer, + types.Boolean, bool, int], 'low') + check_types(high, [types.Integer, types.Boolean, + bool, int], 'high') + check_types(endpoint, [types.Boolean, bool], 'endpoint') + + if isinstance(size, types.Omitted): + size = size.value + + if isinstance(dtype, types.Omitted): + dtype = dtype.value + + if isinstance(dtype, type): + nb_dt = from_dtype(np.dtype(dtype)) + _dtype = dtype + elif isinstance(dtype, types.NumberClass): + nb_dt = dtype + _dtype = as_dtype(nb_dt) + else: + raise TypingError("Argument dtype is not one of the" + + " expected type(s): " + + "np.int32, np.int64, np.int16, np.int8, " + "np.uint32, np.uint64, np.uint16, np.uint8, " + "np.bool_") + + if _dtype == np.bool_: + int_func = random_methods.random_bounded_bool_fill + lower_bound = -1 + upper_bound = 2 + else: + try: + i_info = np.iinfo(_dtype) + except ValueError: + raise TypingError("Argument dtype is not one of the" + + " expected type(s): " + + "np.int32, np.int64, np.int16, np.int8, " + "np.uint32, np.uint64, np.uint16, np.uint8, " + "np.bool_") + int_func = getattr(random_methods, + f'random_bounded_uint{i_info.bits}_fill') + lower_bound = i_info.min + upper_bound = i_info.max + + if is_nonelike(size): + def impl(inst, low, high, size=None, + dtype=np.int64, endpoint=False): + random_methods._randint_arg_check(low, high, endpoint, + lower_bound, upper_bound) + if not endpoint: + high -= dtype(1) + low = dtype(low) + high = 
dtype(high) + rng = high - low + return int_func(inst.bit_generator, low, rng, 1, dtype)[0] + else: + low = dtype(low) + high = dtype(high) + rng = high - low + return int_func(inst.bit_generator, low, rng, 1, dtype)[0] + return impl + else: + check_size(size) + + def impl(inst, low, high, size=None, + dtype=np.int64, endpoint=False): + random_methods._randint_arg_check(low, high, endpoint, + lower_bound, upper_bound) + if not endpoint: + high -= dtype(1) + low = dtype(low) + high = dtype(high) + rng = high - low + return int_func(inst.bit_generator, low, rng, size, dtype) + else: + low = dtype(low) + high = dtype(high) + rng = high - low + return int_func(inst.bit_generator, low, rng, size, dtype) + return impl + + +# The following `shuffle` implementation is a direct translation from: +# https://github.com/numpy/numpy/blob/95e3e7f445407e4f355b23d6a9991d8774f0eb0c/numpy/random/_generator.pyx#L4578 + +# Overload the Generator().shuffle() +@overload_method(types.NumPyRandomGeneratorType, 'shuffle') +def NumPyRandomGeneratorType_shuffle(inst, x, axis=0): + check_types(x, [types.Array], 'x') + check_types(axis, [int, types.Integer], 'axis') + + def impl(inst, x, axis=0): + if axis < 0: + axis = axis + x.ndim + if axis > x.ndim - 1 or axis < 0: + raise IndexError("Axis is out of bounds for the given array") + + z = np.swapaxes(x, 0, axis) + buf = np.empty_like(z[0, ...]) + + for i in range(len(z) - 1, 0, -1): + j = types.intp(random_methods.random_interval(inst.bit_generator, + i)) + if i == j: + continue + buf[...] = z[j, ...] + z[j, ...] = z[i, ...] + z[i, ...] 
= buf + + return impl + + +# The following `permutation` implementation is a direct translation from: +# https://github.com/numpy/numpy/blob/95e3e7f445407e4f355b23d6a9991d8774f0eb0c/numpy/random/_generator.pyx#L4710 +# Overload the Generator().permutation() +@overload_method(types.NumPyRandomGeneratorType, 'permutation') +def NumPyRandomGeneratorType_permutation(inst, x, axis=0): + check_types(x, [types.Array, types.Integer], 'x') + check_types(axis, [int, types.Integer], 'axis') + + IS_INT = isinstance(x, types.Integer) + + def impl(inst, x, axis=0): + if IS_INT: + new_arr = np.arange(x) + # NumPy ignores the axis argument when x is an integer + inst.shuffle(new_arr) + else: + new_arr = x.copy() + inst.shuffle(new_arr, axis=axis) + return new_arr + + return impl + + +# Overload the Generator().random() +@overload_method(types.NumPyRandomGeneratorType, 'random') +def NumPyRandomGeneratorType_random(inst, size=None, dtype=np.float64): + dist_func, nb_dt = _get_proper_func(next_float, next_double, + dtype, "random") + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, size=None, dtype=np.float64): + return nb_dt(dist_func(inst.bit_generator)) + return impl + else: + check_size(size) + + def impl(inst, size=None, dtype=np.float64): + out = np.empty(size, dtype=dtype) + out_f = out.flat + for i in range(out.size): + out_f[i] = dist_func(inst.bit_generator) + return out + return impl + + +# Overload the Generator().standard_exponential() method +@overload_method(types.NumPyRandomGeneratorType, 'standard_exponential') +def NumPyRandomGeneratorType_standard_exponential(inst, size=None, + dtype=np.float64, + method='zig'): + check_types(method, [types.UnicodeType, str], 'method') + dist_func_inv, nb_dt = _get_proper_func( + random_standard_exponential_inv_f, + random_standard_exponential_inv, + dtype + ) + + dist_func, nb_dt = _get_proper_func(random_standard_exponential_f, + random_standard_exponential, + dtype) + + if 
isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, size=None, dtype=np.float64, method='zig'): + if method == 'zig': + return nb_dt(dist_func(inst.bit_generator)) + elif method == 'inv': + return nb_dt(dist_func_inv(inst.bit_generator)) + else: + raise ValueError("Method must be either 'zig' or 'inv'") + return impl + else: + check_size(size) + + def impl(inst, size=None, dtype=np.float64, method='zig'): + out = np.empty(size, dtype=dtype) + out_f = out.flat + if method == 'zig': + for i in range(out.size): + out_f[i] = dist_func(inst.bit_generator) + elif method == 'inv': + for i in range(out.size): + out_f[i] = dist_func_inv(inst.bit_generator) + else: + raise ValueError("Method must be either 'zig' or 'inv'") + return out + return impl + + +# Overload the Generator().standard_normal() method +@overload_method(types.NumPyRandomGeneratorType, 'standard_normal') +def NumPyRandomGeneratorType_standard_normal(inst, size=None, dtype=np.float64): + dist_func, nb_dt = _get_proper_func(random_standard_normal_f, + random_standard_normal, + dtype) + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, size=None, dtype=np.float64): + return nb_dt(dist_func(inst.bit_generator)) + return impl + else: + check_size(size) + + def impl(inst, size=None, dtype=np.float64): + out = np.empty(size, dtype=dtype) + out_f = out.flat + for i in range(out.size): + out_f[i] = dist_func(inst.bit_generator) + return out + return impl + + +# Overload the Generator().standard_gamma() method +@overload_method(types.NumPyRandomGeneratorType, 'standard_gamma') +def NumPyRandomGeneratorType_standard_gamma(inst, shape, size=None, + dtype=np.float64): + check_types(shape, [types.Float, types.Integer, int, float], 'shape') + dist_func, nb_dt = _get_proper_func(random_standard_gamma_f, + random_standard_gamma, + dtype) + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def 
impl(inst, shape, size=None, dtype=np.float64): + return nb_dt(dist_func(inst.bit_generator, shape)) + return impl + else: + check_size(size) + + def impl(inst, shape, size=None, dtype=np.float64): + out = np.empty(size, dtype=dtype) + out_f = out.flat + for i in range(out.size): + out_f[i] = dist_func(inst.bit_generator, shape) + return out + return impl + + +# Overload the Generator().normal() method +@overload_method(types.NumPyRandomGeneratorType, 'normal') +def NumPyRandomGeneratorType_normal(inst, loc=0.0, scale=1.0, + size=None): + check_types(loc, [types.Float, types.Integer, int, float], 'loc') + check_types(scale, [types.Float, types.Integer, int, float], 'scale') + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, loc=0.0, scale=1.0, size=None): + return random_normal(inst.bit_generator, loc, scale) + return impl + else: + check_size(size) + + def impl(inst, loc=0.0, scale=1.0, size=None): + out = np.empty(size, dtype=np.float64) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_normal(inst.bit_generator, loc, scale) + return out + return impl + + +# Overload the Generator().uniform() method +@overload_method(types.NumPyRandomGeneratorType, 'uniform') +def NumPyRandomGeneratorType_uniform(inst, low=0.0, high=1.0, + size=None): + check_types(low, [types.Float, types.Integer, int, float], 'low') + check_types(high, [types.Float, types.Integer, int, float], 'high') + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, low=0.0, high=1.0, size=None): + return random_uniform(inst.bit_generator, low, high - low) + return impl + else: + check_size(size) + + def impl(inst, low=0.0, high=1.0, size=None): + out = np.empty(size, dtype=np.float64) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_uniform(inst.bit_generator, low, high - low) + return out + return impl + + +# Overload the Generator().exponential() method 
+@overload_method(types.NumPyRandomGeneratorType, 'exponential') +def NumPyRandomGeneratorType_exponential(inst, scale=1.0, size=None): + check_types(scale, [types.Float, types.Integer, int, float], 'scale') + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, scale=1.0, size=None): + return random_exponential(inst.bit_generator, scale) + return impl + else: + check_size(size) + + def impl(inst, scale=1.0, size=None): + out = np.empty(size, dtype=np.float64) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_exponential(inst.bit_generator, scale) + return out + return impl + + +# Overload the Generator().gamma() method +@overload_method(types.NumPyRandomGeneratorType, 'gamma') +def NumPyRandomGeneratorType_gamma(inst, shape, scale=1.0, size=None): + check_types(shape, [types.Float, types.Integer, int, float], 'shape') + check_types(scale, [types.Float, types.Integer, int, float], 'scale') + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, shape, scale=1.0, size=None): + return random_gamma(inst.bit_generator, shape, scale) + return impl + else: + check_size(size) + + def impl(inst, shape, scale=1.0, size=None): + out = np.empty(size, dtype=np.float64) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_gamma(inst.bit_generator, shape, scale) + return out + return impl + + +# Overload the Generator().beta() method +@overload_method(types.NumPyRandomGeneratorType, 'beta') +def NumPyRandomGeneratorType_beta(inst, a, b, size=None): + check_types(a, [types.Float, types.Integer, int, float], 'a') + check_types(b, [types.Float, types.Integer, int, float], 'b') + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, a, b, size=None): + return random_beta(inst.bit_generator, a, b) + return impl + else: + check_size(size) + + def impl(inst, a, b, size=None): + out = np.empty(size) + out_f = out.flat + 
for i in range(out.size): + out_f[i] = random_beta(inst.bit_generator, a, b) + return out + return impl + + +# Overload the Generator().f() method +@overload_method(types.NumPyRandomGeneratorType, 'f') +def NumPyRandomGeneratorType_f(inst, dfnum, dfden, size=None): + check_types(dfnum, [types.Float, types.Integer, int, float], 'dfnum') + check_types(dfden, [types.Float, types.Integer, int, float], 'dfden') + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, dfnum, dfden, size=None): + return random_f(inst.bit_generator, dfnum, dfden) + return impl + else: + check_size(size) + + def impl(inst, dfnum, dfden, size=None): + out = np.empty(size) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_f(inst.bit_generator, dfnum, dfden) + return out + return impl + + +# Overload the Generator().chisquare() method +@overload_method(types.NumPyRandomGeneratorType, 'chisquare') +def NumPyRandomGeneratorType_chisquare(inst, df, size=None): + check_types(df, [types.Float, types.Integer, int, float], 'df') + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, df, size=None): + return random_chisquare(inst.bit_generator, df) + return impl + else: + check_size(size) + + def impl(inst, df, size=None): + out = np.empty(size) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_chisquare(inst.bit_generator, df) + return out + return impl + + +@overload_method(types.NumPyRandomGeneratorType, 'standard_cauchy') +def NumPyRandomGeneratorType_standard_cauchy(inst, size=None): + + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, size=None): + return random_standard_cauchy(inst.bit_generator) + return impl + else: + check_size(size) + + def impl(inst, size=None): + out = np.empty(size) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_standard_cauchy(inst.bit_generator) + return out + return impl + + 
+@overload_method(types.NumPyRandomGeneratorType, 'pareto') +def NumPyRandomGeneratorType_pareto(inst, a, size=None): + check_types(a, [types.Float, types.Integer, int, float], 'a') + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, a, size=None): + return random_pareto(inst.bit_generator, a) + return impl + else: + check_size(size) + + def impl(inst, a, size=None): + out = np.empty(size) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_pareto(inst.bit_generator, a) + return out + return impl + + +@overload_method(types.NumPyRandomGeneratorType, 'weibull') +def NumPyRandomGeneratorType_weibull(inst, a, size=None): + check_types(a, [types.Float, types.Integer, int, float], 'a') + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, a, size=None): + return random_weibull(inst.bit_generator, a) + return impl + else: + check_size(size) + + def impl(inst, a, size=None): + out = np.empty(size) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_weibull(inst.bit_generator, a) + return out + return impl + + +@overload_method(types.NumPyRandomGeneratorType, 'power') +def NumPyRandomGeneratorType_power(inst, a, size=None): + check_types(a, [types.Float, types.Integer, int, float], 'a') + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, a, size=None): + return random_power(inst.bit_generator, a) + return impl + else: + check_size(size) + + def impl(inst, a, size=None): + out = np.empty(size) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_power(inst.bit_generator, a) + return out + return impl + + +@overload_method(types.NumPyRandomGeneratorType, 'laplace') +def NumPyRandomGeneratorType_laplace(inst, loc=0.0, scale=1.0, size=None): + check_types(loc, [types.Float, types.Integer, int, float], 'loc') + check_types(scale, [types.Float, types.Integer, int, float], 'scale') + if 
isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, loc=0.0, scale=1.0, size=None): + return random_laplace(inst.bit_generator, loc, scale) + return impl + else: + check_size(size) + + def impl(inst, loc=0.0, scale=1.0, size=None): + out = np.empty(size) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_laplace(inst.bit_generator, loc, scale) + return out + return impl + + +@overload_method(types.NumPyRandomGeneratorType, 'logistic') +def NumPyRandomGeneratorType_logistic(inst, loc=0.0, scale=1.0, size=None): + check_types(loc, [types.Float, types.Integer, int, float], 'loc') + check_types(scale, [types.Float, types.Integer, int, float], 'scale') + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, loc=0.0, scale=1.0, size=None): + return random_logistic(inst.bit_generator, loc, scale) + return impl + else: + check_size(size) + + def impl(inst, loc=0.0, scale=1.0, size=None): + out = np.empty(size) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_logistic(inst.bit_generator, loc, scale) + return out + return impl + + +@overload_method(types.NumPyRandomGeneratorType, 'lognormal') +def NumPyRandomGeneratorType_lognormal(inst, mean=0.0, sigma=1.0, size=None): + check_types(mean, [types.Float, types.Integer, int, float], 'mean') + check_types(sigma, [types.Float, types.Integer, int, float], 'sigma') + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, mean=0.0, sigma=1.0, size=None): + return random_lognormal(inst.bit_generator, mean, sigma) + return impl + else: + check_size(size) + + def impl(inst, mean=0.0, sigma=1.0, size=None): + out = np.empty(size) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_lognormal(inst.bit_generator, mean, sigma) + return out + return impl + + +@overload_method(types.NumPyRandomGeneratorType, 'rayleigh') +def 
NumPyRandomGeneratorType_rayleigh(inst, scale=1.0, size=None): + check_types(scale, [types.Float, types.Integer, int, float], 'scale') + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, scale=1.0, size=None): + return random_rayleigh(inst.bit_generator, scale) + return impl + else: + check_size(size) + + def impl(inst, scale=1.0, size=None): + out = np.empty(size) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_rayleigh(inst.bit_generator, scale) + return out + return impl + + +@overload_method(types.NumPyRandomGeneratorType, 'standard_t') +def NumPyRandomGeneratorType_standard_t(inst, df, size=None): + check_types(df, [types.Float, types.Integer, int, float], 'df') + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, df, size=None): + return random_standard_t(inst.bit_generator, df) + return impl + else: + check_size(size) + + def impl(inst, df, size=None): + out = np.empty(size) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_standard_t(inst.bit_generator, df) + return out + return impl + + +@overload_method(types.NumPyRandomGeneratorType, 'wald') +def NumPyRandomGeneratorType_wald(inst, mean, scale, size=None): + check_types(mean, [types.Float, types.Integer, int, float], 'mean') + check_types(scale, [types.Float, types.Integer, int, float], 'scale') + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, mean, scale, size=None): + return random_wald(inst.bit_generator, mean, scale) + return impl + else: + check_size(size) + + def impl(inst, mean, scale, size=None): + out = np.empty(size) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_wald(inst.bit_generator, mean, scale) + return out + return impl + + +@overload_method(types.NumPyRandomGeneratorType, 'geometric') +def NumPyRandomGeneratorType_geometric(inst, p, size=None): + check_types(p, [types.Float, 
types.Integer, int, float], 'p') + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, p, size=None): + return np.int64(random_geometric(inst.bit_generator, p)) + return impl + else: + check_size(size) + + def impl(inst, p, size=None): + out = np.empty(size, dtype=np.int64) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_geometric(inst.bit_generator, p) + return out + return impl + + +@overload_method(types.NumPyRandomGeneratorType, 'zipf') +def NumPyRandomGeneratorType_zipf(inst, a, size=None): + check_types(a, [types.Float, types.Integer, int, float], 'a') + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, a, size=None): + return np.int64(random_zipf(inst.bit_generator, a)) + return impl + else: + check_size(size) + + def impl(inst, a, size=None): + out = np.empty(size, dtype=np.int64) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_zipf(inst.bit_generator, a) + return out + return impl + + +@overload_method(types.NumPyRandomGeneratorType, 'triangular') +def NumPyRandomGeneratorType_triangular(inst, left, mode, right, size=None): + check_types(left, [types.Float, types.Integer, int, float], 'left') + check_types(mode, [types.Float, types.Integer, int, float], 'mode') + check_types(right, [types.Float, types.Integer, int, float], 'right') + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, left, mode, right, size=None): + return random_triangular(inst.bit_generator, left, mode, right) + return impl + else: + check_size(size) + + def impl(inst, left, mode, right, size=None): + out = np.empty(size) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_triangular(inst.bit_generator, + left, mode, right) + return out + return impl + + +@overload_method(types.NumPyRandomGeneratorType, 'poisson') +def NumPyRandomGeneratorType_poisson(inst, lam , size=None): + check_types(lam, 
[types.Float, types.Integer, int, float], 'lam') + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, lam , size=None): + return np.int64(random_poisson(inst.bit_generator, lam)) + return impl + else: + check_size(size) + + def impl(inst, lam , size=None): + out = np.empty(size, dtype=np.int64) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_poisson(inst.bit_generator, lam) + return out + return impl + + +@overload_method(types.NumPyRandomGeneratorType, 'negative_binomial') +def NumPyRandomGeneratorType_negative_binomial(inst, n, p, size=None): + check_types(n, [types.Float, types.Integer, int, float], 'n') + check_types(p, [types.Float, types.Integer, int, float], 'p') + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, n, p , size=None): + return np.int64(random_negative_binomial(inst.bit_generator, n, p)) + return impl + else: + check_size(size) + + def impl(inst, n, p , size=None): + out = np.empty(size, dtype=np.int64) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_negative_binomial(inst.bit_generator, n, p) + return out + return impl + + +@overload_method(types.NumPyRandomGeneratorType, 'noncentral_chisquare') +def NumPyRandomGeneratorType_noncentral_chisquare(inst, df, nonc, size=None): + check_types(df, [types.Float, types.Integer, int, float], 'df') + check_types(nonc, [types.Float, types.Integer, int, float], 'nonc') + if isinstance(size, types.Omitted): + size = size.value + + @register_jitable + def check_arg_bounds(df, nonc): + if df <= 0: + raise ValueError("df <= 0") + if nonc < 0: + raise ValueError("nonc < 0") + + if is_nonelike(size): + def impl(inst, df, nonc, size=None): + check_arg_bounds(df, nonc) + return np.float64(random_noncentral_chisquare(inst.bit_generator, + df, nonc)) + return impl + else: + check_size(size) + + def impl(inst, df, nonc, size=None): + check_arg_bounds(df, nonc) + out = np.empty(size, 
dtype=np.float64) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_noncentral_chisquare(inst.bit_generator, + df, nonc) + return out + return impl + + +@overload_method(types.NumPyRandomGeneratorType, 'noncentral_f') +def NumPyRandomGeneratorType_noncentral_f(inst, dfnum, dfden, nonc, size=None): + check_types(dfnum, [types.Float, types.Integer, int, float], 'dfnum') + check_types(dfden, [types.Float, types.Integer, int, float], 'dfden') + check_types(nonc, [types.Float, types.Integer, int, float], 'nonc') + if isinstance(size, types.Omitted): + size = size.value + + @register_jitable + def check_arg_bounds(dfnum, dfden, nonc): + if dfnum <= 0: + raise ValueError("dfnum <= 0") + if dfden <= 0: + raise ValueError("dfden <= 0") + if nonc < 0: + raise ValueError("nonc < 0") + + if is_nonelike(size): + def impl(inst, dfnum, dfden, nonc, size=None): + check_arg_bounds(dfnum, dfden, nonc) + return np.float64(random_noncentral_f(inst.bit_generator, + dfnum, dfden, nonc)) + return impl + else: + check_size(size) + + def impl(inst, dfnum, dfden, nonc, size=None): + check_arg_bounds(dfnum, dfden, nonc) + out = np.empty(size, dtype=np.float64) + out_f = out.flat + for i in range(out.size): + out_f[i] = random_noncentral_f(inst.bit_generator, + dfnum, dfden, nonc) + return out + return impl + + +@overload_method(types.NumPyRandomGeneratorType, 'logseries') +def NumPyRandomGeneratorType_logseries(inst, p, size=None): + check_types(p, [types.Float, types.Integer, int, float], 'p') + if isinstance(size, types.Omitted): + size = size.value + + @register_jitable + def check_arg_bounds(p): + if p < 0 or p >= 1 or np.isnan(p): + raise ValueError("p < 0, p >= 1 or p is NaN") + + if is_nonelike(size): + def impl(inst, p, size=None): + check_arg_bounds(p) + return np.int64(random_logseries(inst.bit_generator, p)) + return impl + else: + check_size(size) + + def impl(inst, p, size=None): + check_arg_bounds(p) + out = np.empty(size, dtype=np.int64) + out_f = out.flat + 
for i in range(out.size): + out_f[i] = random_logseries(inst.bit_generator, p) + return out + return impl + + +@overload_method(types.NumPyRandomGeneratorType, 'binomial') +def NumPyRandomGeneratorType_binomial(inst, n, p, size=None): + check_types(n, [types.Float, types.Integer, int, float], 'n') + check_types(p, [types.Float, types.Integer, int, float], 'p') + + if isinstance(size, types.Omitted): + size = size.value + + if is_nonelike(size): + def impl(inst, n, p, size=None): + return np.int64(random_binomial(inst.bit_generator, n, p)) + return impl + else: + check_size(size) + + def impl(inst, n, p, size=None): + out = np.empty(size, dtype=np.int64) + for i in np.ndindex(size): + out[i] = random_binomial(inst.bit_generator, n, p) + return out + return impl diff --git a/venv/lib/python3.10/site-packages/numba/np/random/new_distributions.py b/venv/lib/python3.10/site-packages/numba/np/random/new_distributions.py new file mode 100644 index 0000000000000000000000000000000000000000..2a2e8f72d3407a788266c578982f4a6006c94cc6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/random/new_distributions.py @@ -0,0 +1,719 @@ +""" +Algorithmic implementations for generating different types +of random distributions. 
+""" + +import numpy as np + +from numba.core.extending import register_jitable +from numba.np.random._constants import (wi_double, ki_double, + ziggurat_nor_r, fi_double, + wi_float, ki_float, + ziggurat_nor_inv_r_f, + ziggurat_nor_r_f, fi_float, + we_double, ke_double, + ziggurat_exp_r, fe_double, + we_float, ke_float, + ziggurat_exp_r_f, fe_float, + INT64_MAX, ziggurat_nor_inv_r) +from numba.np.random.generator_core import (next_double, next_float, + next_uint32, next_uint64) +# All of the following implementations are direct translations from: +# https://github.com/numpy/numpy/blob/7cfef93c77599bd387ecc6a15d186c5a46024dac/numpy/random/src/distributions/distributions.c + + +@register_jitable +def np_log1p(x): + return np.log1p(x) + + +@register_jitable +def np_log1pf(x): + return np.log1p(np.float32(x)) + + +@register_jitable +def random_rayleigh(bitgen, mode): + return mode * np.sqrt(2.0 * random_standard_exponential(bitgen)) + + +@register_jitable +def np_expm1(x): + return np.expm1(x) + + +@register_jitable +def random_standard_normal(bitgen): + while 1: + r = next_uint64(bitgen) + idx = r & 0xff + r >>= 8 + sign = r & 0x1 + rabs = (r >> 1) & 0x000fffffffffffff + x = rabs * wi_double[idx] + if (sign & 0x1): + x = -x + if rabs < ki_double[idx]: + return x + if idx == 0: + while 1: + xx = -ziggurat_nor_inv_r * np.log1p(-next_double(bitgen)) + yy = -np.log1p(-next_double(bitgen)) + if (yy + yy > xx * xx): + if ((rabs >> 8) & 0x1): + return -(ziggurat_nor_r + xx) + else: + return ziggurat_nor_r + xx + else: + if (((fi_double[idx - 1] - fi_double[idx]) * + next_double(bitgen) + fi_double[idx]) < + np.exp(-0.5 * x * x)): + return x + + +@register_jitable +def random_standard_normal_f(bitgen): + while 1: + r = next_uint32(bitgen) + idx = r & 0xff + sign = (r >> 8) & 0x1 + rabs = (r >> 9) & 0x0007fffff + x = np.float32(np.float32(rabs) * wi_float[idx]) + if (sign & 0x1): + x = -x + if (rabs < ki_float[idx]): + return x + if (idx == 0): + while 1: + xx = 
np.float32(-ziggurat_nor_inv_r_f * + np_log1pf(-next_float(bitgen))) + yy = np.float32(-np_log1pf(-next_float(bitgen))) + if (np.float32(yy + yy) > np.float32(xx * xx)): + if ((rabs >> 8) & 0x1): + return -np.float32(ziggurat_nor_r_f + xx) + else: + return np.float32(ziggurat_nor_r_f + xx) + else: + if (((fi_float[idx - 1] - fi_float[idx]) * next_float(bitgen) + + fi_float[idx]) < np.float32(np.exp(-np.float32(0.5) * x * x))): + return x + + +@register_jitable +def random_standard_exponential(bitgen): + while 1: + ri = next_uint64(bitgen) + ri >>= 3 + idx = ri & 0xFF + ri >>= 8 + x = ri * we_double[idx] + if (ri < ke_double[idx]): + return x + else: + if idx == 0: + return ziggurat_exp_r - np_log1p(-next_double(bitgen)) + elif ((fe_double[idx - 1] - fe_double[idx]) * next_double(bitgen) + + fe_double[idx] < np.exp(-x)): + return x + + +@register_jitable +def random_standard_exponential_f(bitgen): + while 1: + ri = next_uint32(bitgen) + ri >>= 1 + idx = ri & 0xFF + ri >>= 8 + x = np.float32(np.float32(ri) * we_float[idx]) + if (ri < ke_float[idx]): + return x + else: + if (idx == 0): + return np.float32(ziggurat_exp_r_f - + np.float32(np_log1pf(-next_float(bitgen)))) + elif ((fe_float[idx - 1] - fe_float[idx]) * next_float(bitgen) + + fe_float[idx] < np.float32(np.exp(np.float32(-x)))): + return x + + +@register_jitable +def random_standard_exponential_inv(bitgen): + return -np_log1p(-next_double(bitgen)) + + +@register_jitable +def random_standard_exponential_inv_f(bitgen): + return -np.log(np.float32(1.0) - next_float(bitgen)) + + +@register_jitable +def random_standard_gamma(bitgen, shape): + if (shape == 1.0): + return random_standard_exponential(bitgen) + elif (shape == 0.0): + return 0.0 + elif (shape < 1.0): + while 1: + U = next_double(bitgen) + V = random_standard_exponential(bitgen) + if (U <= 1.0 - shape): + X = pow(U, 1. / shape) + if (X <= V): + return X + else: + Y = -np.log((1 - U) / shape) + X = pow(1.0 - shape + shape * Y, 1. 
/ shape) + if (X <= (V + Y)): + return X + else: + b = shape - 1. / 3. + c = 1. / np.sqrt(9 * b) + while 1: + while 1: + X = random_standard_normal(bitgen) + V = 1.0 + c * X + if (V > 0.0): + break + + V = V * V * V + U = next_double(bitgen) + if (U < 1.0 - 0.0331 * (X * X) * (X * X)): + return (b * V) + + if (np.log(U) < 0.5 * X * X + b * (1. - V + np.log(V))): + return (b * V) + + +@register_jitable +def random_standard_gamma_f(bitgen, shape): + f32_one = np.float32(1.0) + shape = np.float32(shape) + if (shape == f32_one): + return random_standard_exponential_f(bitgen) + elif (shape == np.float32(0.0)): + return np.float32(0.0) + elif (shape < f32_one): + while 1: + U = next_float(bitgen) + V = random_standard_exponential_f(bitgen) + if (U <= f32_one - shape): + X = np.float32(pow(U, np.float32(f32_one / shape))) + if (X <= V): + return X + else: + Y = np.float32(-np.log(np.float32((f32_one - U) / shape))) + X = np.float32(pow(f32_one - shape + np.float32(shape * Y), + np.float32(f32_one / shape))) + if (X <= (V + Y)): + return X + else: + b = shape - f32_one / np.float32(3.0) + c = np.float32(f32_one / np.float32(np.sqrt(np.float32(9.0) * b))) + while 1: + while 1: + X = np.float32(random_standard_normal_f(bitgen)) + V = np.float32(f32_one + c * X) + if (V > np.float32(0.0)): + break + + V = np.float32(V * V * V) + U = next_float(bitgen) + if (U < f32_one - np.float32(0.0331) * (X * X) * (X * X)): + return np.float32(b * V) + + if (np.log(U) < np.float32(0.5) * X * X + b * + (f32_one - V + np.log(V))): + return np.float32(b * V) + + +@register_jitable +def random_normal(bitgen, loc, scale): + scaled_normal = scale * random_standard_normal(bitgen) + return loc + scaled_normal + + +@register_jitable +def random_normal_f(bitgen, loc, scale): + scaled_normal = np.float32(scale * random_standard_normal_f(bitgen)) + return np.float32(loc + scaled_normal) + + +@register_jitable +def random_exponential(bitgen, scale): + return scale * random_standard_exponential(bitgen) 
+ + +@register_jitable +def random_uniform(bitgen, lower, range): + scaled_uniform = range * next_double(bitgen) + return lower + scaled_uniform + + +@register_jitable +def random_gamma(bitgen, shape, scale): + return scale * random_standard_gamma(bitgen, shape) + + +@register_jitable +def random_gamma_f(bitgen, shape, scale): + return np.float32(scale * random_standard_gamma_f(bitgen, shape)) + + +@register_jitable +def random_beta(bitgen, a, b): + if a <= 1.0 and b <= 1.0: + while 1: + U = next_double(bitgen) + V = next_double(bitgen) + X = pow(U, 1.0 / a) + Y = pow(V, 1.0 / b) + XpY = X + Y + if XpY <= 1.0 and XpY > 0.0: + if (X + Y > 0): + return X / XpY + else: + logX = np.log(U) / a + logY = np.log(V) / b + logM = min(logX, logY) + logX -= logM + logY -= logM + + return np.exp(logX - np.log(np.exp(logX) + np.exp(logY))) + else: + Ga = random_standard_gamma(bitgen, a) + Gb = random_standard_gamma(bitgen, b) + return Ga / (Ga + Gb) + + +@register_jitable +def random_chisquare(bitgen, df): + return 2.0 * random_standard_gamma(bitgen, df / 2.0) + + +@register_jitable +def random_f(bitgen, dfnum, dfden): + return ((random_chisquare(bitgen, dfnum) * dfden) / + (random_chisquare(bitgen, dfden) * dfnum)) + + +@register_jitable +def random_standard_cauchy(bitgen): + return random_standard_normal(bitgen) / random_standard_normal(bitgen) + + +@register_jitable +def random_pareto(bitgen, a): + return np_expm1(random_standard_exponential(bitgen) / a) + + +@register_jitable +def random_weibull(bitgen, a): + if (a == 0.0): + return 0.0 + return pow(random_standard_exponential(bitgen), 1. / a) + + +@register_jitable +def random_power(bitgen, a): + return pow(-np_expm1(-random_standard_exponential(bitgen)), 1. 
/ a) + + +@register_jitable +def random_laplace(bitgen, loc, scale): + U = next_double(bitgen) + while U <= 0: + U = next_double(bitgen) + if (U >= 0.5): + U = loc - scale * np.log(2.0 - U - U) + elif (U > 0.0): + U = loc + scale * np.log(U + U) + return U + + +@register_jitable +def random_logistic(bitgen, loc, scale): + U = next_double(bitgen) + while U <= 0.0: + U = next_double(bitgen) + return loc + scale * np.log(U / (1.0 - U)) + + +@register_jitable +def random_lognormal(bitgen, mean, sigma): + return np.exp(random_normal(bitgen, mean, sigma)) + + +@register_jitable +def random_standard_t(bitgen, df): + num = random_standard_normal(bitgen) + denom = random_standard_gamma(bitgen, df / 2) + return np.sqrt(df / 2) * num / np.sqrt(denom) + + +@register_jitable +def random_wald(bitgen, mean, scale): + mu_2l = mean / (2 * scale) + Y = random_standard_normal(bitgen) + Y = mean * Y * Y + X = mean + mu_2l * (Y - np.sqrt(4 * scale * Y + Y * Y)) + U = next_double(bitgen) + if (U <= mean / (mean + X)): + return X + else: + return mean * mean / X + + +@register_jitable +def random_geometric_search(bitgen, p): + X = 1 + sum = prod = p + q = 1.0 - p + U = next_double(bitgen) + while (U > sum): + prod *= q + sum += prod + X = X + 1 + return X + + +@register_jitable +def random_geometric_inversion(bitgen, p): + return np.ceil(-random_standard_exponential(bitgen) / np.log1p(-p)) + + +@register_jitable +def random_geometric(bitgen, p): + if (p >= 0.333333333333333333333333): + return random_geometric_search(bitgen, p) + else: + return random_geometric_inversion(bitgen, p) + + +@register_jitable +def random_zipf(bitgen, a): + am1 = a - 1.0 + b = pow(2.0, am1) + while 1: + U = 1.0 - next_double(bitgen) + V = next_double(bitgen) + X = np.floor(pow(U, -1.0 / am1)) + if (X > INT64_MAX or X < 1.0): + continue + + T = pow(1.0 + 1.0 / X, am1) + if (V * X * (T - 1.0) / (b - 1.0) <= T / b): + return X + + +@register_jitable +def random_triangular(bitgen, left, mode, + right): + base = 
right - left + leftbase = mode - left + ratio = leftbase / base + leftprod = leftbase * base + rightprod = (right - mode) * base + + U = next_double(bitgen) + if (U <= ratio): + return left + np.sqrt(U * leftprod) + else: + return right - np.sqrt((1.0 - U) * rightprod) + + +@register_jitable +def random_loggam(x): + a = [8.333333333333333e-02, -2.777777777777778e-03, + 7.936507936507937e-04, -5.952380952380952e-04, + 8.417508417508418e-04, -1.917526917526918e-03, + 6.410256410256410e-03, -2.955065359477124e-02, + 1.796443723688307e-01, -1.39243221690590e+00] + + if ((x == 1.0) or (x == 2.0)): + return 0.0 + elif (x < 7.0): + n = int(7 - x) + else: + n = 0 + + x0 = x + n + x2 = (1.0 / x0) * (1.0 / x0) + # /* log(2 * M_PI) */ + lg2pi = 1.8378770664093453e+00 + gl0 = a[9] + + for k in range(0, 9): + gl0 *= x2 + gl0 += a[8 - k] + + gl = gl0 / x0 + 0.5 * lg2pi + (x0 - 0.5) * np.log(x0) - x0 + if (x < 7.0): + for k in range(1, n + 1): + gl = gl - np.log(x0 - 1.0) + x0 = x0 - 1.0 + + return gl + + +@register_jitable +def random_poisson_mult(bitgen, lam): + enlam = np.exp(-lam) + X = 0 + prod = 1.0 + while (1): + U = next_double(bitgen) + prod *= U + if (prod > enlam): + X += 1 + else: + return X + + +@register_jitable +def random_poisson_ptrs(bitgen, lam): + + slam = np.sqrt(lam) + loglam = np.log(lam) + b = 0.931 + 2.53 * slam + a = -0.059 + 0.02483 * b + invalpha = 1.1239 + 1.1328 / (b - 3.4) + vr = 0.9277 - 3.6224 / (b - 2) + + while (1): + U = next_double(bitgen) - 0.5 + V = next_double(bitgen) + us = 0.5 - np.fabs(U) + k = int((2 * a / us + b) * U + lam + 0.43) + if ((us >= 0.07) and (V <= vr)): + return k + + if ((k < 0) or ((us < 0.013) and (V > us))): + continue + + # /* log(V) == log(0.0) ok here */ + # /* if U==0.0 so that us==0.0, log is ok since always returns */ + if ((np.log(V) + np.log(invalpha) - np.log(a / (us * us) + b)) <= + (-lam + k * loglam - random_loggam(k + 1))): + return k + + +@register_jitable +def random_poisson(bitgen, lam): + if (lam >= 10): 
+ return random_poisson_ptrs(bitgen, lam) + elif (lam == 0): + return 0 + else: + return random_poisson_mult(bitgen, lam) + + +@register_jitable +def random_negative_binomial(bitgen, n, p): + Y = random_gamma(bitgen, n, (1 - p) / p) + return random_poisson(bitgen, Y) + + +@register_jitable +def random_noncentral_chisquare(bitgen, df, nonc): + if np.isnan(nonc): + return np.nan + + if nonc == 0: + return random_chisquare(bitgen, df) + + if 1 < df: + Chi2 = random_chisquare(bitgen, df - 1) + n = random_standard_normal(bitgen) + np.sqrt(nonc) + return Chi2 + n * n + else: + i = random_poisson(bitgen, nonc / 2.0) + return random_chisquare(bitgen, df + 2 * i) + + +@register_jitable +def random_noncentral_f(bitgen, dfnum, dfden, nonc): + t = random_noncentral_chisquare(bitgen, dfnum, nonc) * dfden + return t / (random_chisquare(bitgen, dfden) * dfnum) + + +@register_jitable +def random_logseries(bitgen, p): + r = np_log1p(-p) + + while 1: + V = next_double(bitgen) + if (V >= p): + return 1 + U = next_double(bitgen) + q = -np.expm1(r * U) + if (V <= q * q): + result = np.int64(np.floor(1 + np.log(V) / np.log(q))) + if result < 1 or V == 0.0: + continue + else: + return result + if (V >= q): + return 1 + else: + return 2 + + +@register_jitable +def random_binomial_btpe(bitgen, n, p): + r = min(p, 1.0 - p) + q = 1.0 - r + fm = n * r + r + m = int(np.floor(fm)) + p1 = int(np.floor(2.195 * np.sqrt(n * r * q) - 4.6 * q) + 0.5) + xm = m + 0.5 + xl = xm - p1 + xr = xm + p1 + c = 0.134 + 20.5 / (15.3 + m) + a = (fm - xl) / (fm - xl * r) + laml = a * (1.0 + a / 2.0) + a = (xr - fm) / (xr * q) + lamr = a * (1.0 + a / 2.0) + p2 = p1 * (1.0 + 2.0 * c) + p3 = p2 + c / laml + p4 = p3 + c / lamr + + case = 10 + y = k = 0 + while 1: + if case == 10: + nrq = n * r * q + u = next_double(bitgen) * p4 + v = next_double(bitgen) + if (u > p1): + case = 20 + continue + y = int(np.floor(xm - p1 * v + u)) + case = 60 + continue + elif case == 20: + if (u > p2): + case = 30 + continue + x = xl + 
(u - p1) / c + v = v * c + 1.0 - np.fabs(m - x + 0.5) / p1 + if (v > 1.0): + case = 10 + continue + y = int(np.floor(x)) + case = 50 + continue + elif case == 30: + if (u > p3): + case = 40 + continue + y = int(np.floor(xl + np.log(v) / laml)) + if ((y < 0) or (v == 0.0)): + case = 10 + continue + v = v * (u - p2) * laml + case = 50 + continue + elif case == 40: + y = int(np.floor(xr - np.log(v) / lamr)) + if ((y > n) or (v == 0.0)): + case = 10 + continue + v = v * (u - p3) * lamr + case = 50 + continue + elif case == 50: + k = abs(y - m) + if ((k > 20) and (k < ((nrq) / 2.0 - 1))): + case = 52 + continue + s = r / q + a = s * (n + 1) + F = 1.0 + if (m < y): + for i in range(m + 1, y + 1): + F = F * (a / i - s) + elif (m > y): + for i in range(y + 1, m + 1): + F = F / (a / i - s) + if (v > F): + case = 10 + continue + case = 60 + continue + elif case == 52: + rho = (k / (nrq)) * \ + ((k * (k / 3.0 + 0.625) + 0.16666666666666666) / + nrq + 0.5) + t = -k * k / (2 * nrq) + A = np.log(v) + if (A < (t - rho)): + case = 60 + continue + if (A > (t + rho)): + case = 10 + continue + x1 = y + 1 + f1 = m + 1 + z = n + 1 - m + w = n - y + 1 + x2 = x1 * x1 + f2 = f1 * f1 + z2 = z * z + w2 = w * w + if (A > (xm * np.log(f1 / x1) + (n - m + 0.5) * np.log(z / w) + + (y - m) * np.log(w * r / (x1 * q)) + + (13680. - (462. - (132. - (99. - 140. / f2) / f2) / f2) + / f2) / f1 / 166320. + + (13680. - (462. - (132. - (99. - 140. / z2) / z2) / z2) + / z2) / z / 166320. + + (13680. - (462. - (132. - (99. - 140. / x2) / x2) / x2) + / x2) / x1 / 166320. + + (13680. - (462. - (132. - (99. - 140. 
/ w2) / w2) / w2) + / w2) / w / 66320.)): + case = 10 + continue + elif case == 60: + if (p > 0.5): + y = n - y + return y + + +@register_jitable +def random_binomial_inversion(bitgen, n, p): + q = 1.0 - p + qn = np.exp(n * np.log(q)) + _np = n * p + bound = min(n, _np + 10.0 * np.sqrt(_np * q + 1)) + + X = 0 + px = qn + U = next_double(bitgen) + while (U > px): + X = X + 1 + if (X > bound): + X = 0 + px = qn + U = next_double(bitgen) + else: + U -= px + px = ((n - X + 1) * p * px) / (X * q) + + return X + + +@register_jitable +def random_binomial(bitgen, n, p): + if ((n == 0) or (p == 0.0)): + return 0 + + if (p <= 0.5): + if (p * n <= 30.0): + return random_binomial_inversion(bitgen, n, p) + else: + return random_binomial_btpe(bitgen, n, p) + else: + q = 1.0 - p + if (q * n <= 30.0): + return n - random_binomial_inversion(bitgen, n, q) + else: + return n - random_binomial_btpe(bitgen, n, q) diff --git a/venv/lib/python3.10/site-packages/numba/np/random/new_random_methods.py b/venv/lib/python3.10/site-packages/numba/np/random/new_random_methods.py new file mode 100644 index 0000000000000000000000000000000000000000..29ddf3a0131707ebd62000871ca5bf11b9430884 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/random/new_random_methods.py @@ -0,0 +1,364 @@ +import numpy as np + +from numba.core.extending import register_jitable + +from numba.np.random._constants import (UINT32_MAX, UINT64_MAX, + UINT16_MAX, UINT8_MAX) +from numba.np.random.generator_core import next_uint32, next_uint64 + +# All following implementations are direct translations from: +# https://github.com/numpy/numpy/blob/7cfef93c77599bd387ecc6a15d186c5a46024dac/numpy/random/src/distributions/distributions.c + + +@register_jitable +def gen_mask(max): + mask = np.uint64(max) + mask |= mask >> 1 + mask |= mask >> 2 + mask |= mask >> 4 + mask |= mask >> 8 + mask |= mask >> 16 + mask |= mask >> 32 + return mask + + +@register_jitable +def buffered_bounded_bool(bitgen, off, rng, bcnt, buf): + if 
(rng == 0): + return off, bcnt, buf + if not bcnt: + buf = next_uint32(bitgen) + bcnt = 31 + else: + buf >>= 1 + bcnt -= 1 + + return ((buf & 1) != 0), bcnt, buf + + +@register_jitable +def buffered_uint8(bitgen, bcnt, buf): + if not bcnt: + buf = next_uint32(bitgen) + bcnt = 3 + else: + buf >>= 8 + bcnt -= 1 + + return np.uint8(buf), bcnt, buf + + +@register_jitable +def buffered_uint16(bitgen, bcnt, buf): + if not bcnt: + buf = next_uint32(bitgen) + bcnt = 1 + else: + buf >>= 16 + bcnt -= 1 + + return np.uint16(buf), bcnt, buf + + +# The following implementations use Lemire's algorithm: +# https://arxiv.org/abs/1805.10941 +@register_jitable +def buffered_bounded_lemire_uint8(bitgen, rng, bcnt, buf): + """ + Generates a random unsigned 8 bit integer bounded + within a given interval using Lemire's rejection. + + The buffer acts as storage for a 32 bit integer + drawn from the associated BitGenerator so that + multiple integers of smaller bitsize can be generated + from a single draw of the BitGenerator. + """ + # Note: `rng` should not be 0xFF. When this happens `rng_excl` becomes + # zero. + rng_excl = np.uint8(rng) + np.uint8(1) + + assert (rng != 0xFF) + + # Generate a scaled random number. + n, bcnt, buf = buffered_uint8(bitgen, bcnt, buf) + m = np.uint16(n * rng_excl) + + # Rejection sampling to remove any bias + leftover = m & 0xFF + + if (leftover < rng_excl): + # `rng_excl` is a simple upper bound for `threshold`. + threshold = ((np.uint8(UINT8_MAX) - rng) % rng_excl) + + while (leftover < threshold): + n, bcnt, buf = buffered_uint8(bitgen, bcnt, buf) + m = np.uint16(n * rng_excl) + leftover = m & 0xFF + + return m >> 8, bcnt, buf + + +@register_jitable +def buffered_bounded_lemire_uint16(bitgen, rng, bcnt, buf): + """ + Generates a random unsigned 16 bit integer bounded + within a given interval using Lemire's rejection. 
+ + The buffer acts as storage for a 32 bit integer + drawn from the associated BitGenerator so that + multiple integers of smaller bitsize can be generated + from a single draw of the BitGenerator. + """ + # Note: `rng` should not be 0xFFFF. When this happens `rng_excl` becomes + # zero. + rng_excl = np.uint16(rng) + np.uint16(1) + + assert (rng != 0xFFFF) + + # Generate a scaled random number. + n, bcnt, buf = buffered_uint16(bitgen, bcnt, buf) + m = np.uint32(n * rng_excl) + + # Rejection sampling to remove any bias + leftover = m & 0xFFFF + + if (leftover < rng_excl): + # `rng_excl` is a simple upper bound for `threshold`. + threshold = ((np.uint16(UINT16_MAX) - rng) % rng_excl) + + while (leftover < threshold): + n, bcnt, buf = buffered_uint16(bitgen, bcnt, buf) + m = np.uint32(n * rng_excl) + leftover = m & 0xFFFF + + return m >> 16, bcnt, buf + + +@register_jitable +def buffered_bounded_lemire_uint32(bitgen, rng): + """ + Generates a random unsigned 32 bit integer bounded + within a given interval using Lemire's rejection. + """ + rng_excl = np.uint32(rng) + np.uint32(1) + + assert (rng != 0xFFFFFFFF) + + # Generate a scaled random number. + m = np.uint64(next_uint32(bitgen)) * np.uint64(rng_excl) + + # Rejection sampling to remove any bias + leftover = m & 0xFFFFFFFF + + if (leftover < rng_excl): + # `rng_excl` is a simple upper bound for `threshold`. + threshold = (UINT32_MAX - rng) % rng_excl + + while (leftover < threshold): + m = np.uint64(next_uint32(bitgen)) * np.uint64(rng_excl) + leftover = m & 0xFFFFFFFF + + return (m >> 32) + + +@register_jitable +def bounded_lemire_uint64(bitgen, rng): + """ + Generates a random unsigned 64 bit integer bounded + within a given interval using Lemire's rejection. 
+ """ + rng_excl = np.uint64(rng) + np.uint64(1) + + assert (rng != 0xFFFFFFFFFFFFFFFF) + + x = next_uint64(bitgen) + + leftover = np.uint64(x) * np.uint64(rng_excl) + + if (leftover < rng_excl): + threshold = (UINT64_MAX - rng) % rng_excl + + while (leftover < threshold): + x = next_uint64(bitgen) + leftover = np.uint64(x) * np.uint64(rng_excl) + + x0 = x & np.uint64(0xFFFFFFFF) + x1 = x >> 32 + rng_excl0 = rng_excl & np.uint64(0xFFFFFFFF) + rng_excl1 = rng_excl >> 32 + w0 = x0 * rng_excl0 + t = x1 * rng_excl0 + (w0 >> 32) + w1 = t & np.uint64(0xFFFFFFFF) + w2 = t >> 32 + w1 += x0 * rng_excl1 + m1 = x1 * rng_excl1 + w2 + (w1 >> 32) + + return m1 + + +@register_jitable +def random_bounded_uint64_fill(bitgen, low, rng, size, dtype): + """ + Returns a new array of given size with 64 bit integers + bounded by given interval. + """ + out = np.empty(size, dtype=dtype) + if rng == 0: + for i in np.ndindex(size): + out[i] = low + elif rng <= 0xFFFFFFFF: + if (rng == 0xFFFFFFFF): + for i in np.ndindex(size): + out[i] = low + next_uint32(bitgen) + else: + for i in np.ndindex(size): + out[i] = low + buffered_bounded_lemire_uint32(bitgen, rng) + + elif (rng == 0xFFFFFFFFFFFFFFFF): + for i in np.ndindex(size): + out[i] = low + next_uint64(bitgen) + else: + for i in np.ndindex(size): + out[i] = low + bounded_lemire_uint64(bitgen, rng) + + return out + + +@register_jitable +def random_bounded_uint32_fill(bitgen, low, rng, size, dtype): + """ + Returns a new array of given size with 32 bit integers + bounded by given interval. + """ + out = np.empty(size, dtype=dtype) + if rng == 0: + for i in np.ndindex(size): + out[i] = low + elif rng == 0xFFFFFFFF: + # Lemire32 doesn't support rng = 0xFFFFFFFF. 
+ for i in np.ndindex(size): + out[i] = low + next_uint32(bitgen) + else: + for i in np.ndindex(size): + out[i] = low + buffered_bounded_lemire_uint32(bitgen, rng) + return out + + +@register_jitable +def random_bounded_uint16_fill(bitgen, low, rng, size, dtype): + """ + Returns a new array of given size with 16 bit integers + bounded by given interval. + """ + buf = 0 + bcnt = 0 + + out = np.empty(size, dtype=dtype) + if rng == 0: + for i in np.ndindex(size): + out[i] = low + elif rng == 0xFFFF: + # Lemire16 doesn't support rng = 0xFFFF. + for i in np.ndindex(size): + val, bcnt, buf = buffered_uint16(bitgen, bcnt, buf) + out[i] = low + val + + else: + for i in np.ndindex(size): + val, bcnt, buf = \ + buffered_bounded_lemire_uint16(bitgen, rng, + bcnt, buf) + out[i] = low + val + return out + + +@register_jitable +def random_bounded_uint8_fill(bitgen, low, rng, size, dtype): + """ + Returns a new array of given size with 8 bit integers + bounded by given interval. + """ + buf = 0 + bcnt = 0 + + out = np.empty(size, dtype=dtype) + if rng == 0: + for i in np.ndindex(size): + out[i] = low + elif rng == 0xFF: + # Lemire8 doesn't support rng = 0xFF. + for i in np.ndindex(size): + val, bcnt, buf = buffered_uint8(bitgen, bcnt, buf) + out[i] = low + val + else: + for i in np.ndindex(size): + val, bcnt, buf = \ + buffered_bounded_lemire_uint8(bitgen, rng, + bcnt, buf) + out[i] = low + val + return out + + +@register_jitable +def random_bounded_bool_fill(bitgen, low, rng, size, dtype): + """ + Returns a new array of given size with boolean values. + """ + buf = 0 + bcnt = 0 + out = np.empty(size, dtype=dtype) + for i in np.ndindex(size): + val, bcnt, buf = buffered_bounded_bool(bitgen, low, rng, bcnt, buf) + out[i] = low + val + return out + + +@register_jitable +def _randint_arg_check(low, high, endpoint, lower_bound, upper_bound): + """ + Check that low and high are within the bounds + for the given datatype. 
+ """ + + if low < lower_bound: + raise ValueError("low is out of bounds") + + # This is being done to avoid high being accidentally + # casted to int64/32 while subtracting 1 before + # checking bounds, avoids overflow. + if high > 0: + high = np.uint64(high) + if not endpoint: + high -= np.uint64(1) + upper_bound = np.uint64(upper_bound) + if low > 0: + low = np.uint64(low) + if high > upper_bound: + raise ValueError("high is out of bounds") + if low > high: # -1 already subtracted, closed interval + raise ValueError("low is greater than high in given interval") + else: + if high > upper_bound: + raise ValueError("high is out of bounds") + if low > high: # -1 already subtracted, closed interval + raise ValueError("low is greater than high in given interval") + + +@register_jitable +def random_interval(bitgen, max_val): + if (max_val == 0): + return 0 + + max_val = np.uint64(max_val) + mask = np.uint64(gen_mask(max_val)) + + if (max_val <= 0xffffffff): + value = np.uint64(next_uint32(bitgen)) & mask + while value > max_val: + value = np.uint64(next_uint32(bitgen)) & mask + else: + value = next_uint64(bitgen) & mask + while value > max_val: + value = next_uint64(bitgen) & mask + + return np.uint64(value) diff --git a/venv/lib/python3.10/site-packages/numba/np/random/old_distributions.py b/venv/lib/python3.10/site-packages/numba/np/random/old_distributions.py new file mode 100644 index 0000000000000000000000000000000000000000..336b1cf0339b2c373b5623b4d3e4b48ede33767a --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/random/old_distributions.py @@ -0,0 +1,740 @@ +""" +Algorithmic implementations for generating different types +of random distributions. 
+""" + +import numpy as np + +from numba.core.extending import register_jitable +from numba.np.random._constants import (wi_double, ki_double, + ziggurat_nor_r, fi_double, + wi_float, ki_float, + ziggurat_nor_inv_r_f, + ziggurat_nor_r_f, fi_float, + we_double, ke_double, + ziggurat_exp_r, fe_double, + we_float, ke_float, + ziggurat_exp_r_f, fe_float, + INT64_MAX, ziggurat_nor_inv_r) +from numba.np.random.generator_core import (next_double, next_float, + next_uint32, next_uint64) +from numba import float32, int64 +from numba.np.numpy_support import numpy_version +# All of the following implementations are direct translations from: +# https://github.com/numpy/numpy/blob/7cfef93c77599bd387ecc6a15d186c5a46024dac/numpy/random/src/distributions/distributions.c + + +@register_jitable +def np_log1p(x): + return np.log1p(x) + + +@register_jitable +def np_log1pf(x): + return np.log1p(float32(x)) + + +@register_jitable +def random_rayleigh(bitgen, mode): + return mode * np.sqrt(2.0 * random_standard_exponential(bitgen)) + + +@register_jitable +def np_expm1(x): + return np.expm1(x) + + +@register_jitable +def random_standard_normal(bitgen): + while 1: + r = next_uint64(bitgen) + idx = r & 0xff + r >>= 8 + sign = r & 0x1 + rabs = (r >> 1) & 0x000fffffffffffff + x = rabs * wi_double[idx] + if (sign & 0x1): + x = -x + if rabs < ki_double[idx]: + return x + if idx == 0: + while 1: + xx = -ziggurat_nor_inv_r * np.log1p(-next_double(bitgen)) + yy = -np.log1p(-next_double(bitgen)) + if (yy + yy > xx * xx): + if ((rabs >> 8) & 0x1): + return -(ziggurat_nor_r + xx) + else: + return ziggurat_nor_r + xx + else: + if (((fi_double[idx - 1] - fi_double[idx]) * + next_double(bitgen) + fi_double[idx]) < + np.exp(-0.5 * x * x)): + return x + + +@register_jitable +def random_standard_normal_f(bitgen): + while 1: + r = next_uint32(bitgen) + idx = r & 0xff + sign = (r >> 8) & 0x1 + rabs = (r >> 9) & 0x0007fffff + x = float32(float32(rabs) * wi_float[idx]) + if (sign & 0x1): + x = -x + if (rabs < 
ki_float[idx]): + return x + if (idx == 0): + while 1: + xx = float32(-ziggurat_nor_inv_r_f * + np_log1pf(-next_float(bitgen))) + yy = float32(-np_log1pf(-next_float(bitgen))) + if (float32(yy + yy) > float32(xx * xx)): + if ((rabs >> 8) & 0x1): + return -float32(ziggurat_nor_r_f + xx) + else: + return float32(ziggurat_nor_r_f + xx) + else: + if (((fi_float[idx - 1] - fi_float[idx]) * next_float(bitgen) + + fi_float[idx]) < float32(np.exp(-float32(0.5) * x * x))): + return x + + +@register_jitable +def random_standard_exponential(bitgen): + while 1: + ri = next_uint64(bitgen) + ri >>= 3 + idx = ri & 0xFF + ri >>= 8 + x = ri * we_double[idx] + if (ri < ke_double[idx]): + return x + else: + if idx == 0: + return ziggurat_exp_r - np_log1p(-next_double(bitgen)) + elif ((fe_double[idx - 1] - fe_double[idx]) * next_double(bitgen) + + fe_double[idx] < np.exp(-x)): + return x + + +@register_jitable +def random_standard_exponential_f(bitgen): + while 1: + ri = next_uint32(bitgen) + ri >>= 1 + idx = ri & 0xFF + ri >>= 8 + x = float32(float32(ri) * we_float[idx]) + if (ri < ke_float[idx]): + return x + else: + if (idx == 0): + return float32(ziggurat_exp_r_f - + float32(np_log1pf(-next_float(bitgen)))) + elif ((fe_float[idx - 1] - fe_float[idx]) * next_float(bitgen) + + fe_float[idx] < float32(np.exp(float32(-x)))): + return x + + +@register_jitable +def random_standard_exponential_inv(bitgen): + return -np_log1p(-next_double(bitgen)) + + +@register_jitable +def random_standard_exponential_inv_f(bitgen): + return -np.log(float32(1.0) - next_float(bitgen)) + + +@register_jitable +def random_standard_gamma(bitgen, shape): + if (shape == 1.0): + return random_standard_exponential(bitgen) + elif (shape == 0.0): + return 0.0 + elif (shape < 1.0): + while 1: + U = next_double(bitgen) + V = random_standard_exponential(bitgen) + if (U <= 1.0 - shape): + X = pow(U, 1. / shape) + if (X <= V): + return X + else: + Y = -np.log((1 - U) / shape) + X = pow(1.0 - shape + shape * Y, 1. 
/ shape) + if (X <= (V + Y)): + return X + else: + b = shape - 1. / 3. + c = 1. / np.sqrt(9 * b) + while 1: + while 1: + X = random_standard_normal(bitgen) + V = 1.0 + c * X + if (V > 0.0): + break + + V = V * V * V + U = next_double(bitgen) + if (U < 1.0 - 0.0331 * (X * X) * (X * X)): + return (b * V) + + if (np.log(U) < 0.5 * X * X + b * (1. - V + np.log(V))): + return (b * V) + + +@register_jitable +def random_standard_gamma_f(bitgen, shape): + f32_one = float32(1.0) + shape = float32(shape) + if (shape == f32_one): + return random_standard_exponential_f(bitgen) + elif (shape == float32(0.0)): + return float32(0.0) + elif (shape < f32_one): + while 1: + U = next_float(bitgen) + V = random_standard_exponential_f(bitgen) + if (U <= f32_one - shape): + X = float32(pow(U, float32(f32_one / shape))) + if (X <= V): + return X + else: + Y = float32(-np.log(float32((f32_one - U) / shape))) + X = float32(pow(f32_one - shape + float32(shape * Y), + float32(f32_one / shape))) + if (X <= (V + Y)): + return X + else: + b = shape - f32_one / float32(3.0) + c = float32(f32_one / float32(np.sqrt(float32(9.0) * b))) + while 1: + while 1: + X = float32(random_standard_normal_f(bitgen)) + V = float32(f32_one + c * X) + if (V > float32(0.0)): + break + + V = float32(V * V * V) + U = next_float(bitgen) + if (U < f32_one - float32(0.0331) * (X * X) * (X * X)): + return float32(b * V) + + if (np.log(U) < float32(0.5) * X * X + b * + (f32_one - V + np.log(V))): + return float32(b * V) + + +@register_jitable +def random_normal(bitgen, loc, scale): + scaled_normal = scale * random_standard_normal(bitgen) + return loc + scaled_normal + + +@register_jitable +def random_normal_f(bitgen, loc, scale): + scaled_normal = float32(scale * random_standard_normal_f(bitgen)) + return float32(loc + scaled_normal) + + +@register_jitable +def random_exponential(bitgen, scale): + return scale * random_standard_exponential(bitgen) + + +@register_jitable +def random_uniform(bitgen, lower, range): + 
scaled_uniform = range * next_double(bitgen) + return lower + scaled_uniform + + +@register_jitable +def random_gamma(bitgen, shape, scale): + return scale * random_standard_gamma(bitgen, shape) + + +@register_jitable +def random_gamma_f(bitgen, shape, scale): + return float32(scale * random_standard_gamma_f(bitgen, shape)) + + +@register_jitable +def random_beta(bitgen, a, b): + if a <= 1.0 and b <= 1.0: + while 1: + U = next_double(bitgen) + V = next_double(bitgen) + X = pow(U, 1.0 / a) + Y = pow(V, 1.0 / b) + XpY = X + Y + if XpY <= 1.0 and XpY > 0.0: + if (X + Y > 0): + return X / XpY + else: + logX = np.log(U) / a + logY = np.log(V) / b + logM = min(logX, logY) + logX -= logM + logY -= logM + + return np.exp(logX - np.log(np.exp(logX) + np.exp(logY))) + else: + Ga = random_standard_gamma(bitgen, a) + Gb = random_standard_gamma(bitgen, b) + return Ga / (Ga + Gb) + + +@register_jitable +def random_chisquare(bitgen, df): + return 2.0 * random_standard_gamma(bitgen, df / 2.0) + + +@register_jitable +def random_f(bitgen, dfnum, dfden): + return ((random_chisquare(bitgen, dfnum) * dfden) / + (random_chisquare(bitgen, dfden) * dfnum)) + + +@register_jitable +def random_standard_cauchy(bitgen): + return random_standard_normal(bitgen) / random_standard_normal(bitgen) + + +@register_jitable +def random_pareto(bitgen, a): + return np_expm1(random_standard_exponential(bitgen) / a) + + +@register_jitable +def random_weibull(bitgen, a): + if (a == 0.0): + return 0.0 + return pow(random_standard_exponential(bitgen), 1. / a) + + +@register_jitable +def random_power(bitgen, a): + return pow(-np_expm1(-random_standard_exponential(bitgen)), 1. 
/ a) + + +@register_jitable +def random_laplace(bitgen, loc, scale): + U = next_double(bitgen) + while U <= 0: + U = next_double(bitgen) + if (U >= 0.5): + U = loc - scale * np.log(2.0 - U - U) + elif (U > 0.0): + U = loc + scale * np.log(U + U) + return U + + +@register_jitable +def random_logistic(bitgen, loc, scale): + U = next_double(bitgen) + while U <= 0.0: + U = next_double(bitgen) + return loc + scale * np.log(U / (1.0 - U)) + + +@register_jitable +def random_lognormal(bitgen, mean, sigma): + return np.exp(random_normal(bitgen, mean, sigma)) + + +@register_jitable +def random_standard_t(bitgen, df): + num = random_standard_normal(bitgen) + denom = random_standard_gamma(bitgen, df / 2) + return np.sqrt(df / 2) * num / np.sqrt(denom) + + +@register_jitable +def random_wald(bitgen, mean, scale): + mu_2l = mean / (2 * scale) + Y = random_standard_normal(bitgen) + Y = mean * Y * Y + X = mean + mu_2l * (Y - np.sqrt(4 * scale * Y + Y * Y)) + U = next_double(bitgen) + if (U <= mean / (mean + X)): + return X + else: + return mean * mean / X + + +@register_jitable +def random_geometric_search(bitgen, p): + X = 1 + sum = prod = p + q = 1.0 - p + U = next_double(bitgen) + while (U > sum): + prod *= q + sum += prod + X = X + 1 + return X + + +@register_jitable +def random_geometric_inversion(bitgen, p): + return np.ceil(-random_standard_exponential(bitgen) / np.log1p(-p)) + + +@register_jitable +def random_geometric(bitgen, p): + if (p >= 0.333333333333333333333333): + return random_geometric_search(bitgen, p) + else: + return random_geometric_inversion(bitgen, p) + + +if numpy_version < (2, 1): + @register_jitable + def random_zipf(bitgen, a): + am1 = a - 1.0 + b = pow(2.0, am1) + while 1: + U = 1.0 - next_double(bitgen) + V = next_double(bitgen) + X = np.floor(pow(U, -1.0 / am1)) + if (X > INT64_MAX or X < 1.0): + continue + T = pow(1.0 + 1.0 / X, am1) + if (V * X * (T - 1.0) / (b - 1.0) <= T / b): + return X +else: + @register_jitable + def random_zipf(bitgen, a): + 
am1 = a - 1.0 + b = pow(2.0, am1) + Umin = pow(INT64_MAX, -am1) + while 1: + U01 = next_double(bitgen) + U = U01 * Umin + (1 - U01) + V = next_double(bitgen) + X = np.floor(pow(U, -1.0 / am1)) + if (X > INT64_MAX or X < 1.0): + continue + + T = pow(1.0 + 1.0 / X, am1) + if (V * X * (T - 1.0) / (b - 1.0) <= T / b): + return X + + +@register_jitable +def random_triangular(bitgen, left, mode, + right): + base = right - left + leftbase = mode - left + ratio = leftbase / base + leftprod = leftbase * base + rightprod = (right - mode) * base + + U = next_double(bitgen) + if (U <= ratio): + return left + np.sqrt(U * leftprod) + else: + return right - np.sqrt((1.0 - U) * rightprod) + + +@register_jitable +def random_loggam(x): + a = [8.333333333333333e-02, -2.777777777777778e-03, + 7.936507936507937e-04, -5.952380952380952e-04, + 8.417508417508418e-04, -1.917526917526918e-03, + 6.410256410256410e-03, -2.955065359477124e-02, + 1.796443723688307e-01, -1.39243221690590e+00] + + if ((x == 1.0) or (x == 2.0)): + return 0.0 + elif (x < 7.0): + n = int(7 - x) + else: + n = 0 + + x0 = x + n + x2 = (1.0 / x0) * (1.0 / x0) + # /* log(2 * M_PI) */ + lg2pi = 1.8378770664093453e+00 + gl0 = a[9] + + for k in range(0, 9): + gl0 *= x2 + gl0 += a[8 - k] + + gl = gl0 / x0 + 0.5 * lg2pi + (x0 - 0.5) * np.log(x0) - x0 + if (x < 7.0): + for k in range(1, n + 1): + gl = gl - np.log(x0 - 1.0) + x0 = x0 - 1.0 + + return gl + + +@register_jitable +def random_poisson_mult(bitgen, lam): + enlam = np.exp(-lam) + X = 0 + prod = 1.0 + while (1): + U = next_double(bitgen) + prod *= U + if (prod > enlam): + X += 1 + else: + return X + + +@register_jitable +def random_poisson_ptrs(bitgen, lam): + + slam = np.sqrt(lam) + loglam = np.log(lam) + b = 0.931 + 2.53 * slam + a = -0.059 + 0.02483 * b + invalpha = 1.1239 + 1.1328 / (b - 3.4) + vr = 0.9277 - 3.6224 / (b - 2) + + while (1): + U = next_double(bitgen) - 0.5 + V = next_double(bitgen) + us = 0.5 - np.fabs(U) + k = int((2 * a / us + b) * U + lam + 0.43) + 
if ((us >= 0.07) and (V <= vr)): + return k + + if ((k < 0) or ((us < 0.013) and (V > us))): + continue + + # /* log(V) == log(0.0) ok here */ + # /* if U==0.0 so that us==0.0, log is ok since always returns */ + if ((np.log(V) + np.log(invalpha) - np.log(a / (us * us) + b)) <= + (-lam + k * loglam - random_loggam(k + 1))): + return k + + +@register_jitable +def random_poisson(bitgen, lam): + if (lam >= 10): + return random_poisson_ptrs(bitgen, lam) + elif (lam == 0): + return 0 + else: + return random_poisson_mult(bitgen, lam) + + +@register_jitable +def random_negative_binomial(bitgen, n, p): + Y = random_gamma(bitgen, n, (1 - p) / p) + return random_poisson(bitgen, Y) + + +@register_jitable +def random_noncentral_chisquare(bitgen, df, nonc): + if np.isnan(nonc): + return np.nan + + if nonc == 0: + return random_chisquare(bitgen, df) + + if 1 < df: + Chi2 = random_chisquare(bitgen, df - 1) + n = random_standard_normal(bitgen) + np.sqrt(nonc) + return Chi2 + n * n + else: + i = random_poisson(bitgen, nonc / 2.0) + return random_chisquare(bitgen, df + 2 * i) + + +@register_jitable +def random_noncentral_f(bitgen, dfnum, dfden, nonc): + t = random_noncentral_chisquare(bitgen, dfnum, nonc) * dfden + return t / (random_chisquare(bitgen, dfden) * dfnum) + + +@register_jitable +def random_logseries(bitgen, p): + r = np_log1p(-p) + + while 1: + V = next_double(bitgen) + if (V >= p): + return 1 + U = next_double(bitgen) + q = -np.expm1(r * U) + if (V <= q * q): + result = int64(np.floor(1 + np.log(V) / np.log(q))) + if result < 1 or V == 0.0: + continue + else: + return result + if (V >= q): + return 1 + else: + return 2 + + +@register_jitable +def random_binomial_btpe(bitgen, n, p): + r = min(p, 1.0 - p) + q = 1.0 - r + fm = n * r + r + m = int(np.floor(fm)) + p1 = np.floor(2.195 * np.sqrt(n * r * q) - 4.6 * q) + 0.5 + xm = m + 0.5 + xl = xm - p1 + xr = xm + p1 + c = 0.134 + 20.5 / (15.3 + m) + a = (fm - xl) / (fm - xl * r) + laml = a * (1.0 + a / 2.0) + a = (xr - fm) / 
(xr * q) + lamr = a * (1.0 + a / 2.0) + p2 = p1 * (1.0 + 2.0 * c) + p3 = p2 + c / laml + p4 = p3 + c / lamr + + case = 10 + y = k = 0 + while 1: + if case == 10: + nrq = n * r * q + u = next_double(bitgen) * p4 + v = next_double(bitgen) + if (u > p1): + case = 20 + continue + y = int(np.floor(xm - p1 * v + u)) + case = 60 + continue + elif case == 20: + if (u > p2): + case = 30 + continue + x = xl + (u - p1) / c + v = v * c + 1.0 - np.fabs(m - x + 0.5) / p1 + if (v > 1.0): + case = 10 + continue + y = int(np.floor(x)) + case = 50 + continue + elif case == 30: + if (u > p3): + case = 40 + continue + y = int(np.floor(xl + np.log(v) / laml)) + if ((y < 0) or (v == 0.0)): + case = 10 + continue + v = v * (u - p2) * laml + case = 50 + continue + elif case == 40: + y = int(np.floor(xr - np.log(v) / lamr)) + if ((y > n) or (v == 0.0)): + case = 10 + continue + v = v * (u - p3) * lamr + case = 50 + continue + elif case == 50: + k = abs(y - m) + if ((k > 20) and (k < ((nrq) / 2.0 - 1))): + case = 52 + continue + s = r / q + a = s * (n + 1) + F = 1.0 + if (m < y): + for i in range(m + 1, y + 1): + F = F * (a / i - s) + elif (m > y): + for i in range(y + 1, m + 1): + F = F / (a / i - s) + if (v > F): + case = 10 + continue + case = 60 + continue + elif case == 52: + rho = (k / (nrq)) * \ + ((k * (k / 3.0 + 0.625) + 0.16666666666666666) / + nrq + 0.5) + t = -k * k / (2 * nrq) + A = np.log(v) + if (A < (t - rho)): + case = 60 + continue + if (A > (t + rho)): + case = 10 + continue + x1 = y + 1 + f1 = m + 1 + z = n + 1 - m + w = n - y + 1 + x2 = x1 * x1 + f2 = f1 * f1 + z2 = z * z + w2 = w * w + if (A > (xm * np.log(f1 / x1) + (n - m + 0.5) * np.log(z / w) + + (y - m) * np.log(w * r / (x1 * q)) + + (13680. - (462. - (132. - (99. - 140. / f2) / f2) / f2) + / f2) / f1 / 166320. + + (13680. - (462. - (132. - (99. - 140. / z2) / z2) / z2) + / z2) / z / 166320. + + (13680. - (462. - (132. - (99. - 140. / x2) / x2) / x2) + / x2) / x1 / 166320. + + (13680. - (462. - (132. - (99. - 140. 
/ w2) / w2) / w2) + / w2) / w / 66320.)): + case = 10 + continue + case = 60 + continue + elif case == 60: + if (p > 0.5): + y = n - y + return y + + +@register_jitable +def random_binomial_inversion(bitgen, n, p): + q = 1.0 - p + qn = np.exp(n * np.log(q)) + _np = n * p + bound = min(n, _np + 10.0 * np.sqrt(_np * q + 1)) + + X = 0 + px = qn + U = next_double(bitgen) + while (U > px): + X = X + 1 + if (X > bound): + X = 0 + px = qn + U = next_double(bitgen) + else: + U -= px + px = ((n - X + 1) * p * px) / (X * q) + + return X + + +@register_jitable +def random_binomial(bitgen, n, p): + if ((n == 0) or (p == 0.0)): + return 0 + + if (p <= 0.5): + if (p * n <= 30.0): + return random_binomial_inversion(bitgen, n, p) + else: + return random_binomial_btpe(bitgen, n, p) + else: + q = 1.0 - p + if (q * n <= 30.0): + return n - random_binomial_inversion(bitgen, n, q) + else: + return n - random_binomial_btpe(bitgen, n, q) diff --git a/venv/lib/python3.10/site-packages/numba/np/random/old_random_methods.py b/venv/lib/python3.10/site-packages/numba/np/random/old_random_methods.py new file mode 100644 index 0000000000000000000000000000000000000000..ff9e9729e82e9653541b11e02898f376838b6893 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/random/old_random_methods.py @@ -0,0 +1,365 @@ +import numpy as np + +from numba import uint64, uint32, uint16, uint8 +from numba.core.extending import register_jitable + +from numba.np.random._constants import (UINT32_MAX, UINT64_MAX, + UINT16_MAX, UINT8_MAX) +from numba.np.random.generator_core import next_uint32, next_uint64 + +# All following implementations are direct translations from: +# https://github.com/numpy/numpy/blob/7cfef93c77599bd387ecc6a15d186c5a46024dac/numpy/random/src/distributions/distributions.c + + +@register_jitable +def gen_mask(max): + mask = uint64(max) + mask |= mask >> 1 + mask |= mask >> 2 + mask |= mask >> 4 + mask |= mask >> 8 + mask |= mask >> 16 + mask |= mask >> 32 + return mask + + 
+@register_jitable +def buffered_bounded_bool(bitgen, off, rng, bcnt, buf): + if (rng == 0): + return off, bcnt, buf + if not bcnt: + buf = next_uint32(bitgen) + bcnt = 31 + else: + buf >>= 1 + bcnt -= 1 + + return ((buf & 1) != 0), bcnt, buf + + +@register_jitable +def buffered_uint8(bitgen, bcnt, buf): + if not bcnt: + buf = next_uint32(bitgen) + bcnt = 3 + else: + buf >>= 8 + bcnt -= 1 + + return uint8(buf), bcnt, buf + + +@register_jitable +def buffered_uint16(bitgen, bcnt, buf): + if not bcnt: + buf = next_uint32(bitgen) + bcnt = 1 + else: + buf >>= 16 + bcnt -= 1 + + return uint16(buf), bcnt, buf + + +# The following implementations use Lemire's algorithm: +# https://arxiv.org/abs/1805.10941 +@register_jitable +def buffered_bounded_lemire_uint8(bitgen, rng, bcnt, buf): + """ + Generates a random unsigned 8 bit integer bounded + within a given interval using Lemire's rejection. + + The buffer acts as storage for a 32 bit integer + drawn from the associated BitGenerator so that + multiple integers of smaller bitsize can be generated + from a single draw of the BitGenerator. + """ + # Note: `rng` should not be 0xFF. When this happens `rng_excl` becomes + # zero. + rng_excl = uint8(rng) + uint8(1) + + assert (rng != 0xFF) + + # Generate a scaled random number. + n, bcnt, buf = buffered_uint8(bitgen, bcnt, buf) + m = uint16(n * rng_excl) + + # Rejection sampling to remove any bias + leftover = m & 0xFF + + if (leftover < rng_excl): + # `rng_excl` is a simple upper bound for `threshold`. + threshold = ((uint8(UINT8_MAX) - rng) % rng_excl) + + while (leftover < threshold): + n, bcnt, buf = buffered_uint8(bitgen, bcnt, buf) + m = uint16(n * rng_excl) + leftover = m & 0xFF + + return m >> 8, bcnt, buf + + +@register_jitable +def buffered_bounded_lemire_uint16(bitgen, rng, bcnt, buf): + """ + Generates a random unsigned 16 bit integer bounded + within a given interval using Lemire's rejection. 
+ + The buffer acts as storage for a 32 bit integer + drawn from the associated BitGenerator so that + multiple integers of smaller bitsize can be generated + from a single draw of the BitGenerator. + """ + # Note: `rng` should not be 0xFFFF. When this happens `rng_excl` becomes + # zero. + rng_excl = uint16(rng) + uint16(1) + + assert (rng != 0xFFFF) + + # Generate a scaled random number. + n, bcnt, buf = buffered_uint16(bitgen, bcnt, buf) + m = uint32(n * rng_excl) + + # Rejection sampling to remove any bias + leftover = m & 0xFFFF + + if (leftover < rng_excl): + # `rng_excl` is a simple upper bound for `threshold`. + threshold = ((uint16(UINT16_MAX) - rng) % rng_excl) + + while (leftover < threshold): + n, bcnt, buf = buffered_uint16(bitgen, bcnt, buf) + m = uint32(n * rng_excl) + leftover = m & 0xFFFF + + return m >> 16, bcnt, buf + + +@register_jitable +def buffered_bounded_lemire_uint32(bitgen, rng): + """ + Generates a random unsigned 32 bit integer bounded + within a given interval using Lemire's rejection. + """ + rng_excl = uint32(rng) + uint32(1) + + assert (rng != 0xFFFFFFFF) + + # Generate a scaled random number. + m = uint64(next_uint32(bitgen)) * uint64(rng_excl) + + # Rejection sampling to remove any bias + leftover = m & 0xFFFFFFFF + + if (leftover < rng_excl): + # `rng_excl` is a simple upper bound for `threshold`. + threshold = (UINT32_MAX - rng) % rng_excl + + while (leftover < threshold): + m = uint64(next_uint32(bitgen)) * uint64(rng_excl) + leftover = m & 0xFFFFFFFF + + return (m >> 32) + + +@register_jitable +def bounded_lemire_uint64(bitgen, rng): + """ + Generates a random unsigned 64 bit integer bounded + within a given interval using Lemire's rejection. 
+ """ + rng_excl = uint64(rng) + uint64(1) + + assert (rng != 0xFFFFFFFFFFFFFFFF) + + x = next_uint64(bitgen) + + leftover = uint64(x) * uint64(rng_excl) + + if (leftover < rng_excl): + threshold = (UINT64_MAX - rng) % rng_excl + + while (leftover < threshold): + x = next_uint64(bitgen) + leftover = uint64(x) * uint64(rng_excl) + + x0 = x & uint64(0xFFFFFFFF) + x1 = x >> 32 + rng_excl0 = rng_excl & uint64(0xFFFFFFFF) + rng_excl1 = rng_excl >> 32 + w0 = x0 * rng_excl0 + t = x1 * rng_excl0 + (w0 >> 32) + w1 = t & uint64(0xFFFFFFFF) + w2 = t >> 32 + w1 += x0 * rng_excl1 + m1 = x1 * rng_excl1 + w2 + (w1 >> 32) + + return m1 + + +@register_jitable +def random_bounded_uint64_fill(bitgen, low, rng, size, dtype): + """ + Returns a new array of given size with 64 bit integers + bounded by given interval. + """ + out = np.empty(size, dtype=dtype) + if rng == 0: + for i in np.ndindex(size): + out[i] = low + elif rng <= 0xFFFFFFFF: + if (rng == 0xFFFFFFFF): + for i in np.ndindex(size): + out[i] = low + next_uint32(bitgen) + else: + for i in np.ndindex(size): + out[i] = low + buffered_bounded_lemire_uint32(bitgen, rng) + + elif (rng == 0xFFFFFFFFFFFFFFFF): + for i in np.ndindex(size): + out[i] = low + next_uint64(bitgen) + else: + for i in np.ndindex(size): + out[i] = low + bounded_lemire_uint64(bitgen, rng) + + return out + + +@register_jitable +def random_bounded_uint32_fill(bitgen, low, rng, size, dtype): + """ + Returns a new array of given size with 32 bit integers + bounded by given interval. + """ + out = np.empty(size, dtype=dtype) + if rng == 0: + for i in np.ndindex(size): + out[i] = low + elif rng == 0xFFFFFFFF: + # Lemire32 doesn't support rng = 0xFFFFFFFF. 
+ for i in np.ndindex(size): + out[i] = low + next_uint32(bitgen) + else: + for i in np.ndindex(size): + out[i] = low + buffered_bounded_lemire_uint32(bitgen, rng) + return out + + +@register_jitable +def random_bounded_uint16_fill(bitgen, low, rng, size, dtype): + """ + Returns a new array of given size with 16 bit integers + bounded by given interval. + """ + buf = 0 + bcnt = 0 + + out = np.empty(size, dtype=dtype) + if rng == 0: + for i in np.ndindex(size): + out[i] = low + elif rng == 0xFFFF: + # Lemire16 doesn't support rng = 0xFFFF. + for i in np.ndindex(size): + val, bcnt, buf = buffered_uint16(bitgen, bcnt, buf) + out[i] = low + val + + else: + for i in np.ndindex(size): + val, bcnt, buf = \ + buffered_bounded_lemire_uint16(bitgen, rng, + bcnt, buf) + out[i] = low + val + return out + + +@register_jitable +def random_bounded_uint8_fill(bitgen, low, rng, size, dtype): + """ + Returns a new array of given size with 8 bit integers + bounded by given interval. + """ + buf = 0 + bcnt = 0 + + out = np.empty(size, dtype=dtype) + if rng == 0: + for i in np.ndindex(size): + out[i] = low + elif rng == 0xFF: + # Lemire8 doesn't support rng = 0xFF. + for i in np.ndindex(size): + val, bcnt, buf = buffered_uint8(bitgen, bcnt, buf) + out[i] = low + val + else: + for i in np.ndindex(size): + val, bcnt, buf = \ + buffered_bounded_lemire_uint8(bitgen, rng, + bcnt, buf) + out[i] = low + val + return out + + +@register_jitable +def random_bounded_bool_fill(bitgen, low, rng, size, dtype): + """ + Returns a new array of given size with boolean values. + """ + buf = 0 + bcnt = 0 + out = np.empty(size, dtype=dtype) + for i in np.ndindex(size): + val, bcnt, buf = buffered_bounded_bool(bitgen, low, rng, bcnt, buf) + out[i] = low + val + return out + + +@register_jitable +def _randint_arg_check(low, high, endpoint, lower_bound, upper_bound): + """ + Check that low and high are within the bounds + for the given datatype. 
+ """ + + if low < lower_bound: + raise ValueError("low is out of bounds") + + # This is being done to avoid high being accidentally + # casted to int64/32 while subtracting 1 before + # checking bounds, avoids overflow. + if high > 0: + high = uint64(high) + if not endpoint: + high -= uint64(1) + upper_bound = uint64(upper_bound) + if low > 0: + low = uint64(low) + if high > upper_bound: + raise ValueError("high is out of bounds") + if low > high: # -1 already subtracted, closed interval + raise ValueError("low is greater than high in given interval") + else: + if high > upper_bound: + raise ValueError("high is out of bounds") + if low > high: # -1 already subtracted, closed interval + raise ValueError("low is greater than high in given interval") + + +@register_jitable +def random_interval(bitgen, max_val): + if (max_val == 0): + return 0 + + max_val = uint64(max_val) + mask = uint64(gen_mask(max_val)) + + if (max_val <= 0xffffffff): + value = uint64(next_uint32(bitgen)) & mask + while value > max_val: + value = uint64(next_uint32(bitgen)) & mask + else: + value = next_uint64(bitgen) & mask + while value > max_val: + value = next_uint64(bitgen) & mask + + return uint64(value) diff --git a/venv/lib/python3.10/site-packages/numba/np/random/random_methods.py b/venv/lib/python3.10/site-packages/numba/np/random/random_methods.py new file mode 100644 index 0000000000000000000000000000000000000000..608d3c82753fdcf54f70be5b97e0c37b08f72459 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/random/random_methods.py @@ -0,0 +1,12 @@ +import sys +from numba.core.utils import _RedirectSubpackage +from numba.core import config + +if config.USE_LEGACY_TYPE_SYSTEM: + sys.modules[__name__] = \ + _RedirectSubpackage(locals(), + "numba.np.random.old_random_methods") +else: + sys.modules[__name__] = \ + _RedirectSubpackage(locals(), + "numba.np.random.new_random_methods") diff --git a/venv/lib/python3.10/site-packages/numba/np/ufunc/__init__.py 
b/venv/lib/python3.10/site-packages/numba/np/ufunc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..14383e17fc7b7123b0b836710ebca1495dbb7b89 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/ufunc/__init__.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- + +from numba.np.ufunc.decorators import Vectorize, GUVectorize, vectorize, guvectorize +from numba.np.ufunc._internal import PyUFunc_None, PyUFunc_Zero, PyUFunc_One +from numba.np.ufunc import _internal, array_exprs +from numba.np.ufunc.parallel import (threading_layer, get_num_threads, + set_num_threads, get_thread_id, + set_parallel_chunksize, + get_parallel_chunksize) + + +if hasattr(_internal, 'PyUFunc_ReorderableNone'): + PyUFunc_ReorderableNone = _internal.PyUFunc_ReorderableNone +del _internal, array_exprs + + +def _init(): + + def init_cuda_vectorize(): + from numba.cuda.vectorizers import CUDAVectorize + return CUDAVectorize + + def init_cuda_guvectorize(): + from numba.cuda.vectorizers import CUDAGUFuncVectorize + return CUDAGUFuncVectorize + + Vectorize.target_registry.ondemand['cuda'] = init_cuda_vectorize + GUVectorize.target_registry.ondemand['cuda'] = init_cuda_guvectorize + + +_init() +del _init diff --git a/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb3e56315753c56a94a5aa10ed92c8971bd4bc37 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/array_exprs.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/array_exprs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f748ca76e90cd95c9282c81995c7742b930fbfa6 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/array_exprs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/decorators.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/decorators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..042d52af28c27e4b88296c617562edae4872b28a Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/decorators.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/dufunc.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/dufunc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10910aff551539b855bceefe87bd2acf9c827e19 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/dufunc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/gufunc.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/gufunc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5923bbad7a58ac4292bebfae75134dc57031c2ea Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/gufunc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/parallel.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/parallel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..672129ce74e8c45b2e4188dae31bca2b05b2c9f3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/parallel.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/sigparse.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/sigparse.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..45d700421ae10ab15ef18ca9f5595f58010494a2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/sigparse.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/ufunc_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/ufunc_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..181f07c6d06b3f7660d6cb4400d1ad5458df2699 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/ufunc_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/ufuncbuilder.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/ufuncbuilder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0dcae44609291e93f10e7d768711ad22204b3386 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/ufuncbuilder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/wrappers.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/wrappers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28472f8f744d720376dddb224586264cd3282716 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/ufunc/__pycache__/wrappers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/np/ufunc/_internal.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/numba/np/ufunc/_internal.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..de0f96f415e20d9c4a5d58587633255661615fa4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/ufunc/_internal.cpython-310-x86_64-linux-gnu.so differ diff --git 
a/venv/lib/python3.10/site-packages/numba/np/ufunc/_num_threads.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/numba/np/ufunc/_num_threads.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..8f87256023546b4f1d64762c6addda4268c580c8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/np/ufunc/_num_threads.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/numba/np/ufunc/array_exprs.py b/venv/lib/python3.10/site-packages/numba/np/ufunc/array_exprs.py new file mode 100644 index 0000000000000000000000000000000000000000..91fceaacf40fa67c17c790e6f7fb27c428bcef13 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/ufunc/array_exprs.py @@ -0,0 +1,428 @@ +import ast +from collections import defaultdict, OrderedDict +import contextlib +import sys +from types import SimpleNamespace + +import numpy as np +import operator + +from numba.core import types, targetconfig, ir, rewrites, compiler +from numba.core.typing import npydecl +from numba.np.ufunc.dufunc import DUFunc + + +def _is_ufunc(func): + return isinstance(func, (np.ufunc, DUFunc)) + + +@rewrites.register_rewrite('after-inference') +class RewriteArrayExprs(rewrites.Rewrite): + '''The RewriteArrayExprs class is responsible for finding array + expressions in Numba intermediate representation code, and + rewriting those expressions to a single operation that will expand + into something similar to a ufunc call. + ''' + def __init__(self, state, *args, **kws): + super(RewriteArrayExprs, self).__init__(state, *args, **kws) + # Install a lowering hook if we are using this rewrite. + special_ops = state.targetctx.special_ops + if 'arrayexpr' not in special_ops: + special_ops['arrayexpr'] = _lower_array_expr + + def match(self, func_ir, block, typemap, calltypes): + """ + Using typing and a basic block, search the basic block for array + expressions. 
+ Return True when one or more matches were found, False otherwise. + """ + # We can trivially reject everything if there are no + # calls in the type results. + if len(calltypes) == 0: + return False + + self.crnt_block = block + self.typemap = typemap + # { variable name: IR assignment (of a function call or operator) } + self.array_assigns = OrderedDict() + # { variable name: IR assignment (of a constant) } + self.const_assigns = {} + + assignments = block.find_insts(ir.Assign) + for instr in assignments: + target_name = instr.target.name + expr = instr.value + # Does it assign an expression to an array variable? + if (isinstance(expr, ir.Expr) and + isinstance(typemap.get(target_name, None), types.Array)): + self._match_array_expr(instr, expr, target_name) + elif isinstance(expr, ir.Const): + # Track constants since we might need them for an + # array expression. + self.const_assigns[target_name] = expr + + return len(self.array_assigns) > 0 + + def _match_array_expr(self, instr, expr, target_name): + """ + Find whether the given assignment (*instr*) of an expression (*expr*) + to variable *target_name* is an array expression. + """ + # We've matched a subexpression assignment to an + # array variable. Now see if the expression is an + # array expression. + expr_op = expr.op + array_assigns = self.array_assigns + + if ((expr_op in ('unary', 'binop')) and ( + expr.fn in npydecl.supported_array_operators)): + # It is an array operator that maps to a ufunc. + # check that all args have internal types + if all(self.typemap[var.name].is_internal + for var in expr.list_vars()): + array_assigns[target_name] = instr + + elif ((expr_op == 'call') and (expr.func.name in self.typemap)): + # It could be a match for a known ufunc call. + func_type = self.typemap[expr.func.name] + if isinstance(func_type, types.Function): + func_key = func_type.typing_key + if _is_ufunc(func_key): + # If so, check whether an explicit output is passed. 
+ if not self._has_explicit_output(expr, func_key): + # If not, match it as a (sub)expression. + array_assigns[target_name] = instr + + def _has_explicit_output(self, expr, func): + """ + Return whether the *expr* call to *func* (a ufunc) features an + explicit output argument. + """ + nargs = len(expr.args) + len(expr.kws) + if expr.vararg is not None: + # XXX *args unsupported here, assume there may be an explicit + # output + return True + return nargs > func.nin + + def _get_array_operator(self, ir_expr): + ir_op = ir_expr.op + if ir_op in ('unary', 'binop'): + return ir_expr.fn + elif ir_op == 'call': + return self.typemap[ir_expr.func.name].typing_key + raise NotImplementedError( + "Don't know how to find the operator for '{0}' expressions.".format( + ir_op)) + + def _get_operands(self, ir_expr): + '''Given a Numba IR expression, return the operands to the expression + in order they appear in the expression. + ''' + ir_op = ir_expr.op + if ir_op == 'binop': + return ir_expr.lhs, ir_expr.rhs + elif ir_op == 'unary': + return ir_expr.list_vars() + elif ir_op == 'call': + return ir_expr.args + raise NotImplementedError( + "Don't know how to find the operands for '{0}' expressions.".format( + ir_op)) + + def _translate_expr(self, ir_expr): + '''Translate the given expression from Numba IR to an array expression + tree. + ''' + ir_op = ir_expr.op + if ir_op == 'arrayexpr': + return ir_expr.expr + operands_or_args = [self.const_assigns.get(op_var.name, op_var) + for op_var in self._get_operands(ir_expr)] + return self._get_array_operator(ir_expr), operands_or_args + + def _handle_matches(self): + '''Iterate over the matches, trying to find which instructions should + be rewritten, deleted, or moved. 
+ ''' + replace_map = {} + dead_vars = set() + used_vars = defaultdict(int) + for instr in self.array_assigns.values(): + expr = instr.value + arr_inps = [] + arr_expr = self._get_array_operator(expr), arr_inps + new_expr = ir.Expr(op='arrayexpr', + loc=expr.loc, + expr=arr_expr, + ty=self.typemap[instr.target.name]) + new_instr = ir.Assign(new_expr, instr.target, instr.loc) + replace_map[instr] = new_instr + self.array_assigns[instr.target.name] = new_instr + for operand in self._get_operands(expr): + operand_name = operand.name + if operand.is_temp and operand_name in self.array_assigns: + child_assign = self.array_assigns[operand_name] + child_expr = child_assign.value + child_operands = child_expr.list_vars() + for operand in child_operands: + used_vars[operand.name] += 1 + arr_inps.append(self._translate_expr(child_expr)) + if child_assign.target.is_temp: + dead_vars.add(child_assign.target.name) + replace_map[child_assign] = None + elif operand_name in self.const_assigns: + arr_inps.append(self.const_assigns[operand_name]) + else: + used_vars[operand.name] += 1 + arr_inps.append(operand) + return replace_map, dead_vars, used_vars + + def _get_final_replacement(self, replacement_map, instr): + '''Find the final replacement instruction for a given initial + instruction by chasing instructions in a map from instructions + to replacement instructions. + ''' + replacement = replacement_map[instr] + while replacement in replacement_map: + replacement = replacement_map[replacement] + return replacement + + def apply(self): + '''When we've found array expressions in a basic block, rewrite that + block, returning a new, transformed block. + ''' + # Part 1: Figure out what instructions should be rewritten + # based on the matches found. + replace_map, dead_vars, used_vars = self._handle_matches() + # Part 2: Using the information above, rewrite the target + # basic block. 
+ result = self.crnt_block.copy() + result.clear() + delete_map = {} + for instr in self.crnt_block.body: + if isinstance(instr, ir.Assign): + if instr in replace_map: + replacement = self._get_final_replacement( + replace_map, instr) + if replacement: + result.append(replacement) + for var in replacement.value.list_vars(): + var_name = var.name + if var_name in delete_map: + result.append(delete_map.pop(var_name)) + if used_vars[var_name] > 0: + used_vars[var_name] -= 1 + + else: + result.append(instr) + elif isinstance(instr, ir.Del): + instr_value = instr.value + if used_vars[instr_value] > 0: + used_vars[instr_value] -= 1 + delete_map[instr_value] = instr + elif instr_value not in dead_vars: + result.append(instr) + else: + result.append(instr) + if delete_map: + for instr in delete_map.values(): + result.insert_before_terminator(instr) + return result + + +_unaryops = { + operator.pos: ast.UAdd, + operator.neg: ast.USub, + operator.invert: ast.Invert, +} + +_binops = { + operator.add: ast.Add, + operator.sub: ast.Sub, + operator.mul: ast.Mult, + operator.truediv: ast.Div, + operator.mod: ast.Mod, + operator.or_: ast.BitOr, + operator.rshift: ast.RShift, + operator.xor: ast.BitXor, + operator.lshift: ast.LShift, + operator.and_: ast.BitAnd, + operator.pow: ast.Pow, + operator.floordiv: ast.FloorDiv, +} + + +_cmpops = { + operator.eq: ast.Eq, + operator.ne: ast.NotEq, + operator.lt: ast.Lt, + operator.le: ast.LtE, + operator.gt: ast.Gt, + operator.ge: ast.GtE, +} + + +def _arr_expr_to_ast(expr): + '''Build a Python expression AST from an array expression built by + RewriteArrayExprs. 
+ ''' + if isinstance(expr, tuple): + op, arr_expr_args = expr + ast_args = [] + env = {} + for arg in arr_expr_args: + ast_arg, child_env = _arr_expr_to_ast(arg) + ast_args.append(ast_arg) + env.update(child_env) + if op in npydecl.supported_array_operators: + if len(ast_args) == 2: + if op in _binops: + return ast.BinOp( + ast_args[0], _binops[op](), ast_args[1]), env + if op in _cmpops: + return ast.Compare( + ast_args[0], [_cmpops[op]()], [ast_args[1]]), env + else: + assert op in _unaryops + return ast.UnaryOp(_unaryops[op](), ast_args[0]), env + elif _is_ufunc(op): + fn_name = "__ufunc_or_dufunc_{0}".format( + hex(hash(op)).replace("-", "_")) + fn_ast_name = ast.Name(fn_name, ast.Load()) + env[fn_name] = op # Stash the ufunc or DUFunc in the environment + ast_call = ast.Call(fn_ast_name, ast_args, []) + return ast_call, env + elif isinstance(expr, ir.Var): + return ast.Name(expr.name, ast.Load(), + lineno=expr.loc.line, + col_offset=expr.loc.col if expr.loc.col else 0), {} + elif isinstance(expr, ir.Const): + return ast.Constant(expr.value), {} + raise NotImplementedError( + "Don't know how to translate array expression '%r'" % (expr,)) + + +@contextlib.contextmanager +def _legalize_parameter_names(var_list): + """ + Legalize names in the variable list for use as a Python function's + parameter names. 
+ """ + var_map = OrderedDict() + for var in var_list: + old_name = var.name + new_name = var.scope.redefine(old_name, loc=var.loc).name + new_name = new_name.replace("$", "_").replace(".", "_") + # Caller should ensure the names are unique + if new_name in var_map: + raise AssertionError(f"{new_name!r} not unique") + var_map[new_name] = var, old_name + var.name = new_name + param_names = list(var_map) + try: + yield param_names + finally: + # Make sure the old names are restored, to avoid confusing + # other parts of Numba (see issue #1466) + for var, old_name in var_map.values(): + var.name = old_name + + +class _EraseInvalidLineRanges(ast.NodeTransformer): + def generic_visit(self, node: ast.AST) -> ast.AST: + node = super().generic_visit(node) + if hasattr(node, "lineno"): + if getattr(node, "end_lineno", None) is not None: + if node.lineno > node.end_lineno: + del node.lineno + del node.end_lineno + return node + + +def _fix_invalid_lineno_ranges(astree: ast.AST): + """Inplace fixes invalid lineno ranges. + """ + # Make sure lineno and end_lineno are present + ast.fix_missing_locations(astree) + # Delete invalid lineno ranges + _EraseInvalidLineRanges().visit(astree) + # Make sure lineno and end_lineno are present + ast.fix_missing_locations(astree) + + +def _lower_array_expr(lowerer, expr): + '''Lower an array expression built by RewriteArrayExprs. + ''' + expr_name = "__numba_array_expr_%s" % (hex(hash(expr)).replace("-", "_")) + expr_filename = expr.loc.filename + expr_var_list = expr.list_vars() + # The expression may use a given variable several times, but we + # should only create one parameter for it. + expr_var_unique = sorted(set(expr_var_list), key=lambda var: var.name) + + # Arguments are the names external to the new closure + expr_args = [var.name for var in expr_var_unique] + + # 1. Create an AST tree from the array expression. 
+ with _legalize_parameter_names(expr_var_unique) as expr_params: + ast_args = [ast.arg(param_name, None) + for param_name in expr_params] + # Parse a stub function to ensure the AST is populated with + # reasonable defaults for the Python version. + ast_module = ast.parse('def {0}(): return'.format(expr_name), + expr_filename, 'exec') + assert hasattr(ast_module, 'body') and len(ast_module.body) == 1 + ast_fn = ast_module.body[0] + ast_fn.args.args = ast_args + ast_fn.body[0].value, namespace = _arr_expr_to_ast(expr.expr) + _fix_invalid_lineno_ranges(ast_module) + + # 2. Compile the AST module and extract the Python function. + code_obj = compile(ast_module, expr_filename, 'exec') + exec(code_obj, namespace) + impl = namespace[expr_name] + + # 3. Now compile a ufunc using the Python function as kernel. + + context = lowerer.context + builder = lowerer.builder + outer_sig = expr.ty(*(lowerer.typeof(name) for name in expr_args)) + inner_sig_args = [] + for argty in outer_sig.args: + if isinstance(argty, types.Optional): + argty = argty.type + if isinstance(argty, types.Array): + inner_sig_args.append(argty.dtype) + else: + inner_sig_args.append(argty) + inner_sig = outer_sig.return_type.dtype(*inner_sig_args) + + flags = targetconfig.ConfigStack().top_or_none() + flags = compiler.Flags() if flags is None else flags.copy() # make sure it's a clone or a fresh instance + # Follow the Numpy error model. Note this also allows e.g. vectorizing + # division (issue #1223). 
+ flags.error_model = 'numpy' + cres = context.compile_subroutine(builder, impl, inner_sig, flags=flags, + caching=False) + + # Create kernel subclass calling our native function + from numba.np import npyimpl + + class ExprKernel(npyimpl._Kernel): + def generate(self, *args): + arg_zip = zip(args, self.outer_sig.args, inner_sig.args) + cast_args = [self.cast(val, inty, outty) + for val, inty, outty in arg_zip] + result = self.context.call_internal( + builder, cres.fndesc, inner_sig, cast_args) + return self.cast(result, inner_sig.return_type, + self.outer_sig.return_type) + + # create a fake ufunc object which is enough to trick numpy_ufunc_kernel + ufunc = SimpleNamespace(nin=len(expr_args), nout=1, __name__=expr_name) + ufunc.nargs = ufunc.nin + ufunc.nout + + args = [lowerer.loadvar(name) for name in expr_args] + return npyimpl.numpy_ufunc_kernel( + context, builder, outer_sig, args, ufunc, ExprKernel) diff --git a/venv/lib/python3.10/site-packages/numba/np/ufunc/decorators.py b/venv/lib/python3.10/site-packages/numba/np/ufunc/decorators.py new file mode 100644 index 0000000000000000000000000000000000000000..bbbcff9d27c566a803924d3346ac9b66015af24e --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/ufunc/decorators.py @@ -0,0 +1,208 @@ +import inspect + +from numba.np.ufunc import _internal +from numba.np.ufunc.parallel import ParallelUFuncBuilder, ParallelGUFuncBuilder + +from numba.core.registry import DelayedRegistry +from numba.np.ufunc import dufunc +from numba.np.ufunc import gufunc + + +class _BaseVectorize(object): + + @classmethod + def get_identity(cls, kwargs): + return kwargs.pop('identity', None) + + @classmethod + def get_cache(cls, kwargs): + return kwargs.pop('cache', False) + + @classmethod + def get_writable_args(cls, kwargs): + return kwargs.pop('writable_args', ()) + + @classmethod + def get_target_implementation(cls, kwargs): + target = kwargs.pop('target', 'cpu') + try: + return cls.target_registry[target] + except KeyError: + 
raise ValueError("Unsupported target: %s" % target) + + +class Vectorize(_BaseVectorize): + target_registry = DelayedRegistry({'cpu': dufunc.DUFunc, + 'parallel': ParallelUFuncBuilder,}) + + def __new__(cls, func, **kws): + identity = cls.get_identity(kws) + cache = cls.get_cache(kws) + imp = cls.get_target_implementation(kws) + return imp(func, identity=identity, cache=cache, targetoptions=kws) + + +class GUVectorize(_BaseVectorize): + target_registry = DelayedRegistry({'cpu': gufunc.GUFunc, + 'parallel': ParallelGUFuncBuilder,}) + + def __new__(cls, func, signature, **kws): + identity = cls.get_identity(kws) + cache = cls.get_cache(kws) + imp = cls.get_target_implementation(kws) + writable_args = cls.get_writable_args(kws) + if imp is gufunc.GUFunc: + is_dyn = kws.pop('is_dynamic', False) + return imp(func, signature, identity=identity, cache=cache, + is_dynamic=is_dyn, targetoptions=kws, + writable_args=writable_args) + else: + return imp(func, signature, identity=identity, cache=cache, + targetoptions=kws, writable_args=writable_args) + + +def vectorize(ftylist_or_function=(), **kws): + """vectorize(ftylist_or_function=(), target='cpu', identity=None, **kws) + + A decorator that creates a NumPy ufunc object using Numba compiled + code. When no arguments or only keyword arguments are given, + vectorize will return a Numba dynamic ufunc (DUFunc) object, where + compilation/specialization may occur at call-time. + + Args + ----- + ftylist_or_function: function or iterable + + When the first argument is a function, signatures are dealt + with at call-time. + + When the first argument is an iterable of type signatures, + which are either function type object or a string describing + the function type, signatures are finalized at decoration + time. + + Keyword Args + ------------ + + target: str + A string for code generation target. Default to "cpu". + + identity: int, str, or None + The identity (or unit) value for the element-wise function + being implemented. 
Allowed values are None (the default), 0, 1, + and "reorderable". + + cache: bool + Turns on caching. + + + Returns + -------- + + A NumPy universal function + + Examples + ------- + @vectorize(['float32(float32, float32)', + 'float64(float64, float64)'], identity=0) + def sum(a, b): + return a + b + + @vectorize + def sum(a, b): + return a + b + + @vectorize(identity=1) + def mul(a, b): + return a * b + + """ + if isinstance(ftylist_or_function, str): + # Common user mistake + ftylist = [ftylist_or_function] + elif inspect.isfunction(ftylist_or_function): + return dufunc.DUFunc(ftylist_or_function, **kws) + elif ftylist_or_function is not None: + ftylist = ftylist_or_function + + def wrap(func): + vec = Vectorize(func, **kws) + for sig in ftylist: + vec.add(sig) + if len(ftylist) > 0: + vec.disable_compile() + return vec.build_ufunc() + + return wrap + + +def guvectorize(*args, **kwargs): + """guvectorize(ftylist, signature, target='cpu', identity=None, **kws) + + A decorator to create NumPy generalized-ufunc object from Numba compiled + code. + + Args + ----- + ftylist: iterable + An iterable of type signatures, which are either + function type object or a string describing the + function type. + + signature: str + A NumPy generalized-ufunc signature. + e.g. "(m, n), (n, p)->(m, p)" + + identity: int, str, or None + The identity (or unit) value for the element-wise function + being implemented. Allowed values are None (the default), 0, 1, + and "reorderable". + + cache: bool + Turns on caching. + + writable_args: tuple + a tuple of indices of input variables that are writable. + + target: str + A string for code generation target. Defaults to "cpu". 
+ + Returns + -------- + + A NumPy generalized universal-function + + Example + ------- + @guvectorize(['void(int32[:,:], int32[:,:], int32[:,:])', + 'void(float32[:,:], float32[:,:], float32[:,:])'], + '(x, y),(x, y)->(x, y)') + def add_2d_array(a, b, c): + for i in range(c.shape[0]): + for j in range(c.shape[1]): + c[i, j] = a[i, j] + b[i, j] + + """ + if len(args) == 1: + ftylist = [] + signature = args[0] + kwargs.setdefault('is_dynamic', True) + elif len(args) == 2: + ftylist = args[0] + signature = args[1] + else: + raise TypeError('guvectorize() takes one or two positional arguments') + + if isinstance(ftylist, str): + # Common user mistake + ftylist = [ftylist] + + def wrap(func): + guvec = GUVectorize(func, signature, **kwargs) + for fty in ftylist: + guvec.add(fty) + if len(ftylist) > 0: + guvec.disable_compile() + return guvec.build_ufunc() + + return wrap diff --git a/venv/lib/python3.10/site-packages/numba/np/ufunc/dufunc.py b/venv/lib/python3.10/site-packages/numba/np/ufunc/dufunc.py new file mode 100644 index 0000000000000000000000000000000000000000..280dd1d1ccf67b10964cea278690549b76a96246 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/ufunc/dufunc.py @@ -0,0 +1,879 @@ +import functools +import operator +import warnings + +import numpy as np + +from numba import jit, typeof +from numba.core import cgutils, types, serialize, sigutils, errors +from numba.core.extending import (is_jitted, overload_attribute, + overload_method, register_jitable, + intrinsic) +from numba.core.typing import npydecl +from numba.core.typing.templates import AbstractTemplate, signature +from numba.cpython.unsafe.tuple import tuple_setitem +from numba.np.ufunc import _internal +from numba.np.ufunc.ufunc_base import UfuncBase, UfuncLowererBase +from numba.parfors import array_analysis +from numba.np.ufunc import ufuncbuilder +from numba.np import numpy_support +from typing import Callable +from llvmlite import ir +from numba.core.compiler_lock import 
global_compiler_lock + + +class UfuncAtIterator: + + def __init__(self, ufunc, a, a_ty, indices, indices_ty, b=None, b_ty=None): + self.ufunc = ufunc + self.a = a + self.a_ty = a_ty + self.indices = indices + self.indices_ty = indices_ty + self.b = b + self.b_ty = b_ty + + def run(self, context, builder): + self._prepare(context, builder) + loop_indices, _ = self.indexer.begin_loops() + self._call_ufunc(context, builder, loop_indices) + self.indexer.end_loops() + + def need_advanced_indexing(self): + return isinstance(self.indices_ty, types.BaseTuple) + + def _prepare(self, context, builder): + from numba.np.arrayobj import normalize_indices, FancyIndexer + + a, indices = self.a, self.indices + a_ty, indices_ty = self.a_ty, self.indices_ty + + zero = context.get_value_type(types.intp)(0) + + if self.b is not None: + self.b_indice = cgutils.alloca_once_value(builder, zero) + + if self.need_advanced_indexing(): + indices = cgutils.unpack_tuple(builder, indices, + count=len(indices_ty)) + index_types = indices_ty.types + index_types, indices = normalize_indices(context, builder, + index_types, indices) + else: + indices = (indices,) + index_types = (indices_ty,) + index_types, indices = normalize_indices(context, builder, + index_types, indices) + + self.indexer = FancyIndexer(context, builder, a_ty, a, + index_types, indices) + self.indexer.prepare() + self.cres = self._compile_ufunc(context, builder) + + def _load_val(self, context, builder, loop_indices, array, array_ty): + from numba.np.arrayobj import load_item + shapes = cgutils.unpack_tuple(builder, array.shape) + strides = cgutils.unpack_tuple(builder, array.strides) + data = array.data + + ptr = cgutils.get_item_pointer2(context, builder, data, shapes, strides, + array_ty.layout, loop_indices) + val = load_item(context, builder, array_ty, ptr) + return ptr, val + + def _load_flat(self, context, builder, indices, array, array_ty): + idx = builder.load(indices) + sig = array_ty.dtype(array_ty, types.intp) + 
impl = context.get_function(operator.getitem, sig) + val = impl(builder, (array, idx)) + + # increment indices + one = context.get_value_type(types.intp)(1) + idx = builder.add(idx, one) + builder.store(idx, indices) + + return None, val + + def _store_val(self, context, builder, array, array_ty, ptr, val): + from numba.np.arrayobj import store_item + fromty = self.cres.signature.return_type + toty = array_ty.dtype + val = context.cast(builder, val, fromty, toty) + store_item(context, builder, array_ty, val, ptr) + + def _compile_ufunc(self, context, builder): + ufunc = self.ufunc.key[0] + + if self.b is None: + sig = (self.a_ty.dtype,) + else: + sig = (self.a_ty.dtype, self.b_ty.dtype) + + cres = ufunc.add(sig) + context.add_linking_libs((cres.library,)) + return cres + + def _call_ufunc(self, context, builder, loop_indices): + cres = self.cres + a, a_ty = self.a, self.a_ty + + ptr, val = self._load_val(context, builder, loop_indices, a, a_ty) + + if self.b is None: + args = (val,) + else: + b, b_ty, b_idx = self.b, self.b_ty, self.b_indice + _, val_b = self._load_flat(context, builder, b_idx, b, b_ty) + args = (val, val_b) + + res = context.call_internal(builder, cres.fndesc, cres.signature, + args) + self._store_val(context, builder, a, a_ty, ptr, res) + + +def make_dufunc_kernel(_dufunc): + from numba.np import npyimpl + + class DUFuncKernel(npyimpl._Kernel): + """ + npyimpl._Kernel subclass responsible for lowering a DUFunc kernel + (element-wise function) inside a broadcast loop (which is + generated by npyimpl.numpy_ufunc_kernel()). + """ + dufunc = _dufunc + + def __init__(self, context, builder, outer_sig): + super().__init__(context, builder, outer_sig) + self.inner_sig, self.cres = self.dufunc.find_ewise_function( + outer_sig.args) + + DUFuncKernel.__name__ += _dufunc.ufunc.__name__ + return DUFuncKernel + + +class DUFuncLowerer(UfuncLowererBase): + '''Callable class responsible for lowering calls to a specific DUFunc. 
+ ''' + def __init__(self, dufunc): + from numba.np import npyimpl + super().__init__(dufunc, + make_dufunc_kernel, + npyimpl.numpy_ufunc_kernel) + + +class DUFunc(serialize.ReduceMixin, _internal._DUFunc, UfuncBase): + """ + Dynamic universal function (DUFunc) intended to act like a normal + Numpy ufunc, but capable of call-time (just-in-time) compilation + of fast loops specialized to inputs. + """ + # NOTE: __base_kwargs must be kept in synch with the kwlist in + # _internal.c:dufunc_init() + __base_kwargs = set(('identity', '_keepalive', 'nin', 'nout')) + + def __init__(self, py_func, identity=None, cache=False, targetoptions={}): + if is_jitted(py_func): + py_func = py_func.py_func + with ufuncbuilder._suppress_deprecation_warning_nopython_not_supplied(): + dispatcher = jit(_target='npyufunc', + cache=cache, + **targetoptions)(py_func) + self._initialize(dispatcher, identity) + functools.update_wrapper(self, py_func) + + def _initialize(self, dispatcher, identity): + identity = ufuncbuilder.parse_identity(identity) + super(DUFunc, self).__init__(dispatcher, identity=identity) + # Loop over a copy of the keys instead of the keys themselves, + # since we're changing the dictionary while looping. 
+ self.reorderable = (identity != _internal.PyUFunc_None) + self.__name__ = dispatcher.py_func.__name__ + self.__doc__ = dispatcher.py_func.__doc__ + self._lower_me = DUFuncLowerer(self) + self._install_cg() + self._install_type() + + def _reduce_states(self): + """ + NOTE: part of ReduceMixin protocol + """ + siglist = list(self._dispatcher.overloads.keys()) + return dict( + dispatcher=self._dispatcher, + identity=self.identity, + frozen=self._frozen, + siglist=siglist, + ) + + @classmethod + def _rebuild(cls, dispatcher, identity, frozen, siglist): + """ + NOTE: part of ReduceMixin protocol + """ + self = _internal._DUFunc.__new__(cls) + self._initialize(dispatcher, identity) + # Re-add signatures + for sig in siglist: + self.add(sig) + if frozen: + self.disable_compile() + return self + + def build_ufunc(self): + """ + For compatibility with the various *UFuncBuilder classes. + """ + return self + + @property + def targetoptions(self): + return self._dispatcher.targetoptions + + @property + def nin(self): + return self.ufunc.nin + + @property + def nout(self): + return self.ufunc.nout + + @property + def nargs(self): + return self.ufunc.nargs + + @property + def ntypes(self): + return self.ufunc.ntypes + + @property + def types(self): + return self.ufunc.types + + @property + def identity(self): + return self.ufunc.identity + + @property + def signature(self): + return self.ufunc.signature + + def disable_compile(self): + """ + Disable the compilation of new signatures at call time. + """ + # If disabling compilation then there must be at least one signature + assert len(self._dispatcher.overloads) > 0 + self._frozen = True + + def add(self, sig): + """ + Compile the DUFunc for the given signature. + """ + args, return_type = sigutils.normalize_signature(sig) + return self._compile_for_argtys(args, return_type) + + def __call__(self, *args, **kws): + """ + Allow any argument that has overridden __array_ufunc__ (NEP-18) + to take control of DUFunc.__call__. 
+ """ + default = numpy_support.np.ndarray.__array_ufunc__ + + for arg in args + tuple(kws.values()): + if getattr(type(arg), "__array_ufunc__", default) is not default: + output = arg.__array_ufunc__(self, "__call__", *args, **kws) + if output is not NotImplemented: + return output + else: + return super().__call__(*args, **kws) + + def _compile_for_args(self, *args, **kws): + nin = self.ufunc.nin + if kws: + if 'out' in kws: + out = kws.pop('out') + args += (out,) + if kws: + raise TypeError("unexpected keyword arguments to ufunc: %s" + % ", ".join(repr(k) for k in sorted(kws))) + + args_len = len(args) + assert (args_len == nin) or (args_len == nin + self.ufunc.nout) + assert not kws + argtys = [] + for arg in args[:nin]: + argty = typeof(arg) + if isinstance(argty, types.Array): + argty = argty.dtype + else: + # To avoid a mismatch in how Numba types scalar values as + # opposed to Numpy, we need special logic for scalars. + # For example, on 64-bit systems, numba.typeof(3) => int32, but + # np.array(3).dtype => int64. + + # Note: this will not handle numpy "duckarrays" correctly, + # including but not limited to those implementing `__array__` + # and `__array_ufunc__`. + argty = numpy_support.map_arrayscalar_type(arg) + argtys.append(argty) + return self._compile_for_argtys(tuple(argtys)) + + @global_compiler_lock + def _compile_for_argtys(self, argtys, return_type=None): + """ + Given a tuple of argument types (these should be the array + dtypes, and not the array types themselves), compile the + element-wise function for those inputs, generate a UFunc loop + wrapper, and register the loop with the Numpy ufunc object for + this DUFunc. 
+ """ + if self._frozen: + raise RuntimeError("compilation disabled for %s" % (self,)) + assert isinstance(argtys, tuple) + if return_type is None: + sig = argtys + else: + sig = return_type(*argtys) + + for k, cres in self._dispatcher.overloads.items(): + if argtys == k.args: + msg = ("Compilation requested for previously compiled argument" + f" types ({argtys}). This has no effect and perhaps " + "indicates a bug in the calling code (compiling a " + "ufunc more than once for the same signature") + warnings.warn(msg, errors.NumbaWarning) + return cres + + cres, argtys, return_type = ufuncbuilder._compile_element_wise_function( + self._dispatcher, self.targetoptions, sig) + actual_sig = ufuncbuilder._finalize_ufunc_signature( + cres, argtys, return_type) + dtypenums, ptr, env = ufuncbuilder._build_element_wise_ufunc_wrapper( + cres, actual_sig) + self._add_loop(int(ptr), dtypenums) + self._keepalive.append((ptr, cres.library, env)) + self._lower_me.libs.append(cres.library) + return cres + + def match_signature(self, ewise_types, sig): + return sig.args == ewise_types + + def _install_ufunc_attributes(self, template) -> None: + + def get_attr_fn(attr: str) -> Callable: + + def impl(ufunc): + val = getattr(ufunc.key[0], attr) + return lambda ufunc: val + return impl + + # ntypes/types needs "at" to be a BoundFunction rather than a Function + # But this fails as it cannot a weak reference to an ufunc due to NumPy + # not setting the "tp_weaklistoffset" field. 
See: + # https://github.com/numpy/numpy/blob/7fc72776b972bfbfdb909e4b15feb0308cf8adba/numpy/core/src/umath/ufunc_object.c#L6968-L6983 # noqa: E501 + + at = types.Function(template) + attributes = ('nin', 'nout', 'nargs', # 'ntypes', # 'types', + 'identity', 'signature') + for attr in attributes: + attr_fn = get_attr_fn(attr) + overload_attribute(at, attr)(attr_fn) + + def _install_ufunc_methods(self, template) -> None: + self._install_ufunc_reduce(template) + self._install_ufunc_at(template) + + def _install_ufunc_at(self, template) -> None: + at = types.Function(template) + + @overload_method(at, 'at') + def ol_at(ufunc, a, indices, b=None): + warnings.warn("ufunc.at feature is experimental", + category=errors.NumbaExperimentalFeatureWarning) + + if not isinstance(a, types.Array): + msg = 'The first argument "a" must be array-like' + raise errors.NumbaTypeError(msg) + + indices_arr = isinstance(indices, types.Array) + indices_list = isinstance(indices, types.List) + indices_tuple = isinstance(indices, types.Tuple) + indices_slice = isinstance(indices, types.SliceType) + indices_scalar = not (indices_arr or indices_slice or indices_tuple) + indices_empty_tuple = indices_tuple and len(indices) == 0 + b_array = isinstance(b, (types.Array, types.Sequence, types.List, + types.Tuple)) + b_none = cgutils.is_nonelike(b) + b_scalar = not (b_array or b_none) + need_cast = any([indices_list]) + + nin = self.ufunc.nin + + # missing second argument? 
+ if nin == 2 and cgutils.is_nonelike(b): + raise errors.TypingError('second operand needed for ufunc') + + # extra second argument + if nin == 1 and not cgutils.is_nonelike(b): + msg = 'second operand provided when ufunc is unary' + raise errors.TypingError(msg) + + if cgutils.is_nonelike(b): + self.add((a.dtype,)) + elif b_scalar: + self.add((a.dtype, b)) + else: + self.add((a.dtype, b.dtype)) + + def apply_ufunc_codegen(context, builder, sig, args): + from numba.np.arrayobj import make_array + + if len(args) == 4: + _, aty, idxty, bty = sig.args + _, a, indices, b = args + else: + _, aty, idxty, bty = sig.args + (None,) + _, a, indices, b = args + (None,) + + a = make_array(aty)(context, builder, a) + at_iter = UfuncAtIterator(ufunc, a, aty, indices, idxty, b, bty) + at_iter.run(context, builder) + + @intrinsic + def apply_a_b_ufunc(typingctx, ufunc, a, indices, b): + sig = types.none(ufunc, a, indices, b) + return sig, apply_ufunc_codegen + + @intrinsic + def apply_a_ufunc(typingctx, ufunc, a, indices): + sig = types.none(ufunc, a, indices) + return sig, apply_ufunc_codegen + + def impl_cast(ufunc, a, indices, b=None): + if b_none: + return ufunc.at(a, np.asarray(indices)) + else: + return ufunc.at(a, + np.asarray(indices), + np.asarray(b)) + + def impl_generic(ufunc, a, indices, b=None): + if b_none: + apply_a_ufunc(ufunc, a, indices,) + else: + b_ = np.asarray(b) + a_ = a[indices] + b_ = np.broadcast_to(b_, a_.shape) + apply_a_b_ufunc(ufunc, a, indices, b_.flat) + + def impl_indices_empty_b_scalar(ufunc, a, indices, b=None): + a[()] = ufunc(a[()], b) + + def impl_scalar_scalar(ufunc, a, indices, b=None): + if b_none: + a[indices] = ufunc(a[indices]) + else: + a[indices] = ufunc(a[indices], b) + + if need_cast: + return impl_cast + elif indices_empty_tuple and b_scalar: + return impl_indices_empty_b_scalar + elif indices_scalar and b_scalar: + return impl_scalar_scalar + else: + return impl_generic + + def _install_ufunc_reduce(self, template) -> None: + at = 
types.Function(template) + + @overload_method(at, 'reduce') + def ol_reduce(ufunc, array, axis=0, dtype=None, initial=None): + + warnings.warn("ufunc.reduce feature is experimental", + category=errors.NumbaExperimentalFeatureWarning) + + if not isinstance(array, types.Array): + msg = 'The first argument "array" must be array-like' + raise errors.NumbaTypeError(msg) + + axis_int_tuple = isinstance(axis, types.UniTuple) and \ + isinstance(axis.dtype, types.Integer) + axis_empty_tuple = isinstance(axis, types.Tuple) and len(axis) == 0 + axis_none = cgutils.is_nonelike(axis) + + identity_none = self.ufunc.identity is None + ufunc_name = self.ufunc.__name__ + + # In NumPy, a ufunc is reorderable if its identity type is **not** + # PyUfunc_None. + if not self.reorderable and axis_int_tuple and len(axis) > 1: + msg = (f"reduction operation '{ufunc_name}' is not " + "reorderable, so at most one axis may be specified") + raise errors.NumbaTypeError(msg) + + tup_init = (0,) * (array.ndim) + tup_init_m1 = (0,) * (array.ndim - 1) + nb_dtype = array.dtype if cgutils.is_nonelike(dtype) else dtype + identity = self.identity + + id_none = cgutils.is_nonelike(identity) + init_none = cgutils.is_nonelike(initial) + + @register_jitable + def tuple_slice(tup, pos): + # Same as + # tup = tup[0 : pos] + tup[pos + 1:] + s = tup_init_m1 + i = 0 + for j, e in enumerate(tup): + if j == pos: + continue + s = tuple_setitem(s, i, e) + i += 1 + return s + + @register_jitable + def tuple_slice_append(tup, pos, val): + # Same as + # tup = tup[0 : pos] + val + tup[pos + 1:] + s = tup_init + i, j, sz = 0, 0, len(s) + while j < sz: + if j == pos: + s = tuple_setitem(s, j, val) + else: + e = tup[i] + s = tuple_setitem(s, j, e) + i += 1 + j += 1 + return s + + @intrinsic + def compute_flat_idx(typingctx, strides, itemsize, idx, axis): + sig = types.intp(strides, itemsize, idx, axis) + len_idx = len(idx) + + def gen_block(builder, block_pos, block_name, bb_end, args): + strides, _, idx, _ = args + bb = 
builder.append_basic_block(name=block_name) + + with builder.goto_block(bb): + zero = ir.IntType(64)(0) + flat_idx = zero + + if block_pos == 0: + for i in range(1, len_idx): + stride = builder.extract_value(strides, i - 1) + idx_i = builder.extract_value(idx, i) + m = builder.mul(stride, idx_i) + flat_idx = builder.add(flat_idx, m) + elif 0 < block_pos < len_idx - 1: + for i in range(0, block_pos): + stride = builder.extract_value(strides, i) + idx_i = builder.extract_value(idx, i) + m = builder.mul(stride, idx_i) + flat_idx = builder.add(flat_idx, m) + + for i in range(block_pos + 1, len_idx): + stride = builder.extract_value(strides, i - 1) + idx_i = builder.extract_value(idx, i) + m = builder.mul(stride, idx_i) + flat_idx = builder.add(flat_idx, m) + else: + for i in range(0, len_idx - 1): + stride = builder.extract_value(strides, i) + idx_i = builder.extract_value(idx, i) + m = builder.mul(stride, idx_i) + flat_idx = builder.add(flat_idx, m) + + builder.branch(bb_end) + + return bb, flat_idx + + def codegen(context, builder, sig, args): + strides, itemsize, idx, axis = args + + bb = builder.basic_block + switch_end = builder.append_basic_block(name='axis_end') + l = [] + for i in range(len_idx): + block, flat_idx = gen_block(builder, i, f"axis_{i}", + switch_end, args) + l.append((block, flat_idx)) + + with builder.goto_block(bb): + switch = builder.switch(axis, l[-1][0]) + for i in range(len_idx): + switch.add_case(i, l[i][0]) + + builder.position_at_end(switch_end) + phi = builder.phi(l[0][1].type) + for block, value in l: + phi.add_incoming(value, block) + return builder.sdiv(phi, itemsize) + + return sig, codegen + + @register_jitable + def fixup_axis(axis, ndim): + ax = axis + for i in range(len(axis)): + val = axis[i] + ndim if axis[i] < 0 else axis[i] + ax = tuple_setitem(ax, i, val) + return ax + + @register_jitable + def find_min(tup): + idx, e = 0, tup[0] + for i in range(len(tup)): + if tup[i] < e: + idx, e = i, tup[i] + return idx, e + + def 
impl_1d(ufunc, array, axis=0, dtype=None, initial=None): + if identity_none and initial is None and len(array) == 0: + msg = ('zero-size array to reduction operation ' + f'{ufunc_name} which has no identity') + raise ValueError(msg) + + start = 0 + if init_none and id_none: + start = 1 + r = array[0] + elif init_none: + r = identity + else: + r = initial + + sz = array.shape[0] + for i in range(start, sz): + r = ufunc(r, array[i]) + return r + + def impl_nd_axis_int(ufunc, + array, + axis=0, + dtype=None, + initial=None): + if axis is None: + raise ValueError("'axis' must be specified") + + if axis < 0: + axis += array.ndim + + if axis < 0 or axis >= array.ndim: + raise ValueError("Invalid axis") + + if identity_none and initial is None and array.shape[axis] == 0: + msg = ('zero-size array to reduction operation ' + f'{ufunc_name} which has no identity') + raise ValueError(msg) + + # create result array + shape = tuple_slice(array.shape, axis) + + if initial is None and identity is None: + r = np.empty(shape, dtype=nb_dtype) + for idx, _ in np.ndenumerate(r): + # shape[0:axis] + 0 + shape[axis:] + result_idx = tuple_slice_append(idx, axis, 0) + r[idx] = array[result_idx] + elif initial is None and identity is not None: + # Checking if identity is not none is redundant but required + # compile this block + r = np.full(shape, fill_value=identity, dtype=nb_dtype) + else: + r = np.full(shape, fill_value=initial, dtype=nb_dtype) + + # One approach to implement reduce is to remove the axis index + # from the indexing tuple returned by "np.ndenumerate". For + # instance, if idx = (X, Y, Z) and axis=1, the result index + # is (X, Y). + # Another way is to compute the result index using strides, + # which is faster than manipulating tuples. 
+ view = r.ravel() + if initial is None and identity is None: + for idx, val in np.ndenumerate(array): + if idx[axis] == 0: + continue + else: + flat_pos = compute_flat_idx(r.strides, r.itemsize, + idx, axis) + lhs, rhs = view[flat_pos], val + view[flat_pos] = ufunc(lhs, rhs) + else: + for idx, val in np.ndenumerate(array): + if initial is None and identity is None and \ + idx[axis] == 0: + continue + flat_pos = compute_flat_idx(r.strides, r.itemsize, + idx, axis) + lhs, rhs = view[flat_pos], val + view[flat_pos] = ufunc(lhs, rhs) + return r + + def impl_nd_axis_tuple(ufunc, + array, + axis=0, + dtype=None, + initial=None): + axis_ = fixup_axis(axis, array.ndim) + for i in range(0, len(axis_)): + if axis_[i] < 0 or axis_[i] >= array.ndim: + raise ValueError("Invalid axis") + + for j in range(i + 1, len(axis_)): + if axis_[i] == axis_[j]: + raise ValueError("duplicate value in 'axis'") + + min_idx, min_elem = find_min(axis_) + r = ufunc.reduce(array, + axis=min_elem, + dtype=dtype, + initial=initial) + if len(axis) == 1: + return r + elif len(axis) == 2: + return ufunc.reduce(r, axis=axis_[(min_idx + 1) % 2] - 1) + else: + ax = axis_tup + for i in range(len(ax)): + if i != min_idx: + ax = tuple_setitem(ax, i, axis_[i]) + return ufunc.reduce(r, axis=ax) + + def impl_axis_empty_tuple(ufunc, + array, + axis=0, + dtype=None, + initial=None): + return array + + def impl_axis_none(ufunc, + array, + axis=0, + dtype=None, + initial=None): + return ufunc.reduce(array, axis_tup, dtype, initial) + + if array.ndim == 1 and not axis_empty_tuple: + return impl_1d + elif axis_empty_tuple: + # ufunc(array, axis=()) + return impl_axis_empty_tuple + elif axis_none: + # ufunc(array, axis=None) + axis_tup = tuple(range(array.ndim)) + return impl_axis_none + elif axis_int_tuple: + # axis is tuple of integers + # ufunc(array, axis=(1, 2, ...)) + axis_tup = (0,) * (len(axis) - 1) + return impl_nd_axis_tuple + elif axis == 0 or isinstance(axis, (types.Integer, + types.Omitted, + 
types.IntegerLiteral)): + # axis is default value (0) or an integer + # ufunc(array, axis=0) + return impl_nd_axis_int + + def at(self, a, indices, b=None): + # dynamic compile ufunc.at + args = (a,) if cgutils.is_nonelike(b) else (a, b) + argtys = (typeof(arg) for arg in args) + ewise_types = tuple(arg.dtype if isinstance(arg, types.Array) else arg + for arg in argtys) + + if self.find_ewise_function(ewise_types) == (None, None): + # cannot find a matching function and compilation is disabled + if self._frozen: + msg = "compilation disabled for %s.at(...)" % (self,) + raise RuntimeError(msg) + + self._compile_for_args(*args) + + # all good, just dispatch to the function + if cgutils.is_nonelike(b): + return super().at(a, indices) + else: + return super().at(*(a, indices, b)) + + def _install_type(self, typingctx=None): + """Constructs and installs a typing class for a DUFunc object in the + input typing context. If no typing context is given, then + _install_type() installs into the typing context of the + dispatcher object (should be same default context used by + jit() and njit()). + """ + if typingctx is None: + typingctx = self._dispatcher.targetdescr.typing_context + _ty_cls = type('DUFuncTyping_' + self.ufunc.__name__, + (AbstractTemplate,), + dict(key=self, generic=self._type_me)) + typingctx.insert_user_function(self, _ty_cls) + self._install_ufunc_attributes(_ty_cls) + self._install_ufunc_methods(_ty_cls) + + def find_ewise_function(self, ewise_types): + """ + Given a tuple of element-wise argument types, find a matching + signature in the dispatcher. + + Return a 2-tuple containing the matching signature, and + compilation result. Will return two None's if no matching + signature was found. 
+ """ + if self._frozen: + # If we cannot compile, coerce to the best matching loop + loop = numpy_support.ufunc_find_matching_loop(self, ewise_types) + if loop is None: + return None, None + ewise_types = tuple(loop.inputs + loop.outputs)[:len(ewise_types)] + for sig, cres in self._dispatcher.overloads.items(): + if sig.args == ewise_types: + return sig, cres + return None, None + + def _type_me(self, argtys, kwtys): + """ + Implement AbstractTemplate.generic() for the typing class + built by DUFunc._install_type(). + + Return the call-site signature after either validating the + element-wise signature or compiling for it. + """ + assert not kwtys + ufunc = self.ufunc + _handle_inputs_result = npydecl.Numpy_rules_ufunc._handle_inputs( + ufunc, argtys, kwtys) + base_types, explicit_outputs, ndims, layout = _handle_inputs_result + explicit_output_count = len(explicit_outputs) + if explicit_output_count > 0: + ewise_types = tuple(base_types[:-len(explicit_outputs)]) + else: + ewise_types = tuple(base_types) + sig, cres = self.find_ewise_function(ewise_types) + if sig is None: + # Matching element-wise signature was not found; must + # compile. 
+ if self._frozen: + raise errors.NumbaTypeError("cannot call %s with types %s" + % (self, argtys)) + self._compile_for_argtys(ewise_types) + sig, cres = self.find_ewise_function(ewise_types) + assert sig is not None + if explicit_output_count > 0: + outtys = list(explicit_outputs) + elif ufunc.nout == 1: + if ndims > 0: + outtys = [types.Array(sig.return_type, ndims, layout)] + else: + outtys = [sig.return_type] + else: + raise errors.NumbaNotImplementedError("typing gufuncs (nout > 1)") + outtys.extend(argtys) + return signature(*outtys) + + +array_analysis.MAP_TYPES.append(DUFunc) diff --git a/venv/lib/python3.10/site-packages/numba/np/ufunc/gufunc.py b/venv/lib/python3.10/site-packages/numba/np/ufunc/gufunc.py new file mode 100644 index 0000000000000000000000000000000000000000..103ef7b5d9505cb32f48892cf474c597da160eb3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/ufunc/gufunc.py @@ -0,0 +1,323 @@ +from numba import typeof +from numba.core import types +from numba.np.ufunc.ufuncbuilder import GUFuncBuilder +from numba.np.ufunc.sigparse import parse_signature +from numba.np.ufunc.ufunc_base import UfuncBase, UfuncLowererBase +from numba.np.numpy_support import ufunc_find_matching_loop +from numba.core import serialize, errors +from numba.core.typing import npydecl +from numba.core.typing.templates import signature, AbstractTemplate +import functools + + +def make_gufunc_kernel(_dufunc): + from numba.np import npyimpl + + class GUFuncKernel(npyimpl._Kernel): + """ + npyimpl._Kernel subclass responsible for lowering a gufunc kernel + (element-wise function) inside a broadcast loop (which is + generated by npyimpl.numpy_gufunc_kernel()). 
+ """ + dufunc = _dufunc + + def __init__(self, context, builder, outer_sig): + super().__init__(context, builder, outer_sig) + ewise_types = self.dufunc._get_ewise_dtypes(outer_sig.args) + self.inner_sig, self.cres = self.dufunc.find_ewise_function( + ewise_types) + + def cast(self, val, fromty, toty): + # Handle the case where "fromty" is an array and "toty" a scalar + if isinstance(fromty, types.Array) and not \ + isinstance(toty, types.Array): + return super().cast(val, fromty.dtype, toty) + return super().cast(val, fromty, toty) + + def generate(self, *args): + if self.cres.objectmode: + msg = ('Calling a guvectorize function in object mode is not ' + 'supported yet.') + raise errors.NumbaRuntimeError(msg) + self.context.add_linking_libs((self.cres.library,)) + return super().generate(*args) + + GUFuncKernel.__name__ += _dufunc.__name__ + return GUFuncKernel + + +class GUFuncLowerer(UfuncLowererBase): + '''Callable class responsible for lowering calls to a specific gufunc. + ''' + def __init__(self, gufunc): + from numba.np import npyimpl + super().__init__(gufunc, + make_gufunc_kernel, + npyimpl.numpy_gufunc_kernel) + + +class GUFunc(serialize.ReduceMixin, UfuncBase): + """ + Dynamic generalized universal function (GUFunc) + intended to act like a normal Numpy gufunc, but capable + of call-time (just-in-time) compilation of fast loops + specialized to inputs. + """ + + def __init__(self, py_func, signature, identity=None, cache=None, + is_dynamic=False, targetoptions={}, writable_args=()): + self.ufunc = None + self._frozen = False + self._is_dynamic = is_dynamic + self._identity = identity + + # GUFunc cannot inherit from GUFuncBuilder because "identity" + # is a property of GUFunc. 
Thus, we hold a reference to a GUFuncBuilder + # object here + self.gufunc_builder = GUFuncBuilder( + py_func, signature, identity, cache, targetoptions, writable_args) + + self.__name__ = self.gufunc_builder.py_func.__name__ + self.__doc__ = self.gufunc_builder.py_func.__doc__ + self._dispatcher = self.gufunc_builder.nb_func + self._initialize(self._dispatcher) + functools.update_wrapper(self, py_func) + + def _initialize(self, dispatcher): + self.build_ufunc() + self._install_type() + self._lower_me = GUFuncLowerer(self) + self._install_cg() + + def _reduce_states(self): + gb = self.gufunc_builder + dct = dict( + py_func=gb.py_func, + signature=gb.signature, + identity=self._identity, + cache=gb.cache, + is_dynamic=self._is_dynamic, + targetoptions=gb.targetoptions, + writable_args=gb.writable_args, + typesigs=gb._sigs, + frozen=self._frozen, + ) + return dct + + @classmethod + def _rebuild(cls, py_func, signature, identity, cache, is_dynamic, + targetoptions, writable_args, typesigs, frozen): + self = cls(py_func=py_func, signature=signature, identity=identity, + cache=cache, is_dynamic=is_dynamic, + targetoptions=targetoptions, writable_args=writable_args) + for sig in typesigs: + self.add(sig) + self.build_ufunc() + self._frozen = frozen + return self + + def __repr__(self): + return f"" + + def _install_type(self, typingctx=None): + """Constructs and installs a typing class for a gufunc object in the + input typing context. If no typing context is given, then + _install_type() installs into the typing context of the + dispatcher object (should be same default context used by + jit() and njit()). 
+ """ + if typingctx is None: + typingctx = self._dispatcher.targetdescr.typing_context + _ty_cls = type('GUFuncTyping_' + self.__name__, + (AbstractTemplate,), + dict(key=self, generic=self._type_me)) + typingctx.insert_user_function(self, _ty_cls) + + def add(self, fty): + self.gufunc_builder.add(fty) + + def build_ufunc(self): + self.ufunc = self.gufunc_builder.build_ufunc() + return self + + def expected_ndims(self): + parsed_sig = parse_signature(self.gufunc_builder.signature) + return (tuple(map(len, parsed_sig[0])), tuple(map(len, parsed_sig[1]))) + + def _type_me(self, argtys, kws): + """ + Implement AbstractTemplate.generic() for the typing class + built by gufunc._install_type(). + + Return the call-site signature after either validating the + element-wise signature or compiling for it. + """ + assert not kws + ufunc = self.ufunc + sig = self.gufunc_builder.signature + inp_ndims, out_ndims = self.expected_ndims() + ndims = inp_ndims + out_ndims + + assert len(argtys), len(ndims) + for idx, arg in enumerate(argtys): + if isinstance(arg, types.Array) and arg.ndim < ndims[idx]: + kind = "Input" if idx < len(inp_ndims) else "Output" + i = idx if idx < len(inp_ndims) else idx - len(inp_ndims) + msg = ( + f"{self.__name__}: {kind} operand {i} does not have " + f"enough dimensions (has {arg.ndim}, gufunc core with " + f"signature {sig} requires {ndims[idx]})") + raise errors.TypingError(msg) + + _handle_inputs_result = npydecl.Numpy_rules_ufunc._handle_inputs( + ufunc, argtys, kws) + ewise_types, _, _, _ = _handle_inputs_result + sig, _ = self.find_ewise_function(ewise_types) + + if sig is None: + # Matching element-wise signature was not found; must + # compile. 
+ if self._frozen: + msg = f"cannot call {self} with types {argtys}" + raise errors.TypingError(msg) + self._compile_for_argtys(ewise_types) + # double check to ensure there is a match + sig, _ = self.find_ewise_function(ewise_types) + if sig == (None, None): + msg = f"Fail to compile {self.__name__} with types {argtys}" + raise errors.TypingError(msg) + + assert sig is not None + + return signature(types.none, *argtys) + + def _compile_for_argtys(self, argtys, return_type=None): + # Compile a new guvectorize function! Use the gufunc signature + # i.e. (n,m),(m)->(n) + # plus ewise_types to build a numba function type + fnty = self._get_function_type(*argtys) + self.gufunc_builder.add(fnty) + + def match_signature(self, ewise_types, sig): + dtypes = self._get_ewise_dtypes(sig.args) + return tuple(dtypes) == tuple(ewise_types) + + @property + def is_dynamic(self): + return self._is_dynamic + + def _get_ewise_dtypes(self, args): + argtys = map(lambda arg: arg if isinstance(arg, types.Type) else + typeof(arg), args) + tys = [] + for argty in argtys: + if isinstance(argty, types.Array): + tys.append(argty.dtype) + else: + tys.append(argty) + return tys + + def _num_args_match(self, *args): + parsed_sig = parse_signature(self.gufunc_builder.signature) + return len(args) == len(parsed_sig[0]) + len(parsed_sig[1]) + + def _get_function_type(self, *args): + parsed_sig = parse_signature(self.gufunc_builder.signature) + # ewise_types is a list of [int32, int32, int32, ...] 
+ ewise_types = self._get_ewise_dtypes(args) + + # first time calling the gufunc + # generate a signature based on input arguments + l = [] + for idx, sig_dim in enumerate(parsed_sig[0]): + ndim = len(sig_dim) + if ndim == 0: # append scalar + l.append(ewise_types[idx]) + else: + l.append(types.Array(ewise_types[idx], ndim, 'A')) + + offset = len(parsed_sig[0]) + # add return type to signature + for idx, sig_dim in enumerate(parsed_sig[1]): + retty = ewise_types[idx + offset] + ret_ndim = len(sig_dim) or 1 # small hack to return scalars + l.append(types.Array(retty, ret_ndim, 'A')) + + return types.none(*l) + + def __call__(self, *args, **kwargs): + # If compilation is disabled OR it is NOT a dynamic gufunc + # call the underlying gufunc + if self._frozen or not self.is_dynamic: + # Do not unwrap the ufunc if the argument is a wrapper that will + # potentially pickle the ufunc after it receives it in + # __array_ufunc__. The same logic in theory should be replicated + # for reduce(), outer(), etc., but they're not implemented in dask. + if args and _is_array_wrapper(args[0]): + return args[0].__array_ufunc__( + self, "__call__", *args, **kwargs + ) + else: + return self.ufunc(*args, **kwargs) + elif "out" in kwargs: + # If "out" argument is supplied + args += (kwargs.pop("out"),) + + if self._num_args_match(*args) is False: + # It is not allowed to call a dynamic gufunc without + # providing all the arguments + # see: https://github.com/numba/numba/pull/5938#discussion_r506429392 # noqa: E501 + msg = ( + f"Too few arguments for function '{self.__name__}'. " + "Note that the pattern `out = gufunc(Arg1, Arg2, ..., ArgN)` " + "is not allowed. 
Use `gufunc(Arg1, Arg2, ..., ArgN, out) " + "instead.") + raise TypeError(msg) + + # at this point we know the gufunc is a dynamic one + ewise = self._get_ewise_dtypes(args) + if not (self.ufunc and ufunc_find_matching_loop(self.ufunc, ewise)): + # A previous call (@njit -> @guvectorize) may have compiled a + # version for the element-wise dtypes. In this case, we don't need + # to compile it again, just build the (g)ufunc + if not self.find_ewise_function(ewise) != (None, None): + sig = self._get_function_type(*args) + self.add(sig) + self.build_ufunc() + + return self.ufunc(*args, **kwargs) + + +def _is_array_wrapper(obj): + """Return True if obj wraps around numpy or another numpy-like library + and is likely going to apply the ufunc to the wrapped array; False + otherwise. + + At the moment, this returns True for + + - dask.array.Array + - dask.dataframe.DataFrame + - dask.dataframe.Series + - xarray.DataArray + - xarray.Dataset + - xarray.Variable + - pint.Quantity + - other potential wrappers around dask array or dask dataframe + + We may need to add other libraries that pickle ufuncs from their + __array_ufunc__ method in the future. + + Note that the below test is a lot more naive than + `dask.base.is_dask_collection` + (https://github.com/dask/dask/blob/5949e54bc04158d215814586a44d51e0eb4a964d/dask/base.py#L209-L249), # noqa: E501 + because it doesn't need to find out if we're actually dealing with + a dask collection, only that we're dealing with a wrapper. + Namely, it will return True for a pint.Quantity wrapping around a plain float, a + numpy.ndarray, or a dask.array.Array, and it's OK because in all cases + Quantity.__array_ufunc__ is going to forward the ufunc call inwards. 
+ """ + return ( + not isinstance(obj, type) + and hasattr(obj, "__dask_graph__") + and hasattr(obj, "__array_ufunc__") + ) diff --git a/venv/lib/python3.10/site-packages/numba/np/ufunc/parallel.py b/venv/lib/python3.10/site-packages/numba/np/ufunc/parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..b80262dcf2c6123a46cc51f14db86c4000e1eb1e --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/ufunc/parallel.py @@ -0,0 +1,761 @@ +""" +This file implements the code-generator for parallel-vectorize. + +ParallelUFunc is the platform independent base class for generating +the thread dispatcher. This thread dispatcher launches threads +that execute the generated function of UFuncCore. +UFuncCore is subclassed to specialize for the input/output types. +The actual workload is invoked inside the function generated by UFuncCore. +UFuncCore also defines a work-stealing mechanism that allows idle threads +to steal works from other threads. +""" + +import os +import sys +import warnings +from threading import RLock as threadRLock +from ctypes import CFUNCTYPE, c_int, CDLL, POINTER, c_uint + +import numpy as np + +import llvmlite.binding as ll +from llvmlite import ir + +from numba.np.numpy_support import as_dtype +from numba.core import types, cgutils, config, errors +from numba.core.typing import signature +from numba.np.ufunc.wrappers import _wrapper_info +from numba.np.ufunc import ufuncbuilder +from numba.extending import overload, intrinsic + +_IS_OSX = sys.platform.startswith('darwin') +_IS_LINUX = sys.platform.startswith('linux') +_IS_WINDOWS = sys.platform.startswith('win32') + + +def get_thread_count(): + """ + Gets the available thread count. 
+ """ + t = config.NUMBA_NUM_THREADS + if t < 1: + raise ValueError("Number of threads specified must be > 0.") + return t + + +NUM_THREADS = get_thread_count() + + +def build_gufunc_kernel(library, ctx, info, sig, inner_ndim): + """Wrap the original CPU ufunc/gufunc with a parallel dispatcher. + This function will wrap gufuncs and ufuncs something like. + + Args + ---- + ctx + numba's codegen context + + info: (library, env, name) + inner function info + + sig + type signature of the gufunc + + inner_ndim + inner dimension of the gufunc (this is len(sig.args) in the case of a + ufunc) + + Returns + ------- + wrapper_info : (library, env, name) + The info for the gufunc wrapper. + + Details + ------- + + The kernel signature looks like this: + + void kernel(char **args, npy_intp *dimensions, npy_intp* steps, void* data) + + args - the input arrays + output arrays + dimensions - the dimensions of the arrays + steps - the step size for the array (this is like sizeof(type)) + data - any additional data + + The parallel backend then stages multiple calls to this kernel concurrently + across a number of threads. Practically, for each item of work, the backend + duplicates `dimensions` and adjusts the first entry to reflect the size of + the item of work, it also forms up an array of pointers into the args for + offsets to read/write from/to with respect to its position in the items of + work. This allows the same kernel to be used for each item of work, with + simply adjusted reads/writes/domain sizes and is safe by virtue of the + domain partitioning. + + NOTE: The execution backend is passed the requested thread count, but it can + choose to ignore it (TBB)! 
+ """ + assert isinstance(info, tuple) # guard against old usage + # Declare types and function + byte_t = ir.IntType(8) + byte_ptr_t = ir.PointerType(byte_t) + byte_ptr_ptr_t = ir.PointerType(byte_ptr_t) + + intp_t = ctx.get_value_type(types.intp) + intp_ptr_t = ir.PointerType(intp_t) + + fnty = ir.FunctionType(ir.VoidType(), [ir.PointerType(byte_ptr_t), + ir.PointerType(intp_t), + ir.PointerType(intp_t), + byte_ptr_t]) + wrapperlib = ctx.codegen().create_library('parallelgufuncwrapper') + mod = wrapperlib.create_ir_module('parallel.gufunc.wrapper') + kernel_name = ".kernel.{}_{}".format(id(info.env), info.name) + lfunc = ir.Function(mod, fnty, name=kernel_name) + + bb_entry = lfunc.append_basic_block('') + + # Function body starts + builder = ir.IRBuilder(bb_entry) + + args, dimensions, steps, data = lfunc.args + + # Release the GIL (and ensure we have the GIL) + # Note: numpy ufunc may not always release the GIL; thus, + # we need to ensure we have the GIL. + pyapi = ctx.get_python_api(builder) + gil_state = pyapi.gil_ensure() + thread_state = pyapi.save_thread() + + def as_void_ptr(arg): + return builder.bitcast(arg, byte_ptr_t) + + # Array count depends on whether an "output" array is needed. In the case + # of a void return type cf. gufunc it is the number of args, in the case of + # a non-void return type cf. ufunc it is the number of args + 1 so as to + # account for the output array. 
+ array_count = len(sig.args) + if not isinstance(sig.return_type, types.NoneType): + array_count += 1 + + parallel_for_ty = ir.FunctionType(ir.VoidType(), + [byte_ptr_t] * 5 + [intp_t, ] * 3) + parallel_for = cgutils.get_or_insert_function(mod, parallel_for_ty, + 'numba_parallel_for') + + # Reference inner-function and link + innerfunc_fnty = ir.FunctionType( + ir.VoidType(), + [byte_ptr_ptr_t, intp_ptr_t, intp_ptr_t, byte_ptr_t], + ) + tmp_voidptr = cgutils.get_or_insert_function(mod, innerfunc_fnty, + info.name,) + wrapperlib.add_linking_library(info.library) + + get_num_threads = cgutils.get_or_insert_function( + builder.module, + ir.FunctionType(ir.IntType(types.intp.bitwidth), []), + "get_num_threads") + + num_threads = builder.call(get_num_threads, []) + + # Prepare call + fnptr = builder.bitcast(tmp_voidptr, byte_ptr_t) + innerargs = [as_void_ptr(x) for x + in [args, dimensions, steps, data]] + builder.call(parallel_for, [fnptr] + innerargs + + [intp_t(x) for x in (inner_ndim, array_count)] + [num_threads]) + + # Release the GIL + pyapi.restore_thread(thread_state) + pyapi.gil_release(gil_state) + + builder.ret_void() + + wrapperlib.add_ir_module(mod) + wrapperlib.add_linking_library(library) + return _wrapper_info(library=wrapperlib, name=lfunc.name, env=info.env) + + +# ------------------------------------------------------------------------------ + +class ParallelUFuncBuilder(ufuncbuilder.UFuncBuilder): + def build(self, cres, sig): + _launch_threads() + + # Builder wrapper for ufunc entry point + ctx = cres.target_context + signature = cres.signature + library = cres.library + fname = cres.fndesc.llvm_func_name + + info = build_ufunc_wrapper(library, ctx, fname, signature, cres) + ptr = info.library.get_pointer_to_function(info.name) + # Get dtypes + dtypenums = [np.dtype(a.name).num for a in signature.args] + dtypenums.append(np.dtype(signature.return_type.name).num) + keepalive = () + return dtypenums, ptr, keepalive + + +def 
build_ufunc_wrapper(library, ctx, fname, signature, cres): + innerfunc = ufuncbuilder.build_ufunc_wrapper(library, ctx, fname, + signature, objmode=False, + cres=cres) + info = build_gufunc_kernel(library, ctx, innerfunc, signature, + len(signature.args)) + return info + +# --------------------------------------------------------------------------- + + +class ParallelGUFuncBuilder(ufuncbuilder.GUFuncBuilder): + def __init__(self, py_func, signature, identity=None, cache=False, + targetoptions={}, writable_args=()): + # Force nopython mode + targetoptions.update(dict(nopython=True)) + super( + ParallelGUFuncBuilder, + self).__init__( + py_func=py_func, + signature=signature, + identity=identity, + cache=cache, + targetoptions=targetoptions, + writable_args=writable_args) + + def build(self, cres): + """ + Returns (dtype numbers, function ptr, EnvironmentObject) + """ + _launch_threads() + + # Build wrapper for ufunc entry point + info = build_gufunc_wrapper( + self.py_func, cres, self.sin, self.sout, cache=self.cache, + is_parfors=False, + ) + ptr = info.library.get_pointer_to_function(info.name) + env = info.env + + # Get dtypes + dtypenums = [] + for a in cres.signature.args: + if isinstance(a, types.Array): + ty = a.dtype + else: + ty = a + dtypenums.append(as_dtype(ty).num) + + return dtypenums, ptr, env + + +# This is not a member of the ParallelGUFuncBuilder function because it is +# called without an enclosing instance from parfors + +def build_gufunc_wrapper(py_func, cres, sin, sout, cache, is_parfors): + """Build gufunc wrapper for the given arguments. + The *is_parfors* is a boolean indicating whether the gufunc is being + built for use as a ParFors kernel. This changes codegen and caching + behavior. 
+ """ + library = cres.library + ctx = cres.target_context + signature = cres.signature + innerinfo = ufuncbuilder.build_gufunc_wrapper( + py_func, cres, sin, sout, cache=cache, is_parfors=is_parfors, + ) + sym_in = set(sym for term in sin for sym in term) + sym_out = set(sym for term in sout for sym in term) + inner_ndim = len(sym_in | sym_out) + + info = build_gufunc_kernel( + library, ctx, innerinfo, signature, inner_ndim, + ) + return info + +# --------------------------------------------------------------------------- + + +_backend_init_thread_lock = threadRLock() + +_windows = sys.platform.startswith('win32') + + +class _nop(object): + """A no-op contextmanager + """ + + def __enter__(self): + pass + + def __exit__(self, *args): + pass + + +_backend_init_process_lock = None + + +def _set_init_process_lock(): + global _backend_init_process_lock + try: + # Force the use of an RLock in the case a fork was used to start the + # process and thereby the init sequence, some of the threading backend + # init sequences are not fork safe. Also, windows global mp locks seem + # to be fine. + with _backend_init_thread_lock: # protect part-initialized module access + import multiprocessing + if "fork" in multiprocessing.get_start_method() or _windows: + ctx = multiprocessing.get_context() + _backend_init_process_lock = ctx.RLock() + else: + _backend_init_process_lock = _nop() + + except OSError as e: + + # probably lack of /dev/shm for semaphore writes, warn the user + msg = ( + "Could not obtain multiprocessing lock due to OS level error: %s\n" + "A likely cause of this problem is '/dev/shm' is missing or " + "read-only such that necessary semaphores cannot be written.\n" + "*** The responsibility of ensuring multiprocessing safe access to " + "this initialization sequence/module import is deferred to the " + "user! 
***\n" + ) + warnings.warn(msg % str(e), errors.NumbaSystemWarning) + + _backend_init_process_lock = _nop() + + +_is_initialized = False + +# this is set by _launch_threads +_threading_layer = None + + +def threading_layer(): + """ + Get the name of the threading layer in use for parallel CPU targets + """ + if _threading_layer is None: + raise ValueError("Threading layer is not initialized.") + else: + return _threading_layer + + +def _check_tbb_version_compatible(): + """ + Checks that if TBB is present it is of a compatible version. + """ + try: + # first check that the TBB version is new enough + if _IS_WINDOWS: + libtbb_name = 'tbb12.dll' + elif _IS_OSX: + libtbb_name = 'libtbb.12.dylib' + elif _IS_LINUX: + libtbb_name = 'libtbb.so.12' + else: + raise ValueError("Unknown operating system") + libtbb = CDLL(libtbb_name) + version_func = libtbb.TBB_runtime_interface_version + version_func.argtypes = [] + version_func.restype = c_int + tbb_iface_ver = version_func() + if tbb_iface_ver < 12060: # magic number from TBB + msg = ("The TBB threading layer requires TBB " + "version 2021 update 6 or later i.e., " + "TBB_INTERFACE_VERSION >= 12060. Found " + "TBB_INTERFACE_VERSION = %s. The TBB " + "threading layer is disabled.") % tbb_iface_ver + problem = errors.NumbaWarning(msg) + warnings.warn(problem) + raise ImportError("Problem with TBB. Reason: %s" % msg) + except (ValueError, OSError) as e: + # Translate as an ImportError for consistent error class use, this error + # will never materialise + raise ImportError("Problem with TBB. 
Reason: %s" % e) + + +def _launch_threads(): + if not _backend_init_process_lock: + _set_init_process_lock() + + with _backend_init_process_lock: + with _backend_init_thread_lock: + global _is_initialized + if _is_initialized: + return + + def select_known_backend(backend): + """ + Loads a specific threading layer backend based on string + """ + lib = None + if backend.startswith("tbb"): + try: + # check if TBB is present and compatible + _check_tbb_version_compatible() + # now try and load the backend + from numba.np.ufunc import tbbpool as lib + except ImportError: + pass + elif backend.startswith("omp"): + # TODO: Check that if MKL is present that it is a version + # that understands GNU OMP might be present + try: + from numba.np.ufunc import omppool as lib + except ImportError: + pass + elif backend.startswith("workqueue"): + from numba.np.ufunc import workqueue as lib + else: + msg = "Unknown value specified for threading layer: %s" + raise ValueError(msg % backend) + return lib + + def select_from_backends(backends): + """ + Selects from presented backends and returns the first working + """ + lib = None + for backend in backends: + lib = select_known_backend(backend) + if lib is not None: + break + else: + backend = '' + return lib, backend + + t = str(config.THREADING_LAYER).lower() + namedbackends = config.THREADING_LAYER_PRIORITY + if not (len(namedbackends) == 3 and + set(namedbackends) == {'tbb', 'omp', 'workqueue'}): + raise ValueError( + "THREADING_LAYER_PRIORITY invalid: %s. 
" + "It must be a permutation of " + "{'tbb', 'omp', 'workqueue'}" + % namedbackends + ) + + lib = None + err_helpers = dict() + err_helpers['TBB'] = ("Intel TBB is required, try:\n" + "$ conda/pip install tbb") + err_helpers['OSX_OMP'] = ("Intel OpenMP is required, try:\n" + "$ conda/pip install intel-openmp") + requirements = [] + + def raise_with_hint(required): + errmsg = "No threading layer could be loaded.\n%s" + hintmsg = "HINT:\n%s" + if len(required) == 0: + hint = '' + if len(required) == 1: + hint = hintmsg % err_helpers[required[0]] + if len(required) > 1: + options = '\nOR\n'.join([err_helpers[x] for x in required]) + hint = hintmsg % ("One of:\n%s" % options) + raise ValueError(errmsg % hint) + + if t in namedbackends: + # Try and load the specific named backend + lib = select_known_backend(t) + if not lib: + # something is missing preventing a valid backend from + # loading, set requirements for hinting + if t == 'tbb': + requirements.append('TBB') + elif t == 'omp' and _IS_OSX: + requirements.append('OSX_OMP') + libname = t + elif t in ['threadsafe', 'forksafe', 'safe']: + # User wants a specific behaviour... 
+ available = ['tbb'] + requirements.append('TBB') + if t == "safe": + # "safe" is TBB, which is fork and threadsafe everywhere + pass + elif t == "threadsafe": + if _IS_OSX: + requirements.append('OSX_OMP') + # omp is threadsafe everywhere + available.append('omp') + elif t == "forksafe": + # everywhere apart from linux (GNU OpenMP) has a guaranteed + # forksafe OpenMP, as OpenMP has better performance, prefer + # this to workqueue + if not _IS_LINUX: + available.append('omp') + if _IS_OSX: + requirements.append('OSX_OMP') + # workqueue is forksafe everywhere + available.append('workqueue') + else: # unreachable + msg = "No threading layer available for purpose %s" + raise ValueError(msg % t) + # select amongst available + lib, libname = select_from_backends(available) + elif t == 'default': + # If default is supplied, try them in order, tbb, omp, + # workqueue + lib, libname = select_from_backends(namedbackends) + if not lib: + # set requirements for hinting + requirements.append('TBB') + if _IS_OSX: + requirements.append('OSX_OMP') + else: + msg = "The threading layer requested '%s' is unknown to Numba." 
+ raise ValueError(msg % t) + + # No lib found, raise and hint + if not lib: + raise_with_hint(requirements) + + ll.add_symbol('numba_parallel_for', lib.parallel_for) + ll.add_symbol('do_scheduling_signed', lib.do_scheduling_signed) + ll.add_symbol('do_scheduling_unsigned', lib.do_scheduling_unsigned) + ll.add_symbol('allocate_sched', lib.allocate_sched) + ll.add_symbol('deallocate_sched', lib.deallocate_sched) + + launch_threads = CFUNCTYPE(None, c_int)(lib.launch_threads) + launch_threads(NUM_THREADS) + + _load_threading_functions(lib) # load late + + # set library name so it can be queried + global _threading_layer + _threading_layer = libname + _is_initialized = True + + +def _load_threading_functions(lib): + + ll.add_symbol('get_num_threads', lib.get_num_threads) + ll.add_symbol('set_num_threads', lib.set_num_threads) + ll.add_symbol('get_thread_id', lib.get_thread_id) + + global _set_num_threads + _set_num_threads = CFUNCTYPE(None, c_int)(lib.set_num_threads) + _set_num_threads(NUM_THREADS) + + global _get_num_threads + _get_num_threads = CFUNCTYPE(c_int)(lib.get_num_threads) + + global _get_thread_id + _get_thread_id = CFUNCTYPE(c_int)(lib.get_thread_id) + + ll.add_symbol('set_parallel_chunksize', lib.set_parallel_chunksize) + ll.add_symbol('get_parallel_chunksize', lib.get_parallel_chunksize) + ll.add_symbol('get_sched_size', lib.get_sched_size) + global _set_parallel_chunksize + _set_parallel_chunksize = CFUNCTYPE(c_uint, + c_uint)(lib.set_parallel_chunksize) + global _get_parallel_chunksize + _get_parallel_chunksize = CFUNCTYPE(c_uint)(lib.get_parallel_chunksize) + global _get_sched_size + _get_sched_size = CFUNCTYPE(c_uint, + c_uint, + c_uint, + POINTER(c_int), + POINTER(c_int))(lib.get_sched_size) + + +# Some helpers to make set_num_threads jittable + +def gen_snt_check(): + from numba.core.config import NUMBA_NUM_THREADS + msg = "The number of threads must be between 1 and %s" % NUMBA_NUM_THREADS + + def snt_check(n): + if n > NUMBA_NUM_THREADS or n < 
1: + raise ValueError(msg) + return snt_check + + +snt_check = gen_snt_check() + + +@overload(snt_check) +def ol_snt_check(n): + return snt_check + + +def set_num_threads(n): + """ + Set the number of threads to use for parallel execution. + + By default, all :obj:`numba.config.NUMBA_NUM_THREADS` threads are used. + + This functionality works by masking out threads that are not used. + Therefore, the number of threads *n* must be less than or equal to + :obj:`~.NUMBA_NUM_THREADS`, the total number of threads that are launched. + See its documentation for more details. + + This function can be used inside of a jitted function. + + Parameters + ---------- + n: The number of threads. Must be between 1 and NUMBA_NUM_THREADS. + + See Also + -------- + get_num_threads, numba.config.NUMBA_NUM_THREADS, + numba.config.NUMBA_DEFAULT_NUM_THREADS, :envvar:`NUMBA_NUM_THREADS` + + """ + _launch_threads() + if not isinstance(n, (int, np.integer)): + raise TypeError("The number of threads specified must be an integer") + snt_check(n) + _set_num_threads(n) + + +@overload(set_num_threads) +def ol_set_num_threads(n): + _launch_threads() + if not isinstance(n, types.Integer): + msg = "The number of threads specified must be an integer" + raise errors.TypingError(msg) + + def impl(n): + snt_check(n) + _set_num_threads(n) + return impl + + +def get_num_threads(): + """ + Get the number of threads used for parallel execution. + + By default (if :func:`~.set_num_threads` is never called), all + :obj:`numba.config.NUMBA_NUM_THREADS` threads are used. + + This number is less than or equal to the total number of threads that are + launched, :obj:`numba.config.NUMBA_NUM_THREADS`. + + This function can be used inside of a jitted function. + + Returns + ------- + The number of threads. 
+ + See Also + -------- + set_num_threads, numba.config.NUMBA_NUM_THREADS, + numba.config.NUMBA_DEFAULT_NUM_THREADS, :envvar:`NUMBA_NUM_THREADS` + + """ + _launch_threads() + num_threads = _get_num_threads() + if num_threads <= 0: + raise RuntimeError("Invalid number of threads. " + "This likely indicates a bug in Numba. " + "(thread_id=%s, num_threads=%s)" % + (get_thread_id(), num_threads)) + return num_threads + + +@overload(get_num_threads) +def ol_get_num_threads(): + _launch_threads() + + def impl(): + num_threads = _get_num_threads() + if num_threads <= 0: + print("Broken thread_id: ", get_thread_id()) + print("num_threads: ", num_threads) + raise RuntimeError("Invalid number of threads. " + "This likely indicates a bug in Numba.") + return num_threads + return impl + + +@intrinsic +def _iget_num_threads(typingctx): + _launch_threads() + + def codegen(context, builder, signature, args): + mod = builder.module + fnty = ir.FunctionType(cgutils.intp_t, []) + fn = cgutils.get_or_insert_function(mod, fnty, "get_num_threads") + return builder.call(fn, []) + return signature(types.intp), codegen + + +def get_thread_id(): + """ + Returns a unique ID for each thread in the range 0 (inclusive) + to :func:`~.get_num_threads` (exclusive). 
+ """ + # Called from the interpreter directly, this should return 0 + # Called from a sequential JIT region, this should return 0 + # Called from a parallel JIT region, this should return 0..N + # Called from objmode in a parallel JIT region, this should return 0..N + _launch_threads() + return _get_thread_id() + + +@overload(get_thread_id) +def ol_get_thread_id(): + _launch_threads() + + def impl(): + return _iget_thread_id() + return impl + + +@intrinsic +def _iget_thread_id(typingctx): + def codegen(context, builder, signature, args): + mod = builder.module + fnty = ir.FunctionType(cgutils.intp_t, []) + fn = cgutils.get_or_insert_function(mod, fnty, "get_thread_id") + return builder.call(fn, []) + return signature(types.intp), codegen + + +_DYLD_WORKAROUND_SET = 'NUMBA_DYLD_WORKAROUND' in os.environ +_DYLD_WORKAROUND_VAL = int(os.environ.get('NUMBA_DYLD_WORKAROUND', 0)) + +if _DYLD_WORKAROUND_SET and _DYLD_WORKAROUND_VAL: + _launch_threads() + + +def set_parallel_chunksize(n): + _launch_threads() + if not isinstance(n, (int, np.integer)): + raise TypeError("The parallel chunksize must be an integer") + global _set_parallel_chunksize + if n < 0: + raise ValueError("chunksize must be greater than or equal to zero") + return _set_parallel_chunksize(n) + + +def get_parallel_chunksize(): + _launch_threads() + global _get_parallel_chunksize + return _get_parallel_chunksize() + + +@overload(set_parallel_chunksize) +def ol_set_parallel_chunksize(n): + _launch_threads() + if not isinstance(n, types.Integer): + msg = "The parallel chunksize must be an integer" + raise errors.TypingError(msg) + + def impl(n): + if n < 0: + raise ValueError("chunksize must be greater than or equal to zero") + return _set_parallel_chunksize(n) + return impl + + +@overload(get_parallel_chunksize) +def ol_get_parallel_chunksize(): + _launch_threads() + + def impl(): + return _get_parallel_chunksize() + return impl diff --git a/venv/lib/python3.10/site-packages/numba/np/ufunc/sigparse.py 
b/venv/lib/python3.10/site-packages/numba/np/ufunc/sigparse.py new file mode 100644 index 0000000000000000000000000000000000000000..67ca346c903e578653cd72a861d05ee5462c2ab3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/ufunc/sigparse.py @@ -0,0 +1,63 @@ +import tokenize +import string + + +def parse_signature(sig): + '''Parse generalized ufunc signature. + + NOTE: ',' (COMMA) is a delimiter; not separator. + This means trailing comma is legal. + ''' + def stripws(s): + return ''.join(c for c in s if c not in string.whitespace) + + def tokenizer(src): + def readline(): + yield src + gen = readline() + return tokenize.generate_tokens(lambda: next(gen)) + + def parse(src): + tokgen = tokenizer(src) + while True: + tok = next(tokgen) + if tok[1] == '(': + symbols = [] + while True: + tok = next(tokgen) + if tok[1] == ')': + break + elif tok[0] == tokenize.NAME: + symbols.append(tok[1]) + elif tok[1] == ',': + continue + else: + raise ValueError('bad token in signature "%s"' % tok[1]) + yield tuple(symbols) + tok = next(tokgen) + if tok[1] == ',': + continue + elif tokenize.ISEOF(tok[0]): + break + elif tokenize.ISEOF(tok[0]): + break + else: + raise ValueError('bad token in signature "%s"' % tok[1]) + + ins, _, outs = stripws(sig).partition('->') + inputs = list(parse(ins)) + outputs = list(parse(outs)) + + # check that all output symbols are defined in the inputs + isym = set() + osym = set() + for grp in inputs: + isym |= set(grp) + for grp in outputs: + osym |= set(grp) + + diff = osym.difference(isym) + if diff: + raise NameError('undefined output symbols: %s' % ','.join(sorted(diff))) + + return inputs, outputs diff --git a/venv/lib/python3.10/site-packages/numba/np/ufunc/ufunc_base.py b/venv/lib/python3.10/site-packages/numba/np/ufunc/ufunc_base.py new file mode 100644 index 0000000000000000000000000000000000000000..6864b724eda599f0e3b252006f9be37139ae35a1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/ufunc/ufunc_base.py @@ -0,0 
+1,113 @@ +from numba.np import numpy_support +from numba.core import types + + +class UfuncLowererBase: + '''Callable class responsible for lowering calls to a specific gufunc. + ''' + def __init__(self, ufunc, make_kernel_fn, make_ufunc_kernel_fn): + self.ufunc = ufunc + self.make_ufunc_kernel_fn = make_ufunc_kernel_fn + self.kernel = make_kernel_fn(ufunc) + self.libs = [] + + def __call__(self, context, builder, sig, args): + return self.make_ufunc_kernel_fn(context, builder, sig, args, + self.ufunc, self.kernel) + + +class UfuncBase: + + @property + def nin(self): + return self.ufunc.nin + + @property + def nout(self): + return self.ufunc.nout + + @property + def nargs(self): + return self.ufunc.nargs + + @property + def ntypes(self): + return self.ufunc.ntypes + + @property + def types(self): + return self.ufunc.types + + @property + def identity(self): + return self.ufunc.identity + + @property + def signature(self): + return self.ufunc.signature + + @property + def accumulate(self): + return self.ufunc.accumulate + + @property + def at(self): + return self.ufunc.at + + @property + def outer(self): + return self.ufunc.outer + + @property + def reduce(self): + return self.ufunc.reduce + + @property + def reduceat(self): + return self.ufunc.reduceat + + def disable_compile(self): + """ + Disable the compilation of new signatures at call time. + """ + # If disabling compilation then there must be at least one signature + assert len(self._dispatcher.overloads) > 0 + self._frozen = True + + def _install_cg(self, targetctx=None): + """ + Install an implementation function for a GUFunc/DUFunc object in the + given target context. If no target context is given, then + _install_cg() installs into the target context of the + dispatcher object (should be same default context used by + jit() and njit()). 
+ """ + if targetctx is None: + targetctx = self._dispatcher.targetdescr.target_context + _any = types.Any + _arr = types.Array + # Either all outputs are explicit or none of them are + sig0 = (_any,) * self.ufunc.nin + (_arr,) * self.ufunc.nout + sig1 = (_any,) * self.ufunc.nin + targetctx.insert_func_defn( + [(self._lower_me, self, sig) for sig in (sig0, sig1)]) + + def find_ewise_function(self, ewise_types): + """ + Given a tuple of element-wise argument types, find a matching + signature in the dispatcher. + + Return a 2-tuple containing the matching signature, and + compilation result. Will return two None's if no matching + signature was found. + """ + if self._frozen: + # If we cannot compile, coerce to the best matching loop + loop = numpy_support.ufunc_find_matching_loop(self, ewise_types) + if loop is None: + return None, None + ewise_types = tuple(loop.inputs + loop.outputs)[:len(ewise_types)] + for sig, cres in self._dispatcher.overloads.items(): + if self.match_signature(ewise_types, sig): + return sig, cres + return None, None diff --git a/venv/lib/python3.10/site-packages/numba/np/ufunc/ufuncbuilder.py b/venv/lib/python3.10/site-packages/numba/np/ufunc/ufuncbuilder.py new file mode 100644 index 0000000000000000000000000000000000000000..e23ec229e5d8a53441281924103f4a75b75c574e --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/ufunc/ufuncbuilder.py @@ -0,0 +1,434 @@ +# -*- coding: utf-8 -*- + +import inspect +import warnings +from contextlib import contextmanager + +from numba.core import config, targetconfig +from numba.core.decorators import jit +from numba.core.descriptors import TargetDescriptor +from numba.core.extending import is_jitted +from numba.core.errors import NumbaDeprecationWarning +from numba.core.options import TargetOptions, include_default_options +from numba.core.registry import cpu_target +from numba.core.target_extension import dispatcher_registry, target_registry +from numba.core import utils, types, serialize, 
compiler, sigutils +from numba.np.numpy_support import as_dtype +from numba.np.ufunc import _internal +from numba.np.ufunc.sigparse import parse_signature +from numba.np.ufunc.wrappers import build_ufunc_wrapper, build_gufunc_wrapper +from numba.core.caching import FunctionCache, NullCache +from numba.core.compiler_lock import global_compiler_lock + + +_options_mixin = include_default_options( + "nopython", + "forceobj", + "boundscheck", + "fastmath", + "writable_args" +) + + +class UFuncTargetOptions(_options_mixin, TargetOptions): + + def finalize(self, flags, options): + if not flags.is_set("enable_pyobject"): + flags.enable_pyobject = True + + if not flags.is_set("enable_looplift"): + flags.enable_looplift = True + + flags.inherit_if_not_set("nrt", default=True) + + if not flags.is_set("debuginfo"): + flags.debuginfo = config.DEBUGINFO_DEFAULT + + if not flags.is_set("boundscheck"): + flags.boundscheck = flags.debuginfo + + flags.enable_pyobject_looplift = True + + flags.inherit_if_not_set("fastmath") + + +class UFuncTarget(TargetDescriptor): + options = UFuncTargetOptions + + def __init__(self): + super().__init__('ufunc') + + @property + def typing_context(self): + return cpu_target.typing_context + + @property + def target_context(self): + return cpu_target.target_context + + +ufunc_target = UFuncTarget() + + +class UFuncDispatcher(serialize.ReduceMixin): + """ + An object handling compilation of various signatures for a ufunc. 
+ """ + targetdescr = ufunc_target + + def __init__(self, py_func, locals={}, targetoptions={}): + self.py_func = py_func + self.overloads = utils.UniqueDict() + self.targetoptions = targetoptions + self.locals = locals + self.cache = NullCache() + + def _reduce_states(self): + """ + NOTE: part of ReduceMixin protocol + """ + return dict( + pyfunc=self.py_func, + locals=self.locals, + targetoptions=self.targetoptions, + ) + + @classmethod + def _rebuild(cls, pyfunc, locals, targetoptions): + """ + NOTE: part of ReduceMixin protocol + """ + return cls(py_func=pyfunc, locals=locals, targetoptions=targetoptions) + + def enable_caching(self): + self.cache = FunctionCache(self.py_func) + + def compile(self, sig, locals={}, **targetoptions): + locs = self.locals.copy() + locs.update(locals) + + topt = self.targetoptions.copy() + topt.update(targetoptions) + + flags = compiler.Flags() + self.targetdescr.options.parse_as_flags(flags, topt) + + flags.no_cpython_wrapper = True + flags.error_model = "numpy" + # Disable loop lifting + # The feature requires a real + # python function + flags.enable_looplift = False + + return self._compile_core(sig, flags, locals) + + def _compile_core(self, sig, flags, locals): + """ + Trigger the compiler on the core function or load a previously + compiled version from the cache. Returns the CompileResult. 
+ """ + typingctx = self.targetdescr.typing_context + targetctx = self.targetdescr.target_context + + @contextmanager + def store_overloads_on_success(): + # use to ensure overloads are stored on success + try: + yield + except Exception: + raise + else: + exists = self.overloads.get(cres.signature) + if exists is None: + self.overloads[cres.signature] = cres + + # Use cache and compiler in a critical section + with global_compiler_lock: + with targetconfig.ConfigStack().enter(flags.copy()): + with store_overloads_on_success(): + # attempt look up of existing + cres = self.cache.load_overload(sig, targetctx) + if cres is not None: + return cres + + # Compile + args, return_type = sigutils.normalize_signature(sig) + cres = compiler.compile_extra(typingctx, targetctx, + self.py_func, args=args, + return_type=return_type, + flags=flags, locals=locals) + + # cache lookup failed before so safe to save + self.cache.save_overload(sig, cres) + + return cres + + +dispatcher_registry[target_registry['npyufunc']] = UFuncDispatcher + + +# Utility functions + +def _compile_element_wise_function(nb_func, targetoptions, sig): + # Do compilation + # Return CompileResult to test + cres = nb_func.compile(sig, **targetoptions) + args, return_type = sigutils.normalize_signature(sig) + return cres, args, return_type + + +def _finalize_ufunc_signature(cres, args, return_type): + '''Given a compilation result, argument types, and a return type, + build a valid Numba signature after validating that it doesn't + violate the constraints for the compilation mode. 
+ ''' + if return_type is None: + if cres.objectmode: + # Object mode is used and return type is not specified + raise TypeError("return type must be specified for object mode") + else: + return_type = cres.signature.return_type + + assert return_type != types.pyobject + return return_type(*args) + + +def _build_element_wise_ufunc_wrapper(cres, signature): + '''Build a wrapper for the ufunc loop entry point given by the + compilation result object, using the element-wise signature. + ''' + ctx = cres.target_context + library = cres.library + fname = cres.fndesc.llvm_func_name + + with global_compiler_lock: + info = build_ufunc_wrapper(library, ctx, fname, signature, + cres.objectmode, cres) + ptr = info.library.get_pointer_to_function(info.name) + # Get dtypes + dtypenums = [as_dtype(a).num for a in signature.args] + dtypenums.append(as_dtype(signature.return_type).num) + return dtypenums, ptr, cres.environment + + +_identities = { + 0: _internal.PyUFunc_Zero, + 1: _internal.PyUFunc_One, + None: _internal.PyUFunc_None, + "reorderable": _internal.PyUFunc_ReorderableNone, +} + + +def parse_identity(identity): + """ + Parse an identity value and return the corresponding low-level value + for Numpy. + """ + try: + identity = _identities[identity] + except KeyError: + raise ValueError("Invalid identity value %r" % (identity,)) + return identity + + +@contextmanager +def _suppress_deprecation_warning_nopython_not_supplied(): + """This suppresses the NumbaDeprecationWarning that occurs through the use + of `jit` without the `nopython` kwarg. 
This use of `jit` occurs in a few + places in the `{g,}ufunc` mechanism in Numba, predominantly to wrap the + "kernel" function.""" + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', + category=NumbaDeprecationWarning, + message=(".*The 'nopython' keyword argument " + "was not supplied*"),) + yield + + +# Class definitions + +class _BaseUFuncBuilder(object): + + def add(self, sig=None): + if hasattr(self, 'targetoptions'): + targetoptions = self.targetoptions + else: + targetoptions = self.nb_func.targetoptions + cres, args, return_type = _compile_element_wise_function( + self.nb_func, targetoptions, sig) + sig = self._finalize_signature(cres, args, return_type) + self._sigs.append(sig) + self._cres[sig] = cres + return cres + + def disable_compile(self): + """ + Disable the compilation of new signatures at call time. + """ + # Override this for implementations that support lazy compilation + + +class UFuncBuilder(_BaseUFuncBuilder): + + def __init__(self, py_func, identity=None, cache=False, targetoptions={}): + if is_jitted(py_func): + py_func = py_func.py_func + self.py_func = py_func + self.identity = parse_identity(identity) + with _suppress_deprecation_warning_nopython_not_supplied(): + self.nb_func = jit(_target='npyufunc', + cache=cache, + **targetoptions)(py_func) + self._sigs = [] + self._cres = {} + + def _finalize_signature(self, cres, args, return_type): + '''Slated for deprecation, use ufuncbuilder._finalize_ufunc_signature() + instead. 
+ ''' + return _finalize_ufunc_signature(cres, args, return_type) + + def build_ufunc(self): + with global_compiler_lock: + dtypelist = [] + ptrlist = [] + if not self.nb_func: + raise TypeError("No definition") + + # Get signature in the order they are added + keepalive = [] + cres = None + for sig in self._sigs: + cres = self._cres[sig] + dtypenums, ptr, env = self.build(cres, sig) + dtypelist.append(dtypenums) + ptrlist.append(int(ptr)) + keepalive.append((cres.library, env)) + + datlist = [None] * len(ptrlist) + + if cres is None: + argspec = inspect.getfullargspec(self.py_func) + inct = len(argspec.args) + else: + inct = len(cres.signature.args) + outct = 1 + + # Becareful that fromfunc does not provide full error checking yet. + # If typenum is out-of-bound, we have nasty memory corruptions. + # For instance, -1 for typenum will cause segfault. + # If elements of type-list (2nd arg) is tuple instead, + # there will also memory corruption. (Seems like code rewrite.) + ufunc = _internal.fromfunc( + self.py_func.__name__, self.py_func.__doc__, + ptrlist, dtypelist, inct, outct, datlist, + keepalive, self.identity, + ) + + return ufunc + + def build(self, cres, signature): + '''Slated for deprecation, use + ufuncbuilder._build_element_wise_ufunc_wrapper(). 
+ ''' + return _build_element_wise_ufunc_wrapper(cres, signature) + + +class GUFuncBuilder(_BaseUFuncBuilder): + + # TODO handle scalar + def __init__(self, py_func, signature, identity=None, cache=False, + targetoptions={}, writable_args=()): + self.py_func = py_func + self.identity = parse_identity(identity) + with _suppress_deprecation_warning_nopython_not_supplied(): + self.nb_func = jit(_target='npyufunc', cache=cache)(py_func) + self.signature = signature + self.sin, self.sout = parse_signature(signature) + self.targetoptions = targetoptions + self.cache = cache + self._sigs = [] + self._cres = {} + + transform_arg = _get_transform_arg(py_func) + self.writable_args = tuple([transform_arg(a) for a in writable_args]) + + def _finalize_signature(self, cres, args, return_type): + if not cres.objectmode and cres.signature.return_type != types.void: + raise TypeError("gufunc kernel must have void return type") + + if return_type is None: + return_type = types.void + + return return_type(*args) + + @global_compiler_lock + def build_ufunc(self): + type_list = [] + func_list = [] + if not self.nb_func: + raise TypeError("No definition") + + # Get signature in the order they are added + keepalive = [] + for sig in self._sigs: + cres = self._cres[sig] + dtypenums, ptr, env = self.build(cres) + type_list.append(dtypenums) + func_list.append(int(ptr)) + keepalive.append((cres.library, env)) + + datalist = [None] * len(func_list) + + nin = len(self.sin) + nout = len(self.sout) + + # Pass envs to fromfuncsig to bind to the lifetime of the ufunc object + ufunc = _internal.fromfunc( + self.py_func.__name__, self.py_func.__doc__, + func_list, type_list, nin, nout, datalist, + keepalive, self.identity, self.signature, self.writable_args + ) + return ufunc + + def build(self, cres): + """ + Returns (dtype numbers, function ptr, EnvironmentObject) + """ + # Builder wrapper for ufunc entry point + signature = cres.signature + info = build_gufunc_wrapper( + self.py_func, cres, 
self.sin, self.sout, + cache=self.cache, is_parfors=False, + ) + + env = info.env + ptr = info.library.get_pointer_to_function(info.name) + # Get dtypes + dtypenums = [] + for a in signature.args: + if isinstance(a, types.Array): + ty = a.dtype + else: + ty = a + dtypenums.append(as_dtype(ty).num) + return dtypenums, ptr, env + + +def _get_transform_arg(py_func): + """Return function that transform arg into index""" + args = inspect.getfullargspec(py_func).args + pos_by_arg = {arg: i for i, arg in enumerate(args)} + + def transform_arg(arg): + if isinstance(arg, int): + return arg + + try: + return pos_by_arg[arg] + except KeyError: + msg = (f"Specified writable arg {arg} not found in arg list " + f"{args} for function {py_func.__qualname__}") + raise RuntimeError(msg) + + return transform_arg diff --git a/venv/lib/python3.10/site-packages/numba/np/ufunc/wrappers.py b/venv/lib/python3.10/site-packages/numba/np/ufunc/wrappers.py new file mode 100644 index 0000000000000000000000000000000000000000..ef37c40770aeed73253b6e0709ad5c4f5a8de352 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/ufunc/wrappers.py @@ -0,0 +1,743 @@ +from collections import namedtuple + +import numpy as np + +from llvmlite.ir import Constant, IRBuilder +from llvmlite import ir + +from numba.core import types, cgutils +from numba.core.compiler_lock import global_compiler_lock +from numba.core.caching import make_library_cache, NullCache + + +_wrapper_info = namedtuple('_wrapper_info', ['library', 'env', 'name']) + + +def _build_ufunc_loop_body(load, store, context, func, builder, arrays, out, + offsets, store_offset, signature, pyapi, env): + elems = load() + + # Compute + status, retval = context.call_conv.call_function(builder, func, + signature.return_type, + signature.args, elems) + + # Store + with builder.if_else(status.is_ok, likely=True) as (if_ok, if_error): + with if_ok: + store(retval) + with if_error: + gil = pyapi.gil_ensure() + context.call_conv.raise_error(builder, 
pyapi, status) + pyapi.gil_release(gil) + + # increment indices + for off, ary in zip(offsets, arrays): + builder.store(builder.add(builder.load(off), ary.step), off) + + builder.store(builder.add(builder.load(store_offset), out.step), + store_offset) + + return status.code + + +def _build_ufunc_loop_body_objmode(load, store, context, func, builder, + arrays, out, offsets, store_offset, + signature, env, pyapi): + elems = load() + + # Compute + _objargs = [types.pyobject] * len(signature.args) + # We need to push the error indicator to avoid it messing with + # the ufunc's execution. We restore it unless the ufunc raised + # a new error. + with pyapi.err_push(keep_new=True): + status, retval = context.call_conv.call_function(builder, func, + types.pyobject, + _objargs, elems) + # Release owned reference to arguments + for elem in elems: + pyapi.decref(elem) + # NOTE: if an error occurred, it will be caught by the Numpy machinery + + # Store + store(retval) + + # increment indices + for off, ary in zip(offsets, arrays): + builder.store(builder.add(builder.load(off), ary.step), off) + + builder.store(builder.add(builder.load(store_offset), out.step), + store_offset) + + return status.code + + +def build_slow_loop_body(context, func, builder, arrays, out, offsets, + store_offset, signature, pyapi, env): + def load(): + elems = [ary.load_direct(builder.load(off)) + for off, ary in zip(offsets, arrays)] + return elems + + def store(retval): + out.store_direct(retval, builder.load(store_offset)) + + return _build_ufunc_loop_body(load, store, context, func, builder, arrays, + out, offsets, store_offset, signature, pyapi, + env=env) + + +def build_obj_loop_body(context, func, builder, arrays, out, offsets, + store_offset, signature, pyapi, envptr, env): + env_body = context.get_env_body(builder, envptr) + env_manager = pyapi.get_env_manager(env, env_body, envptr) + + def load(): + # Load + elems = [ary.load_direct(builder.load(off)) + for off, ary in zip(offsets, arrays)] 
+ # Box + elems = [pyapi.from_native_value(t, v, env_manager) + for v, t in zip(elems, signature.args)] + return elems + + def store(retval): + is_ok = cgutils.is_not_null(builder, retval) + # If an error is raised by the object mode ufunc, it will + # simply get caught by the Numpy ufunc machinery. + with builder.if_then(is_ok, likely=True): + # Unbox + native = pyapi.to_native_value(signature.return_type, retval) + assert native.cleanup is None + # Store + out.store_direct(native.value, builder.load(store_offset)) + # Release owned reference + pyapi.decref(retval) + + return _build_ufunc_loop_body_objmode(load, store, context, func, builder, + arrays, out, offsets, store_offset, + signature, envptr, pyapi) + + +def build_fast_loop_body(context, func, builder, arrays, out, offsets, + store_offset, signature, ind, pyapi, env): + def load(): + elems = [ary.load_aligned(ind) + for ary in arrays] + return elems + + def store(retval): + out.store_aligned(retval, ind) + + return _build_ufunc_loop_body(load, store, context, func, builder, arrays, + out, offsets, store_offset, signature, pyapi, + env=env) + + +def build_ufunc_wrapper(library, context, fname, signature, objmode, cres): + """ + Wrap the scalar function with a loop that iterates over the arguments + + Returns + ------- + (library, env, name) + """ + assert isinstance(fname, str) + byte_t = ir.IntType(8) + byte_ptr_t = ir.PointerType(byte_t) + byte_ptr_ptr_t = ir.PointerType(byte_ptr_t) + intp_t = context.get_value_type(types.intp) + intp_ptr_t = ir.PointerType(intp_t) + + fnty = ir.FunctionType(ir.VoidType(), [byte_ptr_ptr_t, intp_ptr_t, + intp_ptr_t, byte_ptr_t]) + + wrapperlib = context.codegen().create_library('ufunc_wrapper') + wrapper_module = wrapperlib.create_ir_module('') + if objmode: + func_type = context.call_conv.get_function_type( + types.pyobject, [types.pyobject] * len(signature.args)) + else: + func_type = context.call_conv.get_function_type( + signature.return_type, signature.args) + + func 
= ir.Function(wrapper_module, func_type, name=fname) + func.attributes.add("alwaysinline") + + wrapper = ir.Function(wrapper_module, fnty, "__ufunc__." + func.name) + arg_args, arg_dims, arg_steps, arg_data = wrapper.args + arg_args.name = "args" + arg_dims.name = "dims" + arg_steps.name = "steps" + arg_data.name = "data" + + builder = IRBuilder(wrapper.append_basic_block("entry")) + + # Prepare Environment + envname = context.get_env_name(cres.fndesc) + env = cres.environment + envptr = builder.load(context.declare_env_global(builder.module, envname)) + + # Emit loop + loopcount = builder.load(arg_dims, name="loopcount") + + # Prepare inputs + arrays = [] + for i, typ in enumerate(signature.args): + arrays.append(UArrayArg(context, builder, arg_args, arg_steps, i, typ)) + + # Prepare output + out = UArrayArg(context, builder, arg_args, arg_steps, len(arrays), + signature.return_type) + + # Setup indices + offsets = [] + zero = context.get_constant(types.intp, 0) + for _ in arrays: + p = cgutils.alloca_once(builder, intp_t) + offsets.append(p) + builder.store(zero, p) + + store_offset = cgutils.alloca_once(builder, intp_t) + builder.store(zero, store_offset) + + unit_strided = cgutils.true_bit + for ary in arrays: + unit_strided = builder.and_(unit_strided, ary.is_unit_strided) + + pyapi = context.get_python_api(builder) + if objmode: + # General loop + gil = pyapi.gil_ensure() + with cgutils.for_range(builder, loopcount, intp=intp_t): + build_obj_loop_body( + context, func, builder, arrays, out, offsets, + store_offset, signature, pyapi, envptr, env, + ) + pyapi.gil_release(gil) + builder.ret_void() + + else: + with builder.if_else(unit_strided) as (is_unit_strided, is_strided): + with is_unit_strided: + with cgutils.for_range(builder, loopcount, intp=intp_t) as loop: + build_fast_loop_body( + context, func, builder, arrays, out, offsets, + store_offset, signature, loop.index, pyapi, + env=envptr, + ) + + with is_strided: + # General loop + with 
cgutils.for_range(builder, loopcount, intp=intp_t): + build_slow_loop_body( + context, func, builder, arrays, out, offsets, + store_offset, signature, pyapi, + env=envptr, + ) + + builder.ret_void() + del builder + + # Link and finalize + wrapperlib.add_ir_module(wrapper_module) + wrapperlib.add_linking_library(library) + return _wrapper_info(library=wrapperlib, env=env, name=wrapper.name) + + +class UArrayArg(object): + def __init__(self, context, builder, args, steps, i, fe_type): + self.context = context + self.builder = builder + self.fe_type = fe_type + offset = self.context.get_constant(types.intp, i) + offseted_args = self.builder.load(builder.gep(args, [offset])) + data_type = context.get_data_type(fe_type) + self.dataptr = self.builder.bitcast(offseted_args, + data_type.as_pointer()) + sizeof = self.context.get_abi_sizeof(data_type) + self.abisize = self.context.get_constant(types.intp, sizeof) + offseted_step = self.builder.gep(steps, [offset]) + self.step = self.builder.load(offseted_step) + self.is_unit_strided = builder.icmp_unsigned('==', + self.abisize, self.step) + self.builder = builder + + def load_direct(self, byteoffset): + """ + Generic load from the given *byteoffset*. load_aligned() is + preferred if possible. + """ + ptr = cgutils.pointer_add(self.builder, self.dataptr, byteoffset) + return self.context.unpack_value(self.builder, self.fe_type, ptr) + + def load_aligned(self, ind): + # Using gep() instead of explicit pointer addition helps LLVM + # vectorize the loop. 
+ ptr = self.builder.gep(self.dataptr, [ind]) + return self.context.unpack_value(self.builder, self.fe_type, ptr) + + def store_direct(self, value, byteoffset): + ptr = cgutils.pointer_add(self.builder, self.dataptr, byteoffset) + self.context.pack_value(self.builder, self.fe_type, value, ptr) + + def store_aligned(self, value, ind): + ptr = self.builder.gep(self.dataptr, [ind]) + self.context.pack_value(self.builder, self.fe_type, value, ptr) + + +GufWrapperCache = make_library_cache('guf') + + +class _GufuncWrapper(object): + def __init__(self, py_func, cres, sin, sout, cache, is_parfors): + """ + The *is_parfors* argument is a boolean that indicates if the GUfunc + being built is to be used as a ParFors kernel. If True, it disables + the caching on the wrapper as a separate unit because it will be linked + into the caller function and cached along with it. + """ + self.py_func = py_func + self.cres = cres + self.sin = sin + self.sout = sout + self.is_objectmode = self.signature.return_type == types.pyobject + self.cache = (GufWrapperCache(py_func=self.py_func) + if cache else NullCache()) + self.is_parfors = bool(is_parfors) + + @property + def library(self): + return self.cres.library + + @property + def context(self): + return self.cres.target_context + + @property + def call_conv(self): + return self.context.call_conv + + @property + def signature(self): + return self.cres.signature + + @property + def fndesc(self): + return self.cres.fndesc + + @property + def env(self): + return self.cres.environment + + def _wrapper_function_type(self): + byte_t = ir.IntType(8) + byte_ptr_t = ir.PointerType(byte_t) + byte_ptr_ptr_t = ir.PointerType(byte_ptr_t) + intp_t = self.context.get_value_type(types.intp) + intp_ptr_t = ir.PointerType(intp_t) + + fnty = ir.FunctionType(ir.VoidType(), [byte_ptr_ptr_t, intp_ptr_t, + intp_ptr_t, byte_ptr_t]) + return fnty + + def _build_wrapper(self, library, name): + """ + The LLVM IRBuilder code to create the gufunc wrapper. 
+ The *library* arg is the CodeLibrary to which the wrapper should + be added. The *name* arg is the name of the wrapper function being + created. + """ + intp_t = self.context.get_value_type(types.intp) + fnty = self._wrapper_function_type() + + wrapper_module = library.create_ir_module('_gufunc_wrapper') + func_type = self.call_conv.get_function_type(self.fndesc.restype, + self.fndesc.argtypes) + fname = self.fndesc.llvm_func_name + func = ir.Function(wrapper_module, func_type, name=fname) + + func.attributes.add("alwaysinline") + wrapper = ir.Function(wrapper_module, fnty, name) + # The use of weak_odr linkage avoids the function being dropped due + # to the order in which the wrappers and the user function are linked. + wrapper.linkage = 'weak_odr' + arg_args, arg_dims, arg_steps, arg_data = wrapper.args + arg_args.name = "args" + arg_dims.name = "dims" + arg_steps.name = "steps" + arg_data.name = "data" + + builder = IRBuilder(wrapper.append_basic_block("entry")) + loopcount = builder.load(arg_dims, name="loopcount") + pyapi = self.context.get_python_api(builder) + + # Unpack shapes + unique_syms = set() + for grp in (self.sin, self.sout): + for syms in grp: + unique_syms |= set(syms) + + sym_map = {} + for syms in self.sin: + for s in syms: + if s not in sym_map: + sym_map[s] = len(sym_map) + + sym_dim = {} + for s, i in sym_map.items(): + sym_dim[s] = builder.load(builder.gep(arg_dims, + [self.context.get_constant( + types.intp, + i + 1)])) + + # Prepare inputs + arrays = [] + step_offset = len(self.sin) + len(self.sout) + for i, (typ, sym) in enumerate(zip(self.signature.args, + self.sin + self.sout)): + ary = GUArrayArg(self.context, builder, arg_args, + arg_steps, i, step_offset, typ, sym, sym_dim) + step_offset += len(sym) + arrays.append(ary) + + bbreturn = builder.append_basic_block('.return') + + # Prologue + self.gen_prologue(builder, pyapi) + + # Loop + with cgutils.for_range(builder, loopcount, intp=intp_t) as loop: + args = 
[a.get_array_at_offset(loop.index) for a in arrays] + innercall, error = self.gen_loop_body(builder, pyapi, func, args) + # If error, escape + cgutils.cbranch_or_continue(builder, error, bbreturn) + + builder.branch(bbreturn) + builder.position_at_end(bbreturn) + + # Epilogue + self.gen_epilogue(builder, pyapi) + + builder.ret_void() + + # Link + library.add_ir_module(wrapper_module) + library.add_linking_library(self.library) + + def _compile_wrapper(self, wrapper_name): + # Gufunc created by Parfors? + if self.is_parfors: + # No wrapper caching for parfors + wrapperlib = self.context.codegen().create_library(str(self)) + # Build wrapper + self._build_wrapper(wrapperlib, wrapper_name) + # Non-parfors? + else: + # Use cache and compiler in a critical section + wrapperlib = self.cache.load_overload( + self.cres.signature, self.cres.target_context, + ) + if wrapperlib is None: + # Create library and enable caching + wrapperlib = self.context.codegen().create_library(str(self)) + wrapperlib.enable_object_caching() + # Build wrapper + self._build_wrapper(wrapperlib, wrapper_name) + # Cache + self.cache.save_overload(self.cres.signature, wrapperlib) + + return wrapperlib + + @global_compiler_lock + def build(self): + wrapper_name = "__gufunc__." 
+ self.fndesc.mangled_name + wrapperlib = self._compile_wrapper(wrapper_name) + return _wrapper_info( + library=wrapperlib, env=self.env, name=wrapper_name, + ) + + def gen_loop_body(self, builder, pyapi, func, args): + status, retval = self.call_conv.call_function( + builder, func, self.signature.return_type, self.signature.args, + args) + + with builder.if_then(status.is_error, likely=False): + gil = pyapi.gil_ensure() + self.context.call_conv.raise_error(builder, pyapi, status) + pyapi.gil_release(gil) + + return status.code, status.is_error + + def gen_prologue(self, builder, pyapi): + pass # Do nothing + + def gen_epilogue(self, builder, pyapi): + pass # Do nothing + + +class _GufuncObjectWrapper(_GufuncWrapper): + def gen_loop_body(self, builder, pyapi, func, args): + innercall, error = _prepare_call_to_object_mode(self.context, + builder, pyapi, func, + self.signature, + args) + return innercall, error + + def gen_prologue(self, builder, pyapi): + # Acquire the GIL + self.gil = pyapi.gil_ensure() + + def gen_epilogue(self, builder, pyapi): + # Release GIL + pyapi.gil_release(self.gil) + + +def build_gufunc_wrapper(py_func, cres, sin, sout, cache, is_parfors): + signature = cres.signature + wrapcls = (_GufuncObjectWrapper + if signature.return_type == types.pyobject + else _GufuncWrapper) + return wrapcls( + py_func, cres, sin, sout, cache, is_parfors=is_parfors, + ).build() + + +def _prepare_call_to_object_mode(context, builder, pyapi, func, + signature, args): + mod = builder.module + + bb_core_return = builder.append_basic_block('ufunc.core.return') + + # Call to + # PyObject* ndarray_new(int nd, + # npy_intp *dims, /* shape */ + # npy_intp *strides, + # void* data, + # int type_num, + # int itemsize) + + ll_int = context.get_value_type(types.int32) + ll_intp = context.get_value_type(types.intp) + ll_intp_ptr = ir.PointerType(ll_intp) + ll_voidptr = context.get_value_type(types.voidptr) + ll_pyobj = context.get_value_type(types.pyobject) + fnty = 
ir.FunctionType(ll_pyobj, [ll_int, ll_intp_ptr, + ll_intp_ptr, ll_voidptr, + ll_int, ll_int]) + + fn_array_new = cgutils.get_or_insert_function(mod, fnty, + "numba_ndarray_new") + + # Convert each llarray into pyobject + error_pointer = cgutils.alloca_once(builder, ir.IntType(1), name='error') + builder.store(cgutils.true_bit, error_pointer) + + # The PyObject* arguments to the kernel function + object_args = [] + object_pointers = [] + + for i, (arg, argty) in enumerate(zip(args, signature.args)): + # Allocate NULL-initialized slot for this argument + objptr = cgutils.alloca_once(builder, ll_pyobj, zfill=True) + object_pointers.append(objptr) + + if isinstance(argty, types.Array): + # Special case arrays: we don't need full-blown NRT reflection + # since the argument will be gone at the end of the kernel + arycls = context.make_array(argty) + array = arycls(context, builder, value=arg) + + zero = Constant(ll_int, 0) + + # Extract members of the llarray + nd = Constant(ll_int, argty.ndim) + dims = builder.gep(array._get_ptr_by_name('shape'), [zero, zero]) + strides = builder.gep(array._get_ptr_by_name('strides'), + [zero, zero]) + data = builder.bitcast(array.data, ll_voidptr) + dtype = np.dtype(str(argty.dtype)) + + # Prepare other info for reconstruction of the PyArray + type_num = Constant(ll_int, dtype.num) + itemsize = Constant(ll_int, dtype.itemsize) + + # Call helper to reconstruct PyArray objects + obj = builder.call(fn_array_new, [nd, dims, strides, data, + type_num, itemsize]) + else: + # Other argument types => use generic boxing + obj = pyapi.from_native_value(argty, arg) + + builder.store(obj, objptr) + object_args.append(obj) + + obj_is_null = cgutils.is_null(builder, obj) + builder.store(obj_is_null, error_pointer) + cgutils.cbranch_or_continue(builder, obj_is_null, bb_core_return) + + # Call ufunc core function + object_sig = [types.pyobject] * len(object_args) + + status, retval = context.call_conv.call_function( + builder, func, types.pyobject, 
object_sig, + object_args) + builder.store(status.is_error, error_pointer) + + # Release returned object + pyapi.decref(retval) + + builder.branch(bb_core_return) + # At return block + builder.position_at_end(bb_core_return) + + # Release argument objects + for objptr in object_pointers: + pyapi.decref(builder.load(objptr)) + + innercall = status.code + return innercall, builder.load(error_pointer) + + +class GUArrayArg(object): + def __init__(self, context, builder, args, steps, i, step_offset, + typ, syms, sym_dim): + + self.context = context + self.builder = builder + + offset = context.get_constant(types.intp, i) + + data = builder.load(builder.gep(args, [offset], name="data.ptr"), + name="data") + self.data = data + + core_step_ptr = builder.gep(steps, [offset], name="core.step.ptr") + core_step = builder.load(core_step_ptr) + + if isinstance(typ, types.Array): + as_scalar = not syms + + # number of symbol in the shape spec should match the dimension + # of the array type. + if len(syms) != typ.ndim: + if len(syms) == 0 and typ.ndim == 1: + # This is an exception for handling scalar argument. + # The type can be 1D array for scalar. + # In the future, we may deprecate this exception. 
+ pass + else: + raise TypeError("type and shape signature mismatch for arg " + "#{0}".format(i + 1)) + + ndim = typ.ndim + shape = [sym_dim[s] for s in syms] + strides = [] + + for j in range(ndim): + stepptr = builder.gep(steps, + [context.get_constant(types.intp, + step_offset + j)], + name="step.ptr") + step = builder.load(stepptr) + strides.append(step) + + ldcls = (_ArrayAsScalarArgLoader + if as_scalar + else _ArrayArgLoader) + + self._loader = ldcls(dtype=typ.dtype, + ndim=ndim, + core_step=core_step, + as_scalar=as_scalar, + shape=shape, + strides=strides) + else: + # If typ is not an array + if syms: + raise TypeError("scalar type {0} given for non scalar " + "argument #{1}".format(typ, i + 1)) + self._loader = _ScalarArgLoader(dtype=typ, stride=core_step) + + def get_array_at_offset(self, ind): + return self._loader.load(context=self.context, builder=self.builder, + data=self.data, ind=ind) + + +class _ScalarArgLoader(object): + """ + Handle GFunc argument loading where a scalar type is used in the core + function. + Note: It still has a stride because the input to the gufunc can be an array + for this argument. + """ + + def __init__(self, dtype, stride): + self.dtype = dtype + self.stride = stride + + def load(self, context, builder, data, ind): + # Load at base + ind * stride + data = builder.gep(data, [builder.mul(ind, self.stride)]) + dptr = builder.bitcast(data, + context.get_data_type(self.dtype).as_pointer()) + return builder.load(dptr) + + +class _ArrayArgLoader(object): + """ + Handle GUFunc argument loading where an array is expected. 
+ """ + + def __init__(self, dtype, ndim, core_step, as_scalar, shape, strides): + self.dtype = dtype + self.ndim = ndim + self.core_step = core_step + self.as_scalar = as_scalar + self.shape = shape + self.strides = strides + + def load(self, context, builder, data, ind): + arytyp = types.Array(dtype=self.dtype, ndim=self.ndim, layout="A") + arycls = context.make_array(arytyp) + + array = arycls(context, builder) + offseted_data = cgutils.pointer_add(builder, + data, + builder.mul(self.core_step, + ind)) + + shape, strides = self._shape_and_strides(context, builder) + + itemsize = context.get_abi_sizeof(context.get_data_type(self.dtype)) + context.populate_array(array, + data=builder.bitcast(offseted_data, + array.data.type), + shape=shape, + strides=strides, + itemsize=context.get_constant(types.intp, + itemsize), + meminfo=None) + + return array._getvalue() + + def _shape_and_strides(self, context, builder): + shape = cgutils.pack_array(builder, self.shape) + strides = cgutils.pack_array(builder, self.strides) + return shape, strides + + +class _ArrayAsScalarArgLoader(_ArrayArgLoader): + """ + Handle GUFunc argument loading where the shape signature specifies + a scalar "()" but a 1D array is used for the type of the core function. + """ + + def _shape_and_strides(self, context, builder): + # Set shape and strides for a 1D size 1 array + one = context.get_constant(types.intp, 1) + zero = context.get_constant(types.intp, 0) + shape = cgutils.pack_array(builder, [one]) + strides = cgutils.pack_array(builder, [zero]) + return shape, strides diff --git a/venv/lib/python3.10/site-packages/numba/np/ufunc_db.py b/venv/lib/python3.10/site-packages/numba/np/ufunc_db.py new file mode 100644 index 0000000000000000000000000000000000000000..a72642432ff7c041d9a8f5b6028b142548d03a2b --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/np/ufunc_db.py @@ -0,0 +1,1211 @@ +"""This file contains information on how to translate different ufuncs +into numba. 
It is a database of different ufuncs and how each of its +loops maps to a function that implements the inner kernel of that ufunc +(the inner kernel being the per-element function). + +Use the function get_ufunc_info to get the information related to the +ufunc +""" + + +import numpy as np +import sys + +# this is lazily initialized to avoid circular imports +IS_WIN32 = sys.platform.startswith('win32') +numpy_version = tuple(map(int, np.__version__.split('.')[:2])) +_ufunc_db = None + + +def _lazy_init_db(): + global _ufunc_db + + if _ufunc_db is None: + _ufunc_db = {} + _fill_ufunc_db(_ufunc_db) + + +def get_ufuncs(): + """obtain a list of supported ufuncs in the db""" + _lazy_init_db() + return _ufunc_db.keys() + + +def get_ufunc_info(ufunc_key): + """get the lowering information for the ufunc with key ufunc_key. + + The lowering information is a dictionary that maps from a numpy + loop string (as given by the ufunc types attribute) to a function + that handles code generation for a scalar version of the ufunc + (that is, generates the "per element" operation"). + + raises a KeyError if the ufunc is not in the ufunc_db + """ + _lazy_init_db() + return _ufunc_db[ufunc_key] + + +def _fill_ufunc_db(ufunc_db): + # some of these imports would cause a problem of circular + # imports if done at global scope when importing the numba + # module. 
+ from numba.np import npyfuncs + from numba.np.math import cmathimpl, mathimpl, numbers + from numba.np.numpy_support import numpy_version + + ufunc_db[np.isnat] = { + # datetime & timedelta + 'M->?': npyfuncs.np_datetime_isnat_impl, + 'm->?': npyfuncs.np_datetime_isnat_impl, + } + + ufunc_db[np.negative] = { + '?->?': numbers.int_invert_impl, + 'b->b': numbers.int_negate_impl, + 'B->B': numbers.int_negate_impl, + 'h->h': numbers.int_negate_impl, + 'H->H': numbers.int_negate_impl, + 'i->i': numbers.int_negate_impl, + 'I->I': numbers.int_negate_impl, + 'l->l': numbers.int_negate_impl, + 'L->L': numbers.int_negate_impl, + 'q->q': numbers.int_negate_impl, + 'Q->Q': numbers.int_negate_impl, + 'f->f': numbers.real_negate_impl, + 'd->d': numbers.real_negate_impl, + 'F->F': numbers.complex_negate_impl, + 'D->D': numbers.complex_negate_impl, + } + + ufunc_db[np.positive] = { + '?->?': numbers.int_positive_impl, + 'b->b': numbers.int_positive_impl, + 'B->B': numbers.int_positive_impl, + 'h->h': numbers.int_positive_impl, + 'H->H': numbers.int_positive_impl, + 'i->i': numbers.int_positive_impl, + 'I->I': numbers.int_positive_impl, + 'l->l': numbers.int_positive_impl, + 'L->L': numbers.int_positive_impl, + 'q->q': numbers.int_positive_impl, + 'Q->Q': numbers.int_positive_impl, + 'f->f': numbers.real_positive_impl, + 'd->d': numbers.real_positive_impl, + 'F->F': numbers.complex_positive_impl, + 'D->D': numbers.complex_positive_impl, + } + + ufunc_db[np.absolute] = { + '?->?': numbers.int_abs_impl, + 'b->b': numbers.int_abs_impl, + 'B->B': numbers.uint_abs_impl, + 'h->h': numbers.int_abs_impl, + 'H->H': numbers.uint_abs_impl, + 'i->i': numbers.int_abs_impl, + 'I->I': numbers.uint_abs_impl, + 'l->l': numbers.int_abs_impl, + 'L->L': numbers.uint_abs_impl, + 'q->q': numbers.int_abs_impl, + 'Q->Q': numbers.uint_abs_impl, + 'f->f': numbers.real_abs_impl, + 'd->d': numbers.real_abs_impl, + 'F->f': numbers.complex_abs_impl, + 'D->d': numbers.complex_abs_impl, + } + + 
ufunc_db[np.sign] = { + 'b->b': numbers.int_sign_impl, + 'B->B': numbers.int_sign_impl, + 'h->h': numbers.int_sign_impl, + 'H->H': numbers.int_sign_impl, + 'i->i': numbers.int_sign_impl, + 'I->I': numbers.int_sign_impl, + 'l->l': numbers.int_sign_impl, + 'L->L': numbers.int_sign_impl, + 'q->q': numbers.int_sign_impl, + 'Q->Q': numbers.int_sign_impl, + 'f->f': numbers.real_sign_impl, + 'd->d': numbers.real_sign_impl, + 'F->F': npyfuncs.np_complex_sign_impl, + 'D->D': npyfuncs.np_complex_sign_impl, + } + + ufunc_db[np.add] = { + '??->?': numbers.int_or_impl, + 'bb->b': numbers.int_add_impl, + 'BB->B': numbers.int_add_impl, + 'hh->h': numbers.int_add_impl, + 'HH->H': numbers.int_add_impl, + 'ii->i': numbers.int_add_impl, + 'II->I': numbers.int_add_impl, + 'll->l': numbers.int_add_impl, + 'LL->L': numbers.int_add_impl, + 'qq->q': numbers.int_add_impl, + 'QQ->Q': numbers.int_add_impl, + 'ff->f': numbers.real_add_impl, + 'dd->d': numbers.real_add_impl, + 'FF->F': numbers.complex_add_impl, + 'DD->D': numbers.complex_add_impl, + } + + ufunc_db[np.subtract] = { + '??->?': numbers.int_xor_impl, + 'bb->b': numbers.int_sub_impl, + 'BB->B': numbers.int_sub_impl, + 'hh->h': numbers.int_sub_impl, + 'HH->H': numbers.int_sub_impl, + 'ii->i': numbers.int_sub_impl, + 'II->I': numbers.int_sub_impl, + 'll->l': numbers.int_sub_impl, + 'LL->L': numbers.int_sub_impl, + 'qq->q': numbers.int_sub_impl, + 'QQ->Q': numbers.int_sub_impl, + 'ff->f': numbers.real_sub_impl, + 'dd->d': numbers.real_sub_impl, + 'FF->F': numbers.complex_sub_impl, + 'DD->D': numbers.complex_sub_impl, + } + + ufunc_db[np.multiply] = { + '??->?': numbers.int_and_impl, + 'bb->b': numbers.int_mul_impl, + 'BB->B': numbers.int_mul_impl, + 'hh->h': numbers.int_mul_impl, + 'HH->H': numbers.int_mul_impl, + 'ii->i': numbers.int_mul_impl, + 'II->I': numbers.int_mul_impl, + 'll->l': numbers.int_mul_impl, + 'LL->L': numbers.int_mul_impl, + 'qq->q': numbers.int_mul_impl, + 'QQ->Q': numbers.int_mul_impl, + 'ff->f': 
numbers.real_mul_impl, + 'dd->d': numbers.real_mul_impl, + 'FF->F': numbers.complex_mul_impl, + 'DD->D': numbers.complex_mul_impl, + } + + if np.divide != np.true_divide: + ufunc_db[np.divide] = { + 'bb->b': npyfuncs.np_int_sdiv_impl, + 'BB->B': npyfuncs.np_int_udiv_impl, + 'hh->h': npyfuncs.np_int_sdiv_impl, + 'HH->H': npyfuncs.np_int_udiv_impl, + 'ii->i': npyfuncs.np_int_sdiv_impl, + 'II->I': npyfuncs.np_int_udiv_impl, + 'll->l': npyfuncs.np_int_sdiv_impl, + 'LL->L': npyfuncs.np_int_udiv_impl, + 'qq->q': npyfuncs.np_int_sdiv_impl, + 'QQ->Q': npyfuncs.np_int_udiv_impl, + 'ff->f': npyfuncs.np_real_div_impl, + 'dd->d': npyfuncs.np_real_div_impl, + 'FF->F': npyfuncs.np_complex_div_impl, + 'DD->D': npyfuncs.np_complex_div_impl, + } + + ufunc_db[np.true_divide] = { + 'bb->d': npyfuncs.np_int_truediv_impl, + 'BB->d': npyfuncs.np_int_truediv_impl, + 'hh->d': npyfuncs.np_int_truediv_impl, + 'HH->d': npyfuncs.np_int_truediv_impl, + 'ii->d': npyfuncs.np_int_truediv_impl, + 'II->d': npyfuncs.np_int_truediv_impl, + 'll->d': npyfuncs.np_int_truediv_impl, + 'LL->d': npyfuncs.np_int_truediv_impl, + 'qq->d': npyfuncs.np_int_truediv_impl, + 'QQ->d': npyfuncs.np_int_truediv_impl, + 'ff->f': npyfuncs.np_real_div_impl, + 'dd->d': npyfuncs.np_real_div_impl, + 'FF->F': npyfuncs.np_complex_div_impl, + 'DD->D': npyfuncs.np_complex_div_impl, + } + + ufunc_db[np.floor_divide] = { + 'bb->b': npyfuncs.np_int_sdiv_impl, + 'BB->B': npyfuncs.np_int_udiv_impl, + 'hh->h': npyfuncs.np_int_sdiv_impl, + 'HH->H': npyfuncs.np_int_udiv_impl, + 'ii->i': npyfuncs.np_int_sdiv_impl, + 'II->I': npyfuncs.np_int_udiv_impl, + 'll->l': npyfuncs.np_int_sdiv_impl, + 'LL->L': npyfuncs.np_int_udiv_impl, + 'qq->q': npyfuncs.np_int_sdiv_impl, + 'QQ->Q': npyfuncs.np_int_udiv_impl, + 'ff->f': npyfuncs.np_real_floor_div_impl, + 'dd->d': npyfuncs.np_real_floor_div_impl, + } + + ufunc_db[np.remainder] = { + 'bb->b': npyfuncs.np_int_srem_impl, + 'BB->B': npyfuncs.np_int_urem_impl, + 'hh->h': npyfuncs.np_int_srem_impl, + 
'HH->H': npyfuncs.np_int_urem_impl, + 'ii->i': npyfuncs.np_int_srem_impl, + 'II->I': npyfuncs.np_int_urem_impl, + 'll->l': npyfuncs.np_int_srem_impl, + 'LL->L': npyfuncs.np_int_urem_impl, + 'qq->q': npyfuncs.np_int_srem_impl, + 'QQ->Q': npyfuncs.np_int_urem_impl, + 'ff->f': npyfuncs.np_real_mod_impl, + 'dd->d': npyfuncs.np_real_mod_impl, + } + + ufunc_db[np.divmod] = { + 'bb->bb': npyfuncs.np_int_sdivrem_impl, + 'BB->BB': npyfuncs.np_int_udivrem_impl, + 'hh->hh': npyfuncs.np_int_sdivrem_impl, + 'HH->HH': npyfuncs.np_int_udivrem_impl, + 'ii->ii': npyfuncs.np_int_sdivrem_impl, + 'II->II': npyfuncs.np_int_udivrem_impl, + 'll->ll': npyfuncs.np_int_sdivrem_impl, + 'LL->LL': npyfuncs.np_int_udivrem_impl, + 'qq->qq': npyfuncs.np_int_sdivrem_impl, + 'QQ->QQ': npyfuncs.np_int_udivrem_impl, + 'ff->ff': npyfuncs.np_real_divmod_impl, + 'dd->dd': npyfuncs.np_real_divmod_impl, + } + + ufunc_db[np.fmod] = { + 'bb->b': npyfuncs.np_int_fmod_impl, + 'BB->B': npyfuncs.np_int_fmod_impl, + 'hh->h': npyfuncs.np_int_fmod_impl, + 'HH->H': npyfuncs.np_int_fmod_impl, + 'ii->i': npyfuncs.np_int_fmod_impl, + 'II->I': npyfuncs.np_int_fmod_impl, + 'll->l': npyfuncs.np_int_fmod_impl, + 'LL->L': npyfuncs.np_int_fmod_impl, + 'qq->q': npyfuncs.np_int_fmod_impl, + 'QQ->Q': npyfuncs.np_int_fmod_impl, + 'ff->f': npyfuncs.np_real_fmod_impl, + 'dd->d': npyfuncs.np_real_fmod_impl, + } + + ufunc_db[np.logaddexp] = { + 'ff->f': npyfuncs.np_real_logaddexp_impl, + 'dd->d': npyfuncs.np_real_logaddexp_impl, + } + + ufunc_db[np.logaddexp2] = { + 'ff->f': npyfuncs.np_real_logaddexp2_impl, + 'dd->d': npyfuncs.np_real_logaddexp2_impl, + } + + ufunc_db[np.power] = { + 'bb->b': numbers.int_power_impl, + 'BB->B': numbers.int_power_impl, + 'hh->h': numbers.int_power_impl, + 'HH->H': numbers.int_power_impl, + 'ii->i': numbers.int_power_impl, + 'II->I': numbers.int_power_impl, + 'll->l': numbers.int_power_impl, + 'LL->L': numbers.int_power_impl, + 'qq->q': numbers.int_power_impl, + 'QQ->Q': numbers.int_power_impl, + # 
XXX we would like to use `int_power_impl` for real ** integer + # as well (for better performance), but the current ufunc typing + # rules forbid that + 'ff->f': numbers.real_power_impl, + 'dd->d': numbers.real_power_impl, + 'FF->F': npyfuncs.np_complex_power_impl, + 'DD->D': npyfuncs.np_complex_power_impl, + } + + ufunc_db[np.float_power] = { + 'ff->f': npyfuncs.real_float_power_impl, + 'dd->d': npyfuncs.real_float_power_impl, + 'FF->F': npyfuncs.np_complex_float_power_impl, + 'DD->D': npyfuncs.np_complex_float_power_impl, + } + + ufunc_db[np.gcd] = { + 'bb->b': npyfuncs.np_gcd_impl, + 'BB->B': npyfuncs.np_gcd_impl, + 'hh->h': npyfuncs.np_gcd_impl, + 'HH->H': npyfuncs.np_gcd_impl, + 'ii->i': npyfuncs.np_gcd_impl, + 'II->I': npyfuncs.np_gcd_impl, + 'll->l': npyfuncs.np_gcd_impl, + 'LL->L': npyfuncs.np_gcd_impl, + 'qq->q': npyfuncs.np_gcd_impl, + 'QQ->Q': npyfuncs.np_gcd_impl, + } + + ufunc_db[np.lcm] = { + 'bb->b': npyfuncs.np_lcm_impl, + 'BB->B': npyfuncs.np_lcm_impl, + 'hh->h': npyfuncs.np_lcm_impl, + 'HH->H': npyfuncs.np_lcm_impl, + 'ii->i': npyfuncs.np_lcm_impl, + 'II->I': npyfuncs.np_lcm_impl, + 'll->l': npyfuncs.np_lcm_impl, + 'LL->L': npyfuncs.np_lcm_impl, + 'qq->q': npyfuncs.np_lcm_impl, + 'QQ->Q': npyfuncs.np_lcm_impl, + } + + ufunc_db[np.rint] = { + 'f->f': npyfuncs.np_real_rint_impl, + 'd->d': npyfuncs.np_real_rint_impl, + 'F->F': npyfuncs.np_complex_rint_impl, + 'D->D': npyfuncs.np_complex_rint_impl, + } + + ufunc_db[np.conjugate] = { + 'b->b': numbers.real_conjugate_impl, + 'B->B': numbers.real_conjugate_impl, + 'h->h': numbers.real_conjugate_impl, + 'H->H': numbers.real_conjugate_impl, + 'i->i': numbers.real_conjugate_impl, + 'I->I': numbers.real_conjugate_impl, + 'l->l': numbers.real_conjugate_impl, + 'L->L': numbers.real_conjugate_impl, + 'q->q': numbers.real_conjugate_impl, + 'Q->Q': numbers.real_conjugate_impl, + 'f->f': numbers.real_conjugate_impl, + 'd->d': numbers.real_conjugate_impl, + 'F->F': numbers.complex_conjugate_impl, + 'D->D': 
numbers.complex_conjugate_impl, + } + + ufunc_db[np.exp] = { + 'f->f': npyfuncs.np_real_exp_impl, + 'd->d': npyfuncs.np_real_exp_impl, + 'F->F': npyfuncs.np_complex_exp_impl, + 'D->D': npyfuncs.np_complex_exp_impl, + } + + ufunc_db[np.exp2] = { + 'f->f': npyfuncs.np_real_exp2_impl, + 'd->d': npyfuncs.np_real_exp2_impl, + 'F->F': npyfuncs.np_complex_exp2_impl, + 'D->D': npyfuncs.np_complex_exp2_impl, + } + + ufunc_db[np.log] = { + 'f->f': npyfuncs.np_real_log_impl, + 'd->d': npyfuncs.np_real_log_impl, + 'F->F': npyfuncs.np_complex_log_impl, + 'D->D': npyfuncs.np_complex_log_impl, + } + + ufunc_db[np.log2] = { + 'f->f': npyfuncs.np_real_log2_impl, + 'd->d': npyfuncs.np_real_log2_impl, + 'F->F': npyfuncs.np_complex_log2_impl, + 'D->D': npyfuncs.np_complex_log2_impl, + } + + ufunc_db[np.log10] = { + 'f->f': npyfuncs.np_real_log10_impl, + 'd->d': npyfuncs.np_real_log10_impl, + 'F->F': npyfuncs.np_complex_log10_impl, + 'D->D': npyfuncs.np_complex_log10_impl, + } + + ufunc_db[np.expm1] = { + 'f->f': npyfuncs.np_real_expm1_impl, + 'd->d': npyfuncs.np_real_expm1_impl, + 'F->F': npyfuncs.np_complex_expm1_impl, + 'D->D': npyfuncs.np_complex_expm1_impl, + } + + ufunc_db[np.log1p] = { + 'f->f': npyfuncs.np_real_log1p_impl, + 'd->d': npyfuncs.np_real_log1p_impl, + 'F->F': npyfuncs.np_complex_log1p_impl, + 'D->D': npyfuncs.np_complex_log1p_impl, + } + + ufunc_db[np.sqrt] = { + 'f->f': npyfuncs.np_real_sqrt_impl, + 'd->d': npyfuncs.np_real_sqrt_impl, + 'F->F': npyfuncs.np_complex_sqrt_impl, + 'D->D': npyfuncs.np_complex_sqrt_impl, + } + + ufunc_db[np.square] = { + 'b->b': npyfuncs.np_int_square_impl, + 'B->B': npyfuncs.np_int_square_impl, + 'h->h': npyfuncs.np_int_square_impl, + 'H->H': npyfuncs.np_int_square_impl, + 'i->i': npyfuncs.np_int_square_impl, + 'I->I': npyfuncs.np_int_square_impl, + 'l->l': npyfuncs.np_int_square_impl, + 'L->L': npyfuncs.np_int_square_impl, + 'q->q': npyfuncs.np_int_square_impl, + 'Q->Q': npyfuncs.np_int_square_impl, + 'f->f': 
npyfuncs.np_real_square_impl, + 'd->d': npyfuncs.np_real_square_impl, + 'F->F': npyfuncs.np_complex_square_impl, + 'D->D': npyfuncs.np_complex_square_impl, + } + + ufunc_db[np.cbrt] = { + 'f->f': npyfuncs.np_real_cbrt_impl, + 'd->d': npyfuncs.np_real_cbrt_impl, + } + + ufunc_db[np.reciprocal] = { + 'b->b': npyfuncs.np_int_reciprocal_impl, + 'B->B': npyfuncs.np_int_reciprocal_impl, + 'h->h': npyfuncs.np_int_reciprocal_impl, + 'H->H': npyfuncs.np_int_reciprocal_impl, + 'i->i': npyfuncs.np_int_reciprocal_impl, + 'I->I': npyfuncs.np_int_reciprocal_impl, + 'l->l': npyfuncs.np_int_reciprocal_impl, + 'L->L': npyfuncs.np_int_reciprocal_impl, + 'q->q': npyfuncs.np_int_reciprocal_impl, + 'Q->Q': npyfuncs.np_int_reciprocal_impl, + 'f->f': npyfuncs.np_real_reciprocal_impl, + 'd->d': npyfuncs.np_real_reciprocal_impl, + 'F->F': npyfuncs.np_complex_reciprocal_impl, + 'D->D': npyfuncs.np_complex_reciprocal_impl, + } + + ufunc_db[np.sin] = { + 'f->f': npyfuncs.np_real_sin_impl, + 'd->d': npyfuncs.np_real_sin_impl, + 'F->F': npyfuncs.np_complex_sin_impl, + 'D->D': npyfuncs.np_complex_sin_impl, + } + + ufunc_db[np.cos] = { + 'f->f': npyfuncs.np_real_cos_impl, + 'd->d': npyfuncs.np_real_cos_impl, + 'F->F': npyfuncs.np_complex_cos_impl, + 'D->D': npyfuncs.np_complex_cos_impl, + } + + tan_impl = cmathimpl.tan_impl + + ufunc_db[np.tan] = { + 'f->f': npyfuncs.np_real_tan_impl, + 'd->d': npyfuncs.np_real_tan_impl, + 'F->F': tan_impl, + 'D->D': tan_impl, + } + + arcsin_impl = cmathimpl.asin_impl + + ufunc_db[np.arcsin] = { + 'f->f': npyfuncs.np_real_asin_impl, + 'd->d': npyfuncs.np_real_asin_impl, + 'F->F': arcsin_impl, + 'D->D': arcsin_impl, + } + + ufunc_db[np.arccos] = { + 'f->f': npyfuncs.np_real_acos_impl, + 'd->d': npyfuncs.np_real_acos_impl, + 'F->F': cmathimpl.acos_impl, + 'D->D': cmathimpl.acos_impl, + } + + arctan_impl = cmathimpl.atan_impl + + ufunc_db[np.arctan] = { + 'f->f': npyfuncs.np_real_atan_impl, + 'd->d': npyfuncs.np_real_atan_impl, + 'F->F': arctan_impl, + 'D->D': 
arctan_impl, + } + + ufunc_db[np.arctan2] = { + 'ff->f': npyfuncs.np_real_atan2_impl, + 'dd->d': npyfuncs.np_real_atan2_impl, + } + + ufunc_db[np.hypot] = { + 'ff->f': npyfuncs.np_real_hypot_impl, + 'dd->d': npyfuncs.np_real_hypot_impl, + } + + ufunc_db[np.sinh] = { + 'f->f': npyfuncs.np_real_sinh_impl, + 'd->d': npyfuncs.np_real_sinh_impl, + 'F->F': npyfuncs.np_complex_sinh_impl, + 'D->D': npyfuncs.np_complex_sinh_impl, + } + + ufunc_db[np.cosh] = { + 'f->f': npyfuncs.np_real_cosh_impl, + 'd->d': npyfuncs.np_real_cosh_impl, + 'F->F': npyfuncs.np_complex_cosh_impl, + 'D->D': npyfuncs.np_complex_cosh_impl, + } + + ufunc_db[np.tanh] = { + 'f->f': npyfuncs.np_real_tanh_impl, + 'd->d': npyfuncs.np_real_tanh_impl, + 'F->F': npyfuncs.np_complex_tanh_impl, + 'D->D': npyfuncs.np_complex_tanh_impl, + } + + arcsinh_impl = cmathimpl.asinh_impl + + ufunc_db[np.arcsinh] = { + 'f->f': npyfuncs.np_real_asinh_impl, + 'd->d': npyfuncs.np_real_asinh_impl, + 'F->F': arcsinh_impl, + 'D->D': arcsinh_impl, + } + + ufunc_db[np.arccosh] = { + 'f->f': npyfuncs.np_real_acosh_impl, + 'd->d': npyfuncs.np_real_acosh_impl, + 'F->F': npyfuncs.np_complex_acosh_impl, + 'D->D': npyfuncs.np_complex_acosh_impl, + } + + arctanh_impl = cmathimpl.atanh_impl + + ufunc_db[np.arctanh] = { + 'f->f': npyfuncs.np_real_atanh_impl, + 'd->d': npyfuncs.np_real_atanh_impl, + 'F->F': arctanh_impl, + 'D->D': arctanh_impl, + } + + ufunc_db[np.deg2rad] = { + 'f->f': mathimpl.radians_float_impl, + 'd->d': mathimpl.radians_float_impl, + } + + ufunc_db[np.radians] = ufunc_db[np.deg2rad] + + ufunc_db[np.rad2deg] = { + 'f->f': mathimpl.degrees_float_impl, + 'd->d': mathimpl.degrees_float_impl, + } + + ufunc_db[np.degrees] = ufunc_db[np.rad2deg] + + ufunc_db[np.floor] = { + 'f->f': npyfuncs.np_real_floor_impl, + 'd->d': npyfuncs.np_real_floor_impl, + } + if numpy_version >= (2, 1): + ufunc_db[np.floor].update({ + '?->?': numbers.identity_impl, + 'b->b': numbers.identity_impl, + 'B->B': numbers.identity_impl, + 'h->h': 
numbers.identity_impl, + 'H->H': numbers.identity_impl, + 'i->i': numbers.identity_impl, + 'I->I': numbers.identity_impl, + 'l->l': numbers.identity_impl, + 'L->L': numbers.identity_impl, + 'q->q': numbers.identity_impl, + 'Q->Q': numbers.identity_impl, + }) + + ufunc_db[np.ceil] = { + 'f->f': npyfuncs.np_real_ceil_impl, + 'd->d': npyfuncs.np_real_ceil_impl, + } + if numpy_version >= (2, 1): + ufunc_db[np.ceil].update({ + '?->?': numbers.identity_impl, + 'b->b': numbers.identity_impl, + 'B->B': numbers.identity_impl, + 'h->h': numbers.identity_impl, + 'H->H': numbers.identity_impl, + 'i->i': numbers.identity_impl, + 'I->I': numbers.identity_impl, + 'l->l': numbers.identity_impl, + 'L->L': numbers.identity_impl, + 'q->q': numbers.identity_impl, + 'Q->Q': numbers.identity_impl, + }) + + ufunc_db[np.trunc] = { + 'f->f': npyfuncs.np_real_trunc_impl, + 'd->d': npyfuncs.np_real_trunc_impl, + } + if numpy_version >= (2, 1): + ufunc_db[np.trunc].update({ + '?->?': numbers.identity_impl, + 'b->b': numbers.identity_impl, + 'B->B': numbers.identity_impl, + 'h->h': numbers.identity_impl, + 'H->H': numbers.identity_impl, + 'i->i': numbers.identity_impl, + 'I->I': numbers.identity_impl, + 'l->l': numbers.identity_impl, + 'L->L': numbers.identity_impl, + 'q->q': numbers.identity_impl, + 'Q->Q': numbers.identity_impl, + }) + + ufunc_db[np.fabs] = { + 'f->f': npyfuncs.np_real_fabs_impl, + 'd->d': npyfuncs.np_real_fabs_impl, + } + + # logical ufuncs + ufunc_db[np.greater] = { + '??->?': numbers.int_ugt_impl, + 'bb->?': numbers.int_sgt_impl, + 'BB->?': numbers.int_ugt_impl, + 'hh->?': numbers.int_sgt_impl, + 'HH->?': numbers.int_ugt_impl, + 'ii->?': numbers.int_sgt_impl, + 'II->?': numbers.int_ugt_impl, + 'll->?': numbers.int_sgt_impl, + 'LL->?': numbers.int_ugt_impl, + 'qq->?': numbers.int_sgt_impl, + 'QQ->?': numbers.int_ugt_impl, + 'ff->?': numbers.real_gt_impl, + 'dd->?': numbers.real_gt_impl, + 'FF->?': npyfuncs.np_complex_gt_impl, + 'DD->?': npyfuncs.np_complex_gt_impl, + } + 
if numpy_version >= (1, 25): + ufunc_db[np.greater].update({ + 'qQ->?': numbers.int_signed_unsigned_cmp('>'), + 'Qq->?': numbers.int_unsigned_signed_cmp('>')}) + + ufunc_db[np.greater_equal] = { + '??->?': numbers.int_uge_impl, + 'bb->?': numbers.int_sge_impl, + 'BB->?': numbers.int_uge_impl, + 'hh->?': numbers.int_sge_impl, + 'HH->?': numbers.int_uge_impl, + 'ii->?': numbers.int_sge_impl, + 'II->?': numbers.int_uge_impl, + 'll->?': numbers.int_sge_impl, + 'LL->?': numbers.int_uge_impl, + 'qq->?': numbers.int_sge_impl, + 'QQ->?': numbers.int_uge_impl, + 'ff->?': numbers.real_ge_impl, + 'dd->?': numbers.real_ge_impl, + 'FF->?': npyfuncs.np_complex_ge_impl, + 'DD->?': npyfuncs.np_complex_ge_impl, + } + if numpy_version >= (1, 25): + ufunc_db[np.greater_equal].update({ + 'qQ->?': numbers.int_signed_unsigned_cmp('>='), + 'Qq->?': numbers.int_unsigned_signed_cmp('>=')}) + + ufunc_db[np.less] = { + '??->?': numbers.int_ult_impl, + 'bb->?': numbers.int_slt_impl, + 'BB->?': numbers.int_ult_impl, + 'hh->?': numbers.int_slt_impl, + 'HH->?': numbers.int_ult_impl, + 'ii->?': numbers.int_slt_impl, + 'II->?': numbers.int_ult_impl, + 'll->?': numbers.int_slt_impl, + 'LL->?': numbers.int_ult_impl, + 'qq->?': numbers.int_slt_impl, + 'QQ->?': numbers.int_ult_impl, + 'ff->?': numbers.real_lt_impl, + 'dd->?': numbers.real_lt_impl, + 'FF->?': npyfuncs.np_complex_lt_impl, + 'DD->?': npyfuncs.np_complex_lt_impl, + } + if numpy_version >= (1, 25): + ufunc_db[np.less].update({ + 'qQ->?': numbers.int_signed_unsigned_cmp('<'), + 'Qq->?': numbers.int_unsigned_signed_cmp('<')}) + + ufunc_db[np.less_equal] = { + '??->?': numbers.int_ule_impl, + 'bb->?': numbers.int_sle_impl, + 'BB->?': numbers.int_ule_impl, + 'hh->?': numbers.int_sle_impl, + 'HH->?': numbers.int_ule_impl, + 'ii->?': numbers.int_sle_impl, + 'II->?': numbers.int_ule_impl, + 'll->?': numbers.int_sle_impl, + 'LL->?': numbers.int_ule_impl, + 'qq->?': numbers.int_sle_impl, + 'QQ->?': numbers.int_ule_impl, + 'ff->?': 
numbers.real_le_impl, + 'dd->?': numbers.real_le_impl, + 'FF->?': npyfuncs.np_complex_le_impl, + 'DD->?': npyfuncs.np_complex_le_impl, + } + if numpy_version >= (1, 25): + ufunc_db[np.less_equal].update({ + 'qQ->?': numbers.int_signed_unsigned_cmp('<='), + 'Qq->?': numbers.int_unsigned_signed_cmp('<=')}) + + ufunc_db[np.not_equal] = { + '??->?': numbers.int_ne_impl, + 'bb->?': numbers.int_ne_impl, + 'BB->?': numbers.int_ne_impl, + 'hh->?': numbers.int_ne_impl, + 'HH->?': numbers.int_ne_impl, + 'ii->?': numbers.int_ne_impl, + 'II->?': numbers.int_ne_impl, + 'll->?': numbers.int_ne_impl, + 'LL->?': numbers.int_ne_impl, + 'qq->?': numbers.int_ne_impl, + 'QQ->?': numbers.int_ne_impl, + 'ff->?': numbers.real_ne_impl, + 'dd->?': numbers.real_ne_impl, + 'FF->?': npyfuncs.np_complex_ne_impl, + 'DD->?': npyfuncs.np_complex_ne_impl, + } + if numpy_version >= (1, 25): + ufunc_db[np.not_equal].update({ + 'qQ->?': numbers.int_signed_unsigned_cmp('!='), + 'Qq->?': numbers.int_unsigned_signed_cmp('!=')}) + + ufunc_db[np.equal] = { + '??->?': numbers.int_eq_impl, + 'bb->?': numbers.int_eq_impl, + 'BB->?': numbers.int_eq_impl, + 'hh->?': numbers.int_eq_impl, + 'HH->?': numbers.int_eq_impl, + 'ii->?': numbers.int_eq_impl, + 'II->?': numbers.int_eq_impl, + 'll->?': numbers.int_eq_impl, + 'LL->?': numbers.int_eq_impl, + 'qq->?': numbers.int_eq_impl, + 'QQ->?': numbers.int_eq_impl, + 'ff->?': numbers.real_eq_impl, + 'dd->?': numbers.real_eq_impl, + 'FF->?': npyfuncs.np_complex_eq_impl, + 'DD->?': npyfuncs.np_complex_eq_impl, + } + if numpy_version >= (1, 25): + ufunc_db[np.equal].update({ + 'qQ->?': numbers.int_signed_unsigned_cmp('=='), + 'Qq->?': numbers.int_unsigned_signed_cmp('==')}) + + ufunc_db[np.logical_and] = { + '??->?': npyfuncs.np_logical_and_impl, + 'bb->?': npyfuncs.np_logical_and_impl, + 'BB->?': npyfuncs.np_logical_and_impl, + 'hh->?': npyfuncs.np_logical_and_impl, + 'HH->?': npyfuncs.np_logical_and_impl, + 'ii->?': npyfuncs.np_logical_and_impl, + 'II->?': 
npyfuncs.np_logical_and_impl, + 'll->?': npyfuncs.np_logical_and_impl, + 'LL->?': npyfuncs.np_logical_and_impl, + 'qq->?': npyfuncs.np_logical_and_impl, + 'QQ->?': npyfuncs.np_logical_and_impl, + 'ff->?': npyfuncs.np_logical_and_impl, + 'dd->?': npyfuncs.np_logical_and_impl, + 'FF->?': npyfuncs.np_complex_logical_and_impl, + 'DD->?': npyfuncs.np_complex_logical_and_impl, + } + + ufunc_db[np.logical_or] = { + '??->?': npyfuncs.np_logical_or_impl, + 'bb->?': npyfuncs.np_logical_or_impl, + 'BB->?': npyfuncs.np_logical_or_impl, + 'hh->?': npyfuncs.np_logical_or_impl, + 'HH->?': npyfuncs.np_logical_or_impl, + 'ii->?': npyfuncs.np_logical_or_impl, + 'II->?': npyfuncs.np_logical_or_impl, + 'll->?': npyfuncs.np_logical_or_impl, + 'LL->?': npyfuncs.np_logical_or_impl, + 'qq->?': npyfuncs.np_logical_or_impl, + 'QQ->?': npyfuncs.np_logical_or_impl, + 'ff->?': npyfuncs.np_logical_or_impl, + 'dd->?': npyfuncs.np_logical_or_impl, + 'FF->?': npyfuncs.np_complex_logical_or_impl, + 'DD->?': npyfuncs.np_complex_logical_or_impl, + } + + ufunc_db[np.logical_xor] = { + '??->?': npyfuncs.np_logical_xor_impl, + 'bb->?': npyfuncs.np_logical_xor_impl, + 'BB->?': npyfuncs.np_logical_xor_impl, + 'hh->?': npyfuncs.np_logical_xor_impl, + 'HH->?': npyfuncs.np_logical_xor_impl, + 'ii->?': npyfuncs.np_logical_xor_impl, + 'II->?': npyfuncs.np_logical_xor_impl, + 'll->?': npyfuncs.np_logical_xor_impl, + 'LL->?': npyfuncs.np_logical_xor_impl, + 'qq->?': npyfuncs.np_logical_xor_impl, + 'QQ->?': npyfuncs.np_logical_xor_impl, + 'ff->?': npyfuncs.np_logical_xor_impl, + 'dd->?': npyfuncs.np_logical_xor_impl, + 'FF->?': npyfuncs.np_complex_logical_xor_impl, + 'DD->?': npyfuncs.np_complex_logical_xor_impl, + } + + ufunc_db[np.logical_not] = { + '?->?': npyfuncs.np_logical_not_impl, + 'b->?': npyfuncs.np_logical_not_impl, + 'B->?': npyfuncs.np_logical_not_impl, + 'h->?': npyfuncs.np_logical_not_impl, + 'H->?': npyfuncs.np_logical_not_impl, + 'i->?': npyfuncs.np_logical_not_impl, + 'I->?': 
npyfuncs.np_logical_not_impl, + 'l->?': npyfuncs.np_logical_not_impl, + 'L->?': npyfuncs.np_logical_not_impl, + 'q->?': npyfuncs.np_logical_not_impl, + 'Q->?': npyfuncs.np_logical_not_impl, + 'f->?': npyfuncs.np_logical_not_impl, + 'd->?': npyfuncs.np_logical_not_impl, + 'F->?': npyfuncs.np_complex_logical_not_impl, + 'D->?': npyfuncs.np_complex_logical_not_impl, + } + + ufunc_db[np.maximum] = { + '??->?': npyfuncs.np_logical_or_impl, + 'bb->b': npyfuncs.np_int_smax_impl, + 'BB->B': npyfuncs.np_int_umax_impl, + 'hh->h': npyfuncs.np_int_smax_impl, + 'HH->H': npyfuncs.np_int_umax_impl, + 'ii->i': npyfuncs.np_int_smax_impl, + 'II->I': npyfuncs.np_int_umax_impl, + 'll->l': npyfuncs.np_int_smax_impl, + 'LL->L': npyfuncs.np_int_umax_impl, + 'qq->q': npyfuncs.np_int_smax_impl, + 'QQ->Q': npyfuncs.np_int_umax_impl, + 'ff->f': npyfuncs.np_real_maximum_impl, + 'dd->d': npyfuncs.np_real_maximum_impl, + 'FF->F': npyfuncs.np_complex_maximum_impl, + 'DD->D': npyfuncs.np_complex_maximum_impl, + } + + ufunc_db[np.minimum] = { + '??->?': npyfuncs.np_logical_and_impl, + 'bb->b': npyfuncs.np_int_smin_impl, + 'BB->B': npyfuncs.np_int_umin_impl, + 'hh->h': npyfuncs.np_int_smin_impl, + 'HH->H': npyfuncs.np_int_umin_impl, + 'ii->i': npyfuncs.np_int_smin_impl, + 'II->I': npyfuncs.np_int_umin_impl, + 'll->l': npyfuncs.np_int_smin_impl, + 'LL->L': npyfuncs.np_int_umin_impl, + 'qq->q': npyfuncs.np_int_smin_impl, + 'QQ->Q': npyfuncs.np_int_umin_impl, + 'ff->f': npyfuncs.np_real_minimum_impl, + 'dd->d': npyfuncs.np_real_minimum_impl, + 'FF->F': npyfuncs.np_complex_minimum_impl, + 'DD->D': npyfuncs.np_complex_minimum_impl, + } + + ufunc_db[np.fmax] = { + '??->?': npyfuncs.np_logical_or_impl, + 'bb->b': npyfuncs.np_int_smax_impl, + 'BB->B': npyfuncs.np_int_umax_impl, + 'hh->h': npyfuncs.np_int_smax_impl, + 'HH->H': npyfuncs.np_int_umax_impl, + 'ii->i': npyfuncs.np_int_smax_impl, + 'II->I': npyfuncs.np_int_umax_impl, + 'll->l': npyfuncs.np_int_smax_impl, + 'LL->L': npyfuncs.np_int_umax_impl, + 
'qq->q': npyfuncs.np_int_smax_impl, + 'QQ->Q': npyfuncs.np_int_umax_impl, + 'ff->f': npyfuncs.np_real_fmax_impl, + 'dd->d': npyfuncs.np_real_fmax_impl, + 'FF->F': npyfuncs.np_complex_fmax_impl, + 'DD->D': npyfuncs.np_complex_fmax_impl, + } + + ufunc_db[np.fmin] = { + '??->?': npyfuncs.np_logical_and_impl, + 'bb->b': npyfuncs.np_int_smin_impl, + 'BB->B': npyfuncs.np_int_umin_impl, + 'hh->h': npyfuncs.np_int_smin_impl, + 'HH->H': npyfuncs.np_int_umin_impl, + 'ii->i': npyfuncs.np_int_smin_impl, + 'II->I': npyfuncs.np_int_umin_impl, + 'll->l': npyfuncs.np_int_smin_impl, + 'LL->L': npyfuncs.np_int_umin_impl, + 'qq->q': npyfuncs.np_int_smin_impl, + 'QQ->Q': npyfuncs.np_int_umin_impl, + 'ff->f': npyfuncs.np_real_fmin_impl, + 'dd->d': npyfuncs.np_real_fmin_impl, + 'FF->F': npyfuncs.np_complex_fmin_impl, + 'DD->D': npyfuncs.np_complex_fmin_impl, + } + + # misc floating functions + ufunc_db[np.isnan] = { + 'f->?': npyfuncs.np_real_isnan_impl, + 'd->?': npyfuncs.np_real_isnan_impl, + 'F->?': npyfuncs.np_complex_isnan_impl, + 'D->?': npyfuncs.np_complex_isnan_impl, + # int8 + 'b->?': npyfuncs.np_int_isnan_impl, + 'B->?': npyfuncs.np_int_isnan_impl, + # int16 + 'h->?': npyfuncs.np_int_isnan_impl, + 'H->?': npyfuncs.np_int_isnan_impl, + # int32 + 'i->?': npyfuncs.np_int_isnan_impl, + 'I->?': npyfuncs.np_int_isnan_impl, + # int64 + 'l->?': npyfuncs.np_int_isnan_impl, + 'L->?': npyfuncs.np_int_isnan_impl, + # intp + 'q->?': npyfuncs.np_int_isnan_impl, + 'Q->?': npyfuncs.np_int_isnan_impl, + # boolean + '?->?': npyfuncs.np_int_isnan_impl, + # datetime & timedelta + 'm->?': npyfuncs.np_datetime_isnat_impl, + 'M->?': npyfuncs.np_datetime_isnat_impl, + } + + ufunc_db[np.isinf] = { + 'f->?': npyfuncs.np_real_isinf_impl, + 'd->?': npyfuncs.np_real_isinf_impl, + 'F->?': npyfuncs.np_complex_isinf_impl, + 'D->?': npyfuncs.np_complex_isinf_impl, + # int8 + 'b->?': npyfuncs.np_int_isinf_impl, + 'B->?': npyfuncs.np_int_isinf_impl, + # int16 + 'h->?': npyfuncs.np_int_isinf_impl, + 'H->?': 
npyfuncs.np_int_isinf_impl, + # int32 + 'i->?': npyfuncs.np_int_isinf_impl, + 'I->?': npyfuncs.np_int_isinf_impl, + # int64 + 'l->?': npyfuncs.np_int_isinf_impl, + 'L->?': npyfuncs.np_int_isinf_impl, + # intp + 'q->?': npyfuncs.np_int_isinf_impl, + 'Q->?': npyfuncs.np_int_isinf_impl, + # boolean + '?->?': npyfuncs.np_int_isinf_impl, + # datetime & timedelta + 'm->?': npyfuncs.np_int_isinf_impl, + 'M->?': npyfuncs.np_int_isinf_impl, + } + + ufunc_db[np.isfinite] = { + 'f->?': npyfuncs.np_real_isfinite_impl, + 'd->?': npyfuncs.np_real_isfinite_impl, + 'F->?': npyfuncs.np_complex_isfinite_impl, + 'D->?': npyfuncs.np_complex_isfinite_impl, + # int8 + 'b->?': npyfuncs.np_int_isfinite_impl, + 'B->?': npyfuncs.np_int_isfinite_impl, + # int16 + 'h->?': npyfuncs.np_int_isfinite_impl, + 'H->?': npyfuncs.np_int_isfinite_impl, + # int32 + 'i->?': npyfuncs.np_int_isfinite_impl, + 'I->?': npyfuncs.np_int_isfinite_impl, + # int64 + 'l->?': npyfuncs.np_int_isfinite_impl, + 'L->?': npyfuncs.np_int_isfinite_impl, + # intp + 'q->?': npyfuncs.np_int_isfinite_impl, + 'Q->?': npyfuncs.np_int_isfinite_impl, + # boolean + '?->?': npyfuncs.np_int_isfinite_impl, + # datetime & timedelta + 'M->?': npyfuncs.np_datetime_isfinite_impl, + 'm->?': npyfuncs.np_datetime_isfinite_impl, + } + + ufunc_db[np.signbit] = { + 'f->?': npyfuncs.np_real_signbit_impl, + 'd->?': npyfuncs.np_real_signbit_impl, + } + + ufunc_db[np.copysign] = { + 'ff->f': npyfuncs.np_real_copysign_impl, + 'dd->d': npyfuncs.np_real_copysign_impl, + } + + ufunc_db[np.nextafter] = { + 'ff->f': npyfuncs.np_real_nextafter_impl, + 'dd->d': npyfuncs.np_real_nextafter_impl, + } + + ufunc_db[np.spacing] = { + 'f->f': npyfuncs.np_real_spacing_impl, + 'd->d': npyfuncs.np_real_spacing_impl, + } + + ufunc_db[np.ldexp] = { + 'fi->f': npyfuncs.np_real_ldexp_impl, + 'fl->f': npyfuncs.np_real_ldexp_impl, + 'di->d': npyfuncs.np_real_ldexp_impl, + 'dl->d': npyfuncs.np_real_ldexp_impl, + } + if numpy_version >= (2, 0) and IS_WIN32: + 
ufunc_db[np.ldexp]['fq->f'] = ufunc_db[np.ldexp].pop('fl->f') + ufunc_db[np.ldexp]['dq->d'] = ufunc_db[np.ldexp].pop('dl->d') + + # bit twiddling functions + ufunc_db[np.bitwise_and] = { + '??->?': numbers.int_and_impl, + 'bb->b': numbers.int_and_impl, + 'BB->B': numbers.int_and_impl, + 'hh->h': numbers.int_and_impl, + 'HH->H': numbers.int_and_impl, + 'ii->i': numbers.int_and_impl, + 'II->I': numbers.int_and_impl, + 'll->l': numbers.int_and_impl, + 'LL->L': numbers.int_and_impl, + 'qq->q': numbers.int_and_impl, + 'QQ->Q': numbers.int_and_impl, + } + + ufunc_db[np.bitwise_or] = { + '??->?': numbers.int_or_impl, + 'bb->b': numbers.int_or_impl, + 'BB->B': numbers.int_or_impl, + 'hh->h': numbers.int_or_impl, + 'HH->H': numbers.int_or_impl, + 'ii->i': numbers.int_or_impl, + 'II->I': numbers.int_or_impl, + 'll->l': numbers.int_or_impl, + 'LL->L': numbers.int_or_impl, + 'qq->q': numbers.int_or_impl, + 'QQ->Q': numbers.int_or_impl, + } + + ufunc_db[np.bitwise_xor] = { + '??->?': numbers.int_xor_impl, + 'bb->b': numbers.int_xor_impl, + 'BB->B': numbers.int_xor_impl, + 'hh->h': numbers.int_xor_impl, + 'HH->H': numbers.int_xor_impl, + 'ii->i': numbers.int_xor_impl, + 'II->I': numbers.int_xor_impl, + 'll->l': numbers.int_xor_impl, + 'LL->L': numbers.int_xor_impl, + 'qq->q': numbers.int_xor_impl, + 'QQ->Q': numbers.int_xor_impl, + } + + ufunc_db[np.invert] = { # aka np.bitwise_not + '?->?': numbers.int_invert_impl, + 'b->b': numbers.int_invert_impl, + 'B->B': numbers.int_invert_impl, + 'h->h': numbers.int_invert_impl, + 'H->H': numbers.int_invert_impl, + 'i->i': numbers.int_invert_impl, + 'I->I': numbers.int_invert_impl, + 'l->l': numbers.int_invert_impl, + 'L->L': numbers.int_invert_impl, + 'q->q': numbers.int_invert_impl, + 'Q->Q': numbers.int_invert_impl, + } + + ufunc_db[np.left_shift] = { + 'bb->b': numbers.int_shl_impl, + 'BB->B': numbers.int_shl_impl, + 'hh->h': numbers.int_shl_impl, + 'HH->H': numbers.int_shl_impl, + 'ii->i': numbers.int_shl_impl, + 'II->I': 
numbers.int_shl_impl, + 'll->l': numbers.int_shl_impl, + 'LL->L': numbers.int_shl_impl, + 'qq->q': numbers.int_shl_impl, + 'QQ->Q': numbers.int_shl_impl, + } + + ufunc_db[np.right_shift] = { + 'bb->b': numbers.int_shr_impl, + 'BB->B': numbers.int_shr_impl, + 'hh->h': numbers.int_shr_impl, + 'HH->H': numbers.int_shr_impl, + 'ii->i': numbers.int_shr_impl, + 'II->I': numbers.int_shr_impl, + 'll->l': numbers.int_shr_impl, + 'LL->L': numbers.int_shr_impl, + 'qq->q': numbers.int_shr_impl, + 'QQ->Q': numbers.int_shr_impl, + } + + # Inject datetime64 support + from numba.np import npdatetime + ufunc_db[np.negative].update({ + 'm->m': npdatetime.timedelta_neg_impl, + }) + ufunc_db[np.positive].update({ + 'm->m': npdatetime.timedelta_pos_impl, + }) + ufunc_db[np.absolute].update({ + 'm->m': npdatetime.timedelta_abs_impl, + }) + ufunc_db[np.sign].update({ + 'm->m': npdatetime.timedelta_sign_impl, + }) + ufunc_db[np.add].update({ + 'mm->m': npdatetime.timedelta_add_impl, + 'Mm->M': npdatetime.datetime_plus_timedelta, + 'mM->M': npdatetime.timedelta_plus_datetime, + }) + ufunc_db[np.subtract].update({ + 'mm->m': npdatetime.timedelta_sub_impl, + 'Mm->M': npdatetime.datetime_minus_timedelta, + 'MM->m': npdatetime.datetime_minus_datetime, + }) + ufunc_db[np.multiply].update({ + 'mq->m': npdatetime.timedelta_times_number, + 'md->m': npdatetime.timedelta_times_number, + 'qm->m': npdatetime.number_times_timedelta, + 'dm->m': npdatetime.number_times_timedelta, + }) + if np.divide != np.true_divide: + ufunc_db[np.divide].update({ + 'mq->m': npdatetime.timedelta_over_number, + 'md->m': npdatetime.timedelta_over_number, + 'mm->d': npdatetime.timedelta_over_timedelta, + }) + ufunc_db[np.true_divide].update({ + 'mq->m': npdatetime.timedelta_over_number, + 'md->m': npdatetime.timedelta_over_number, + 'mm->d': npdatetime.timedelta_over_timedelta, + }) + ufunc_db[np.floor_divide].update({ + 'mq->m': npdatetime.timedelta_over_number, + 'md->m': npdatetime.timedelta_over_number, + }) + + 
ufunc_db[np.floor_divide].update({ + 'mm->q': npdatetime.timedelta_floor_div_timedelta, + }) + + ufunc_db[np.equal].update({ + 'MM->?': npdatetime.datetime_eq_datetime_impl, + 'mm->?': npdatetime.timedelta_eq_timedelta_impl, + }) + ufunc_db[np.not_equal].update({ + 'MM->?': npdatetime.datetime_ne_datetime_impl, + 'mm->?': npdatetime.timedelta_ne_timedelta_impl, + }) + ufunc_db[np.less].update({ + 'MM->?': npdatetime.datetime_lt_datetime_impl, + 'mm->?': npdatetime.timedelta_lt_timedelta_impl, + }) + ufunc_db[np.less_equal].update({ + 'MM->?': npdatetime.datetime_le_datetime_impl, + 'mm->?': npdatetime.timedelta_le_timedelta_impl, + }) + ufunc_db[np.greater].update({ + 'MM->?': npdatetime.datetime_gt_datetime_impl, + 'mm->?': npdatetime.timedelta_gt_timedelta_impl, + }) + ufunc_db[np.greater_equal].update({ + 'MM->?': npdatetime.datetime_ge_datetime_impl, + 'mm->?': npdatetime.timedelta_ge_timedelta_impl, + }) + ufunc_db[np.maximum].update({ + 'MM->M': npdatetime.datetime_maximum_impl, + 'mm->m': npdatetime.timedelta_maximum_impl, + }) + ufunc_db[np.minimum].update({ + 'MM->M': npdatetime.datetime_minimum_impl, + 'mm->m': npdatetime.timedelta_minimum_impl, + }) + # there is no difference for datetime/timedelta in maximum/fmax + # and minimum/fmin + ufunc_db[np.fmax].update({ + 'MM->M': npdatetime.datetime_fmax_impl, + 'mm->m': npdatetime.timedelta_fmax_impl, + }) + ufunc_db[np.fmin].update({ + 'MM->M': npdatetime.datetime_fmin_impl, + 'mm->m': npdatetime.timedelta_fmin_impl, + }) + + ufunc_db[np.remainder].update({ + 'mm->m': npdatetime.timedelta_mod_timedelta, + }) diff --git a/venv/lib/python3.10/site-packages/numba/np/unsafe/__init__.py b/venv/lib/python3.10/site-packages/numba/np/unsafe/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/numba/np/unsafe/__pycache__/__init__.cpython-310.pyc 
"""
This file provides internal compiler utilities that support certain special
operations with numpy.
"""
from numba.core import types, typing
from numba.core.cgutils import unpack_tuple
from numba.core.extending import intrinsic
from numba.core.imputils import impl_ret_new_ref
from numba.core.errors import RequireLiteralValue, TypingError

from numba.cpython.unsafe.tuple import tuple_setitem


@intrinsic
def empty_inferred(typingctx, shape):
    """A version of numpy.empty whose dtype is inferred by the type system.

    Expects `shape` to be a int-tuple.

    There is special logic in the type-inferencer to handle the "refine"-ing
    of undefined dtype.
    """
    # Deferred import: arrayobj itself depends on utilities in this package.
    from numba.np.arrayobj import _empty_nd_impl

    def codegen(context, builder, signature, args):
        # check that the return type is now defined
        # (the type-inferencer is expected to have refined types.undefined
        # into a concrete dtype by the time lowering runs)
        arrty = signature.return_type
        assert arrty.is_precise()
        shapes = unpack_tuple(builder, args[0])
        # redirect implementation to np.empty
        res = _empty_nd_impl(context, builder, arrty, shapes)
        # NRT-managed: report a new reference for the freshly allocated array
        return impl_ret_new_ref(context, builder, arrty, res._getvalue())

    # make function signature: a C-contiguous array whose ndim comes from the
    # shape tuple's length and whose dtype is deliberately left undefined
    nd = len(shape)
    array_ty = types.Array(ndim=nd, layout='C', dtype=types.undefined)
    sig = array_ty(shape)
    return sig, codegen


@intrinsic
def to_fixed_tuple(typingctx, array, length):
    """Convert *array* into a tuple of *length*

    Returns ``UniTuple(array.dtype, length)``

    ** Warning **
    - No boundchecking.
      If *length* is longer than *array.size*, the behavior is undefined.
    """
    # *length* must be a compile-time constant so the tuple arity is known.
    if not isinstance(length, types.IntegerLiteral):
        raise RequireLiteralValue('*length* argument must be a constant')

    if array.ndim != 1:
        raise TypingError("Not supported on array.ndim={}".format(array.ndim))

    # Determine types
    tuple_size = int(length.literal_value)
    tuple_type = types.UniTuple(dtype=array.dtype, count=tuple_size)
    sig = tuple_type(array, length)

    def codegen(context, builder, signature, args):
        # Pure-Python kernel compiled below: fills the tuple element-wise via
        # tuple_setitem (tuples are immutable, so each step rebinds `out`).
        def impl(array, length, empty_tuple):
            out = empty_tuple
            for i in range(length):
                out = tuple_setitem(out, i, array[i])
            return out

        inner_argtypes = [signature.args[0], types.intp, tuple_type]
        inner_sig = typing.signature(tuple_type, *inner_argtypes)
        ll_idx_type = context.get_value_type(types.intp)
        # Allocate an empty tuple (LLVM undef value to be filled by impl)
        empty_tuple = context.get_constant_undef(tuple_type)
        inner_args = [args[0], ll_idx_type(tuple_size), empty_tuple]

        res = context.compile_internal(builder, impl, inner_sig, inner_args)
        return res

    return sig, codegen
mode 100644 index 0000000000000000000000000000000000000000..f2acbe3396926a249aff5bb6560735e8139cda9a --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/parfors/__init__.py @@ -0,0 +1 @@ +from numba.parfors import parfor_lowering diff --git a/venv/lib/python3.10/site-packages/numba/parfors/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/parfors/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d097e317acfc11c6231d1a45b2ae6089f303322 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/parfors/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/parfors/__pycache__/array_analysis.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/parfors/__pycache__/array_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5be0809874f4ccc79df183e281d6db4716a31aa2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/parfors/__pycache__/array_analysis.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/parfors/__pycache__/parfor_lowering.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/parfors/__pycache__/parfor_lowering.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c8af0fbfdd8515ebb19911d6b0ba69b77598883 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/parfors/__pycache__/parfor_lowering.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/parfors/__pycache__/parfor_lowering_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/parfors/__pycache__/parfor_lowering_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38e92afcfe74f3203f97fc5820315c7f3fb3f05f Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/parfors/__pycache__/parfor_lowering_utils.cpython-310.pyc differ diff --git 
#
# Copyright (c) 2017 Intel Corporation
# SPDX-License-Identifier: BSD-2-Clause
#

import numpy
import operator
from numba.core import types, ir, config, cgutils, errors
from numba.core.ir_utils import (
    mk_unique_var,
    find_topo_order,
    dprint_func_ir,
    get_global_func_typ,
    guard,
    require,
    get_definition,
    find_callname,
    find_build_sequence,
    find_const,
    is_namedtuple_class,
    build_definitions,
    find_potential_aliases,
    get_canonical_alias,
    GuardException,
)
from numba.core.analysis import compute_cfg_from_blocks
from numba.core.typing import npydecl, signature
import copy
from numba.core.extending import intrinsic
import llvmlite

# Sentinel equivalence-class ids used by the analysis.
UNKNOWN_CLASS = -1
CONST_CLASS = 0
MAP_TYPES = [numpy.ufunc]

# Extension hook registry; populated by other modules at import time.
array_analysis_extensions = {}

# declaring call classes
# Names of numpy array-creation calls recognized by the analysis.
array_creation = ["empty", "zeros", "ones", "full"]

# numpy.random entry points, grouped by how their size argument is passed.
random_int_args = ["rand", "randn"]

random_1arg_size = [
    "ranf",
    "random_sample",
    "sample",
    "random",
    "standard_normal",
]

random_2arg_sizelast = [
    "chisquare",
    "weibull",
    "power",
    "geometric",
    "exponential",
    "poisson",
    "rayleigh",
]

random_3arg_sizelast = [
    "normal",
    "uniform",
    "beta",
    "binomial",
    "f",
    "gamma",
    "lognormal",
    "laplace",
]

random_calls = (
    random_int_args
    + random_1arg_size
    + random_2arg_sizelast
    + random_3arg_sizelast
    + ["randint", "triangular"]
)


@intrinsic
def wrap_index(typingctx, idx, size):
    """
    Calculate index value "idx" relative to a size "size" value as
    (idx % size), where "size" is known to be positive.
    Note that we use the mod(%) operation here instead of
    (idx < 0 ? idx + size : idx) because we may have situations
    where idx > size due to the way indices are calculated
    during slice/range analysis.

    Both idx and size have to be Integer types.
    size should be from the array size vars that array_analysis
    adds and the bitwidth should match the platform maximum.
    """
    require(isinstance(idx, types.scalars.Integer))
    require(isinstance(size, types.scalars.Integer))

    # We need both idx and size to be platform size so that we can compare.
    unified_ty = types.intp if size.signed else types.uintp
    idx_unified = types.intp if idx.signed else types.uintp

    def codegen(context, builder, sig, args):
        ll_idx_unified_ty = context.get_data_type(idx_unified)
        ll_unified_ty = context.get_data_type(unified_ty)
        # Widen each operand to platform width, preserving its signedness
        # (sign-extend signed values, zero-extend unsigned ones).
        if idx_unified.signed:
            idx = builder.sext(args[0], ll_idx_unified_ty)
        else:
            idx = builder.zext(args[0], ll_idx_unified_ty)
        if unified_ty.signed:
            size = builder.sext(args[1], ll_unified_ty)
        else:
            size = builder.zext(args[1], ll_unified_ty)
        neg_size = builder.neg(size)
        zero = llvmlite.ir.Constant(ll_unified_ty, 0)
        # If idx is unsigned then these signed comparisons will fail in those
        # cases where the idx has the highest bit set, namely more than 2**63
        # on 64-bit platforms.
        idx_negative = builder.icmp_signed("<", idx, zero)
        pos_oversize = builder.icmp_signed(">=", idx, size)
        neg_oversize = builder.icmp_signed("<=", idx, neg_size)
        # Branch-free clamp; mirrors wrap_index_literal below:
        #   negative idx -> idx + size, clamped below at 0
        #   non-negative idx -> idx, clamped above at size
        pos_res = builder.select(pos_oversize, size, idx)
        neg_res = builder.select(neg_oversize, zero, builder.add(idx, size))
        mod = builder.select(idx_negative, neg_res, pos_res)
        return mod

    return signature(unified_ty, idx, size), codegen


def wrap_index_literal(idx, size):
    """Pure-Python equivalent of wrap_index for compile-time constants:
    shift negative indices by size, then clamp the result into [0, size].
    """
    if idx < 0:
        if idx <= -size:
            return 0
        else:
            return idx + size
    else:
        if idx >= size:
            return size
        else:
            return idx


@intrinsic
def assert_equiv(typingctx, *val):
    """
    A function that asserts the inputs are of equivalent size,
    and throws runtime error when they are not. The input is
    a vararg that contains an error message, followed by a set
    of objects of either array, tuple or integer.
    """
    if len(val) > 1:
        # Make sure argument is a single tuple type. Note that this only
        # happens when IR containing assert_equiv call is being compiled
        # (and going through type inference) again.
+ val = (types.StarArgTuple(val),) + + assert len(val[0]) > 1 + # Arguments must be either array, tuple, or integer + assert all( + isinstance(a, ( + types.ArrayCompatible, + types.BaseTuple, + types.SliceType, + types.Integer + )) + for a in val[0][1:] + ) + if not isinstance(val[0][0], types.StringLiteral): + raise errors.TypingError('first argument must be a StringLiteral') + + def codegen(context, builder, sig, args): + assert len(args) == 1 # it is a vararg tuple + tup = cgutils.unpack_tuple(builder, args[0]) + tup_type = sig.args[0] + msg = sig.args[0][0].literal_value + + def unpack_shapes(a, aty): + if isinstance(aty, types.ArrayCompatible): + ary = context.make_array(aty)(context, builder, a) + return cgutils.unpack_tuple(builder, ary.shape) + elif isinstance(aty, types.BaseTuple): + return cgutils.unpack_tuple(builder, a) + else: # otherwise it is a single integer + return [a] + + def pairwise(a, aty, b, bty): + ashapes = unpack_shapes(a, aty) + bshapes = unpack_shapes(b, bty) + assert len(ashapes) == len(bshapes) + for (m, n) in zip(ashapes, bshapes): + m_eq_n = builder.icmp_unsigned('==', m, n) + with builder.if_else(m_eq_n) as (then, orelse): + with then: + pass + with orelse: + context.call_conv.return_user_exc( + builder, AssertionError, (msg,) + ) + + for i in range(1, len(tup_type) - 1): + pairwise(tup[i], tup_type[i], tup[i + 1], tup_type[i + 1]) + r = context.get_constant_generic(builder, types.NoneType, None) + return r + + return signature(types.none, *val), codegen + + +class EquivSet(object): + + """EquivSet keeps track of equivalence relations between + a set of objects. + """ + + def __init__(self, obj_to_ind=None, ind_to_obj=None, next_ind=0): + """Create a new EquivSet object. Optional keyword arguments are for + internal use only. + """ + # obj_to_ind maps object to equivalence index (sometimes also called + # equivalence class) is a non-negative number that uniquely identifies + # a set of objects that are equivalent. 
+ self.obj_to_ind = obj_to_ind if obj_to_ind else {} + # ind_to_obj maps equivalence index to a list of objects. + self.ind_to_obj = ind_to_obj if ind_to_obj else {} + # next index number that is incremented each time a new equivalence + # relation is created. + self.next_ind = next_ind + + def empty(self): + """Return an empty EquivSet object. + """ + return EquivSet() + + def clone(self): + """Return a new copy. + """ + return EquivSet( + obj_to_ind=copy.deepcopy(self.obj_to_ind), + ind_to_obj=copy.deepcopy(self.ind_to_obj), + next_id=self.next_ind, + ) + + def __repr__(self): + return "EquivSet({})".format(self.ind_to_obj) + + def is_empty(self): + """Return true if the set is empty, or false otherwise. + """ + return self.obj_to_ind == {} + + def _get_ind(self, x): + """Return the internal index (greater or equal to 0) of the given + object, or -1 if not found. + """ + return self.obj_to_ind.get(x, -1) + + def _get_or_add_ind(self, x): + """Return the internal index (greater or equal to 0) of the given + object, or create a new one if not found. + """ + if x in self.obj_to_ind: + i = self.obj_to_ind[x] + else: + i = self.next_ind + self.next_ind += 1 + return i + + def _insert(self, objs): + """Base method that inserts a set of equivalent objects by modifying + self. + """ + assert len(objs) > 1 + + inds = tuple(self._get_or_add_ind(x) for x in objs) + ind = min(inds) + + if config.DEBUG_ARRAY_OPT >= 2: + print("_insert:", objs, inds) + + if not (ind in self.ind_to_obj): + self.ind_to_obj[ind] = [] + + for i, obj in zip(inds, objs): + if i == ind: + if not (obj in self.ind_to_obj[ind]): + self.ind_to_obj[ind].append(obj) + self.obj_to_ind[obj] = ind + else: + if i in self.ind_to_obj: + # those already existing are reassigned + for x in self.ind_to_obj[i]: + self.obj_to_ind[x] = ind + self.ind_to_obj[ind].append(x) + del self.ind_to_obj[i] + else: + # those that are new are assigned. 
+ self.obj_to_ind[obj] = ind + self.ind_to_obj[ind].append(obj) + + def is_equiv(self, *objs): + """Try to derive if given objects are equivalent, return true + if so, or false otherwise. + """ + inds = [self._get_ind(x) for x in objs] + ind = max(inds) + if ind != -1: + return all(i == ind for i in inds) + else: + return all([x == objs[0] for x in objs]) + + def get_equiv_const(self, obj): + """Check if obj is equivalent to some int constant, and return + the constant if found, or None otherwise. + """ + ind = self._get_ind(obj) + if ind >= 0: + objs = self.ind_to_obj[ind] + for x in objs: + if isinstance(x, int): + return x + return None + + def get_equiv_set(self, obj): + """Return the set of equivalent objects. + """ + ind = self._get_ind(obj) + if ind >= 0: + return set(self.ind_to_obj[ind]) + return set() + + def insert_equiv(self, *objs): + """Insert a set of equivalent objects by modifying self. This + method can be overloaded to transform object type before insertion. + """ + return self._insert(objs) + + def intersect(self, equiv_set): + """ Return the intersection of self and the given equiv_set, + without modifying either of them. The result will also keep + old equivalence indices unchanged. + """ + new_set = self.empty() + new_set.next_ind = self.next_ind + + for objs in equiv_set.ind_to_obj.values(): + inds = tuple(self._get_ind(x) for x in objs) + ind_to_obj = {} + + for i, x in zip(inds, objs): + if i in ind_to_obj: + ind_to_obj[i].append(x) + elif i >= 0: + ind_to_obj[i] = [x] + + for v in ind_to_obj.values(): + if len(v) > 1: + new_set._insert(v) + + return new_set + + +class ShapeEquivSet(EquivSet): + + """Just like EquivSet, except that it accepts only numba IR variables + and constants as objects, guided by their types. Arrays are considered + equivalent as long as their shapes are equivalent. Scalars are + equivalent only when they are equal in value. Tuples are equivalent + when they are of the same size, and their elements are equivalent. 
+ """ + + def __init__( + self, + typemap, + defs=None, + ind_to_var=None, + obj_to_ind=None, + ind_to_obj=None, + next_id=0, + ind_to_const=None, + ): + """Create a new ShapeEquivSet object, where typemap is a dictionary + that maps variable names to their types, and it will not be modified. + Optional keyword arguments are for internal use only. + """ + self.typemap = typemap + # defs maps variable name to an int, where + # 1 means the variable is defined only once, and numbers greater + # than 1 means defined more than once. + self.defs = defs if defs else {} + # ind_to_var maps index number to a list of variables (of ir.Var type). + # It is used to retrieve defined shape variables given an equivalence + # index. + self.ind_to_var = ind_to_var if ind_to_var else {} + # ind_to_const maps index number to a constant, if known. + self.ind_to_const = ind_to_const if ind_to_const else {} + + super(ShapeEquivSet, self).__init__(obj_to_ind, ind_to_obj, next_id) + + def empty(self): + """Return an empty ShapeEquivSet. + """ + return ShapeEquivSet(self.typemap, {}) + + def clone(self): + """Return a new copy. + """ + return ShapeEquivSet( + self.typemap, + defs=copy.copy(self.defs), + ind_to_var=copy.copy(self.ind_to_var), + obj_to_ind=copy.deepcopy(self.obj_to_ind), + ind_to_obj=copy.deepcopy(self.ind_to_obj), + next_id=self.next_ind, + ind_to_const=copy.deepcopy(self.ind_toconst), + ) + + def __repr__(self): + return "ShapeEquivSet({}, ind_to_var={}, ind_to_const={})".format( + self.ind_to_obj, self.ind_to_var, self.ind_to_const + ) + + def _get_names(self, obj): + """Return a set of names for the given obj, where array and tuples + are broken down to their individual shapes or elements. This is + safe because both Numba array shapes and Python tuples are immutable. 
+ """ + if isinstance(obj, ir.Var) or isinstance(obj, str): + name = obj if isinstance(obj, str) else obj.name + if name not in self.typemap: + return (name,) + + typ = self.typemap[name] + if isinstance(typ, (types.BaseTuple, types.ArrayCompatible)): + ndim = (typ.ndim + if isinstance(typ, types.ArrayCompatible) + else len(typ)) + # Treat 0d array as if it were a scalar. + if ndim == 0: + return (name,) + else: + return tuple("{}#{}".format(name, i) for i in range(ndim)) + else: + return (name,) + elif isinstance(obj, ir.Const): + if isinstance(obj.value, tuple): + return obj.value + else: + return (obj.value,) + elif isinstance(obj, tuple): + + def get_names(x): + names = self._get_names(x) + if len(names) != 0: + return names[0] + return names + + return tuple(get_names(x) for x in obj) + elif isinstance(obj, int): + return (obj,) + if config.DEBUG_ARRAY_OPT >= 1: + print( + f"Ignoring untracked object type {type(obj)} in ShapeEquivSet") + return () + + def is_equiv(self, *objs): + """Overload EquivSet.is_equiv to handle Numba IR variables and + constants. + """ + assert len(objs) > 1 + obj_names = [self._get_names(x) for x in objs] + obj_names = [x for x in obj_names if x != ()] # rule out 0d shape + if len(obj_names) <= 1: + return False + ndims = [len(names) for names in obj_names] + ndim = ndims[0] + if not all(ndim == x for x in ndims): + if config.DEBUG_ARRAY_OPT >= 1: + print("is_equiv: Dimension mismatch for {}".format(objs)) + return False + for i in range(ndim): + names = [obj_name[i] for obj_name in obj_names] + if not super(ShapeEquivSet, self).is_equiv(*names): + return False + return True + + def get_equiv_const(self, obj): + """If the given object is equivalent to a constant scalar, + return the scalar value, or None otherwise. 
+ """ + names = self._get_names(obj) + if len(names) != 1: + return None + return super(ShapeEquivSet, self).get_equiv_const(names[0]) + + def get_equiv_var(self, obj): + """If the given object is equivalent to some defined variable, + return the variable, or None otherwise. + """ + names = self._get_names(obj) + if len(names) != 1: + return None + ind = self._get_ind(names[0]) + vs = self.ind_to_var.get(ind, []) + return vs[0] if vs != [] else None + + def get_equiv_set(self, obj): + """Return the set of equivalent objects. + """ + names = self._get_names(obj) + if len(names) != 1: + return None + return super(ShapeEquivSet, self).get_equiv_set(names[0]) + + def _insert(self, objs): + """Overload EquivSet._insert to manage ind_to_var dictionary. + """ + inds = [] + for obj in objs: + if obj in self.obj_to_ind: + inds.append(self.obj_to_ind[obj]) + varlist = [] + constval = None + names = set() + for i in sorted(inds): + if i in self.ind_to_var: + for x in self.ind_to_var[i]: + if not (x.name in names): + varlist.append(x) + names.add(x.name) + if i in self.ind_to_const: + assert constval is None + constval = self.ind_to_const[i] + super(ShapeEquivSet, self)._insert(objs) + new_ind = self.obj_to_ind[objs[0]] + for i in set(inds): + if i in self.ind_to_var: + del self.ind_to_var[i] + self.ind_to_var[new_ind] = varlist + if constval is not None: + self.ind_to_const[new_ind] = constval + + def insert_equiv(self, *objs): + """Overload EquivSet.insert_equiv to handle Numba IR variables and + constants. Input objs are either variable or constant, and at least + one of them must be variable. 
+ """ + assert len(objs) > 1 + obj_names = [self._get_names(x) for x in objs] + obj_names = [x for x in obj_names if x != ()] # rule out 0d shape + if len(obj_names) <= 1: + return + names = sum([list(x) for x in obj_names], []) + ndims = [len(x) for x in obj_names] + ndim = ndims[0] + assert all( + ndim == x for x in ndims + ), "Dimension mismatch for {}".format(objs) + varlist = [] + constlist = [] + for obj in objs: + if not isinstance(obj, tuple): + obj = (obj,) + for var in obj: + if isinstance(var, ir.Var) and not (var.name in varlist): + # favor those already defined, move to front of varlist + if var.name in self.defs: + varlist.insert(0, var) + else: + varlist.append(var) + if isinstance(var, ir.Const) and not (var.value in constlist): + constlist.append(var.value) + + # try to populate ind_to_var if variables are present + for obj in varlist: + name = obj.name + if name in names and not (name in self.obj_to_ind): + self.ind_to_obj[self.next_ind] = [name] + self.obj_to_ind[name] = self.next_ind + self.ind_to_var[self.next_ind] = [obj] + self.next_ind += 1 + + # create equivalence classes for previously unseen constants + for const in constlist: + if const in names and not (const in self.obj_to_ind): + self.ind_to_obj[self.next_ind] = [const] + self.obj_to_ind[const] = self.next_ind + self.ind_to_const[self.next_ind] = const + self.next_ind += 1 + + some_change = False + + for i in range(ndim): + names = [obj_name[i] for obj_name in obj_names] + ie_res = super(ShapeEquivSet, self).insert_equiv(*names) + some_change = some_change or ie_res + + return some_change + + def has_shape(self, name): + """Return true if the shape of the given variable is available. + """ + return self.get_shape(name) is not None + + def get_shape(self, name): + """Return a tuple of variables that corresponds to the shape + of the given array, or None if not found. 
+ """ + return guard(self._get_shape, name) + + def _get_shape(self, name): + """Return a tuple of variables that corresponds to the shape + of the given array, or raise GuardException if not found. + """ + inds = self.get_shape_classes(name) + require(inds != ()) + shape = [] + for i in inds: + require(i in self.ind_to_var) + vs = self.ind_to_var[i] + if vs != []: + shape.append(vs[0]) + else: + require(i in self.ind_to_const) + vs = self.ind_to_const[i] + shape.append(vs) + return tuple(shape) + + def get_shape_classes(self, name): + """Instead of the shape tuple, return tuple of int, where + each int is the corresponding class index of the size object. + Unknown shapes are given class index -1. Return empty tuple + if the input name is a scalar variable. + """ + if isinstance(name, ir.Var): + name = name.name + typ = self.typemap[name] if name in self.typemap else None + if not ( + isinstance(typ, ( + types.BaseTuple, types.SliceType, types.ArrayCompatible + )) + ): + return [] + # Treat 0d arrays like scalars. + if isinstance(typ, types.ArrayCompatible) and typ.ndim == 0: + return [] + names = self._get_names(name) + inds = tuple(self._get_ind(name) for name in names) + return inds + + def intersect(self, equiv_set): + """Overload the intersect method to handle ind_to_var. + """ + newset = super(ShapeEquivSet, self).intersect(equiv_set) + ind_to_var = {} + for i, objs in newset.ind_to_obj.items(): + assert len(objs) > 0 + obj = objs[0] + assert obj in self.obj_to_ind + assert obj in equiv_set.obj_to_ind + j = self.obj_to_ind[obj] + k = equiv_set.obj_to_ind[obj] + assert j in self.ind_to_var + assert k in equiv_set.ind_to_var + varlist = [] + names = [x.name for x in equiv_set.ind_to_var[k]] + for x in self.ind_to_var[j]: + if x.name in names: + varlist.append(x) + ind_to_var[i] = varlist + newset.ind_to_var = ind_to_var + return newset + + def define(self, name, redefined): + """Increment the internal count of how many times a variable is being + defined. 
Most variables in Numba IR are SSA, i.e., defined only once, + but not all of them. When a variable is being re-defined, it must + be removed from the equivalence relation and added to the redefined + set but only if that redefinition is not known to have the same + equivalence classes. Those variables redefined are removed from all + the blocks' equivalence sets later. + + Arrays passed to define() use their whole name but these do not + appear in the equivalence sets since they are stored there per + dimension. Calling _get_names() here converts array names to + dimensional names. + + This function would previously invalidate if there were any multiple + definitions of a variable. However, we realized that this behavior + is overly restrictive. You need only invalidate on multiple + definitions if they are not known to be equivalent. So, the + equivalence insertion functions now return True if some change was + made (meaning the definition was not equivalent) and False + otherwise. If no change was made, then define() need not be + called. For no change to have been made, the variable must + already be present. If the new definition of the var has the + case where lhs and rhs are in the same equivalence class then + again, no change will be made and define() need not be called + or the variable invalidated. + """ + if isinstance(name, ir.Var): + name = name.name + if name in self.defs: + self.defs[name] += 1 + name_res = list(self._get_names(name)) + for one_name in name_res: + # NOTE: variable being redefined, must invalidate previous + # equivalences. Believe it is a rare case, and only happens to + # scalar accumuators. 
+ if one_name in self.obj_to_ind: + redefined.add( + one_name + ) # remove this var from all equiv sets + i = self.obj_to_ind[one_name] + del self.obj_to_ind[one_name] + self.ind_to_obj[i].remove(one_name) + if self.ind_to_obj[i] == []: + del self.ind_to_obj[i] + assert i in self.ind_to_var + names = [x.name for x in self.ind_to_var[i]] + if name in names: + j = names.index(name) + del self.ind_to_var[i][j] + if self.ind_to_var[i] == []: + del self.ind_to_var[i] + # no more size variables, remove equivalence too + if i in self.ind_to_obj: + for obj in self.ind_to_obj[i]: + del self.obj_to_ind[obj] + del self.ind_to_obj[i] + else: + self.defs[name] = 1 + + def union_defs(self, defs, redefined): + """Union with the given defs dictionary. This is meant to handle + branch join-point, where a variable may have been defined in more + than one branches. + """ + for k, v in defs.items(): + if v > 0: + self.define(k, redefined) + + +class SymbolicEquivSet(ShapeEquivSet): + + """Just like ShapeEquivSet, except that it also reasons about variable + equivalence symbolically by using their arithmetic definitions. + The goal is to automatically derive the equivalence of array ranges + (slicing). For instance, a[1:m] and a[0:m-1] shall be considered + size-equivalence. + """ + + def __init__( + self, + typemap, + def_by=None, + ref_by=None, + ext_shapes=None, + defs=None, + ind_to_var=None, + obj_to_ind=None, + ind_to_obj=None, + next_id=0, + ): + """Create a new SymbolicEquivSet object, where typemap is a dictionary + that maps variable names to their types, and it will not be modified. + Optional keyword arguments are for internal use only. + """ + # A "defined-by" table that maps A to a tuple of (B, i), which + # means A is defined as: A = B + i, where A,B are variable names, + # and i is an integer constants. 
+ self.def_by = def_by if def_by else {} + # A "referred-by" table that maps A to a list of [(B, i), (C, j) ...], + # which implies a sequence of definitions: B = A - i, C = A - j, and + # so on, where A,B,C,... are variable names, and i,j,... are + # integer constants. + self.ref_by = ref_by if ref_by else {} + # A extended shape table that can map an arbitrary object to a shape, + # currently used to remember shapes for SetItem IR node, and wrapped + # indices for Slice objects. + self.ext_shapes = ext_shapes if ext_shapes else {} + # rel_map keeps a map of relative sizes that we have seen so + # that if we compute the same relative sizes different times + # in different ways we can associate those two instances + # of the same relative size to the same equivalence class. + self.rel_map = {} + # wrap_index() computes the effectual index given a slice and a + # dimension's size. We need to be able to know that two wrap_index + # calls are equivalent. They are known to be equivalent if the slice + # and dimension sizes of the two wrap_index calls are equivalent. + # wrap_map maps from a tuple of equivalence class ids for a slice and + # a dimension size to some new equivalence class id for the output size. + self.wrap_map = {} + super(SymbolicEquivSet, self).__init__( + typemap, defs, ind_to_var, obj_to_ind, ind_to_obj, next_id + ) + + def empty(self): + """Return an empty SymbolicEquivSet. + """ + return SymbolicEquivSet(self.typemap) + + def __repr__(self): + return ( + "SymbolicEquivSet({}, ind_to_var={}, def_by={}, " + "ref_by={}, ext_shapes={})".format( + self.ind_to_obj, + self.ind_to_var, + self.def_by, + self.ref_by, + self.ext_shapes, + ) + ) + + def clone(self): + """Return a new copy. 
+ """ + return SymbolicEquivSet( + self.typemap, + def_by=copy.copy(self.def_by), + ref_by=copy.copy(self.ref_by), + ext_shapes=copy.copy(self.ext_shapes), + defs=copy.copy(self.defs), + ind_to_var=copy.copy(self.ind_to_var), + obj_to_ind=copy.deepcopy(self.obj_to_ind), + ind_to_obj=copy.deepcopy(self.ind_to_obj), + next_id=self.next_ind, + ) + + def get_rel(self, name): + """Retrieve a definition pair for the given variable, + or return None if it is not available. + """ + return guard(self._get_or_set_rel, name) + + def _get_or_set_rel(self, name, func_ir=None): + """Retrieve a definition pair for the given variable, + and if it is not already available, try to look it up + in the given func_ir, and remember it for future use. + """ + if isinstance(name, ir.Var): + name = name.name + require(self.defs.get(name, 0) == 1) + if name in self.def_by: + return self.def_by[name] + else: + require(func_ir is not None) + + def plus(x, y): + x_is_const = isinstance(x, int) + y_is_const = isinstance(y, int) + if x_is_const: + if y_is_const: + return x + y + else: + (var, offset) = y + return (var, x + offset) + else: + (var, offset) = x + if y_is_const: + return (var, y + offset) + else: + return None + + def minus(x, y): + if isinstance(y, int): + return plus(x, -y) + elif ( + isinstance(x, tuple) + and isinstance(y, tuple) + and x[0] == y[0] + ): + return minus(x[1], y[1]) + else: + return None + + expr = get_definition(func_ir, name) + value = (name, 0) # default to its own name + if isinstance(expr, ir.Expr): + if expr.op == "call": + fname, mod_name = find_callname( + func_ir, expr, typemap=self.typemap + ) + if ( + fname == "wrap_index" + and mod_name == "numba.parfors.array_analysis" + ): + index = tuple( + self.obj_to_ind.get(x.name, -1) for x in expr.args + ) + # If wrap_index for a slice works on a variable + # that is not analyzable (e.g., multiple definitions) + # then we have to return None here since we can't know + # how that size will compare to others if we 
can't + # analyze some part of the slice. + if -1 in index: + return None + names = self.ext_shapes.get(index, []) + names.append(name) + if len(names) > 0: + self._insert(names) + self.ext_shapes[index] = names + elif expr.op == "binop": + lhs = self._get_or_set_rel(expr.lhs, func_ir) + rhs = self._get_or_set_rel(expr.rhs, func_ir) + # If either the lhs or rhs is not analyzable + # then don't try to record information this var. + if lhs is None or rhs is None: + return None + elif expr.fn == operator.add: + value = plus(lhs, rhs) + elif expr.fn == operator.sub: + value = minus(lhs, rhs) + elif isinstance(expr, ir.Const) and isinstance(expr.value, int): + value = expr.value + require(value is not None) + # update def_by table + self.def_by[name] = value + if isinstance(value, int) or ( + isinstance(value, tuple) + and (value[0] != name or value[1] != 0) + ): + # update ref_by table too + if isinstance(value, tuple): + (var, offset) = value + if not (var in self.ref_by): + self.ref_by[var] = [] + self.ref_by[var].append((name, -offset)) + # insert new equivalence if found + ind = self._get_ind(var) + if ind >= 0: + objs = self.ind_to_obj[ind] + names = [] + for obj in objs: + if obj in self.ref_by: + names += [ + x + for (x, i) in self.ref_by[obj] + if i == -offset + ] + if len(names) > 1: + super(SymbolicEquivSet, self)._insert(names) + return value + + def define(self, var, redefined, func_ir=None, typ=None): + """Besides incrementing the definition count of the given variable + name, it will also retrieve and simplify its definition from func_ir, + and remember the result for later equivalence comparison. Supported + operations are: + 1. arithmetic plus and minus with constants + 2. 
wrap_index (relative to some given size) + """ + if isinstance(var, ir.Var): + name = var.name + else: + name = var + super(SymbolicEquivSet, self).define(name, redefined) + if ( + func_ir + and self.defs.get(name, 0) == 1 + and isinstance(typ, types.Number) + ): + value = guard(self._get_or_set_rel, name, func_ir) + # turn constant definition into equivalence + if isinstance(value, int): + self._insert([name, value]) + if isinstance(var, ir.Var): + ind = self._get_or_add_ind(name) + if not (ind in self.ind_to_obj): + self.ind_to_obj[ind] = [name] + self.obj_to_ind[name] = ind + if ind in self.ind_to_var: + self.ind_to_var[ind].append(var) + else: + self.ind_to_var[ind] = [var] + return True + + def _insert(self, objs): + """Overload _insert method to handle ind changes between relative + objects. Returns True if some change is made, false otherwise. + """ + indset = set() + uniqs = set() + for obj in objs: + ind = self._get_ind(obj) + if ind == -1: + uniqs.add(obj) + elif not (ind in indset): + uniqs.add(obj) + indset.add(ind) + if len(uniqs) <= 1: + return False + uniqs = list(uniqs) + super(SymbolicEquivSet, self)._insert(uniqs) + objs = self.ind_to_obj[self._get_ind(uniqs[0])] + + # New equivalence guided by def_by and ref_by + offset_dict = {} + + def get_or_set(d, k): + if k in d: + v = d[k] + else: + v = [] + d[k] = v + return v + + for obj in objs: + if obj in self.def_by: + value = self.def_by[obj] + if isinstance(value, tuple): + (name, offset) = value + get_or_set(offset_dict, -offset).append(name) + if name in self.ref_by: # relative to name + for (v, i) in self.ref_by[name]: + get_or_set(offset_dict, -(offset + i)).append(v) + if obj in self.ref_by: + for (name, offset) in self.ref_by[obj]: + get_or_set(offset_dict, offset).append(name) + for names in offset_dict.values(): + self._insert(names) + return True + + def set_shape_setitem(self, obj, shape): + """remember shapes of SetItem IR nodes. 
+ """ + assert isinstance(obj, (ir.StaticSetItem, ir.SetItem)) + self.ext_shapes[obj] = shape + + def _get_shape(self, obj): + """Overload _get_shape to retrieve the shape of SetItem IR nodes. + """ + if isinstance(obj, (ir.StaticSetItem, ir.SetItem)): + require(obj in self.ext_shapes) + return self.ext_shapes[obj] + else: + assert isinstance(obj, ir.Var) + typ = self.typemap[obj.name] + # for slice type, return the shape variable itself + if isinstance(typ, types.SliceType): + return (obj,) + else: + return super(SymbolicEquivSet, self)._get_shape(obj) + + +class WrapIndexMeta(object): + """ + Array analysis should be able to analyze all the function + calls that it adds to the IR. That way, array analysis can + be run as often as needed and you should get the same + equivalencies. One modification to the IR that array analysis + makes is the insertion of wrap_index calls. Thus, repeated + array analysis passes should be able to analyze these wrap_index + calls. The difficulty of these calls is that the equivalence + class of the left-hand side of the assignment is not present in + the arguments to wrap_index in the right-hand side. Instead, + the equivalence class of the wrap_index output is a combination + of the wrap_index args. The important thing to + note is that if the equivalence classes of the slice size + and the dimension's size are the same for two wrap index + calls then we can be assured of the answer being the same. + So, we maintain the wrap_map dict that maps from a tuple + of equivalence class ids for the slice and dimension size + to some new equivalence class id for the output size. + However, when we are analyzing the first such wrap_index + call we don't have a variable there to associate to the + size since we're in the process of analyzing the instruction + that creates that mapping. 
So, instead we return an object + of this special class and analyze_inst will establish the + connection between a tuple of the parts of this object + below and the left-hand side variable. + """ + + def __init__(self, slice_size, dim_size): + self.slice_size = slice_size + self.dim_size = dim_size + + +class ArrayAnalysis(object): + aa_count = 0 + + """Analyzes Numpy array computations for properties such as + shape/size equivalence, and keeps track of them on a per-block + basis. The analysis should only be run once because it modifies + the incoming IR by inserting assertion statements that safeguard + parfor optimizations. + """ + + def __init__(self, context, func_ir, typemap, calltypes): + self.context = context + self.func_ir = func_ir + self.typemap = typemap + self.calltypes = calltypes + + # EquivSet of variables, indexed by block number + self.equiv_sets = {} + # keep attr calls to arrays like t=A.sum() as {t:('sum',A)} + self.array_attr_calls = {} + # keep attrs of objects (value,attr)->shape_var + self.object_attrs = {} + # keep prepended instructions from conditional branch + self.prepends = {} + # keep track of pruned precessors when branch degenerates to jump + self.pruned_predecessors = {} + + def get_equiv_set(self, block_label): + """Return the equiv_set object of an block given its label. + """ + return self.equiv_sets[block_label] + + def remove_redefineds(self, redefineds): + """Take a set of variables in redefineds and go through all + the currently existing equivalence sets (created in topo order) + and remove that variable from all of them since it is multiply + defined within the function. + """ + unused = set() + for r in redefineds: + for eslabel in self.equiv_sets: + es = self.equiv_sets[eslabel] + es.define(r, unused) + + def run(self, blocks=None, equiv_set=None): + """run array shape analysis on the given IR blocks, resulting in + modified IR and finalized EquivSet for each block. 
+ """ + if blocks is None: + blocks = self.func_ir.blocks + + self.func_ir._definitions = build_definitions(self.func_ir.blocks) + + if equiv_set is None: + init_equiv_set = SymbolicEquivSet(self.typemap) + else: + init_equiv_set = equiv_set + + self.alias_map, self.arg_aliases = find_potential_aliases( + blocks, + self.func_ir.arg_names, + self.typemap, + self.func_ir + ) + + aa_count_save = ArrayAnalysis.aa_count + ArrayAnalysis.aa_count += 1 + if config.DEBUG_ARRAY_OPT >= 1: + print("Starting ArrayAnalysis:", aa_count_save) + dprint_func_ir(self.func_ir, "before array analysis", blocks) + + if config.DEBUG_ARRAY_OPT >= 1: + print( + "ArrayAnalysis variable types: ", sorted(self.typemap.items()) + ) + print("ArrayAnalysis call types: ", self.calltypes) + + cfg = compute_cfg_from_blocks(blocks) + topo_order = find_topo_order(blocks, cfg=cfg) + # Traverse blocks in topological order + self._run_on_blocks(topo_order, blocks, cfg, init_equiv_set) + + if config.DEBUG_ARRAY_OPT >= 1: + self.dump() + print( + "ArrayAnalysis post variable types: ", + sorted(self.typemap.items()), + ) + print("ArrayAnalysis post call types: ", self.calltypes) + + dprint_func_ir(self.func_ir, "after array analysis", blocks) + if config.DEBUG_ARRAY_OPT >= 1: + print("Ending ArrayAnalysis:", aa_count_save) + + def _run_on_blocks(self, topo_order, blocks, cfg, init_equiv_set): + for label in topo_order: + if config.DEBUG_ARRAY_OPT >= 2: + print("Processing block:", label) + block = blocks[label] + scope = block.scope + pending_transforms = self._determine_transform( + cfg, block, label, scope, init_equiv_set + ) + self._combine_to_new_block(block, pending_transforms) + + def _combine_to_new_block(self, block, pending_transforms): + """Combine the new instructions from previous pass into a new block + body. 
+ """ + new_body = [] + for inst, pre, post in pending_transforms: + for instr in pre: + new_body.append(instr) + new_body.append(inst) + for instr in post: + new_body.append(instr) + block.body = new_body + + def _determine_transform(self, cfg, block, label, scope, init_equiv_set): + """Determine the transformation for each instruction in the block + """ + equiv_set = None + # equiv_set is the intersection of predecessors + preds = cfg.predecessors(label) + # some incoming edge may be pruned due to prior analysis + if label in self.pruned_predecessors: + pruned = self.pruned_predecessors[label] + else: + pruned = [] + # Go through each incoming edge, process prepended instructions and + # calculate beginning equiv_set of current block as an intersection + # of incoming ones. + if config.DEBUG_ARRAY_OPT >= 2: + print("preds:", preds) + for (p, q) in preds: + if config.DEBUG_ARRAY_OPT >= 2: + print("p, q:", p, q) + if p in pruned: + continue + if p in self.equiv_sets: + from_set = self.equiv_sets[p].clone() + if config.DEBUG_ARRAY_OPT >= 2: + print("p in equiv_sets", from_set) + if (p, label) in self.prepends: + instrs = self.prepends[(p, label)] + for inst in instrs: + redefined = set() + self._analyze_inst( + label, scope, from_set, inst, redefined + ) + # Remove anything multiply defined in this block + # from every block equivs. + # NOTE: necessary? can't observe effect in testsuite + self.remove_redefineds(redefined) + if equiv_set is None: + equiv_set = from_set + else: + equiv_set = equiv_set.intersect(from_set) + redefined = set() + equiv_set.union_defs(from_set.defs, redefined) + # Remove anything multiply defined in this block + # from every block equivs. + # NOTE: necessary? 
can't observe effect in testsuite + self.remove_redefineds(redefined) + + # Start with a new equiv_set if none is computed + if equiv_set is None: + equiv_set = init_equiv_set + self.equiv_sets[label] = equiv_set + + # Go through instructions in a block, and insert pre/post + # instructions as we analyze them. + pending_transforms = [] + for inst in block.body: + redefined = set() + pre, post = self._analyze_inst( + label, scope, equiv_set, inst, redefined + ) + # Remove anything multiply defined in this block from every block + # equivs. + if len(redefined) > 0: + self.remove_redefineds(redefined) + + pending_transforms.append((inst, pre, post)) + return pending_transforms + + def dump(self): + """dump per-block equivalence sets for debugging purposes. + """ + print("Array Analysis: ", self.equiv_sets) + + def _define(self, equiv_set, var, typ, value): + self.typemap[var.name] = typ + self.func_ir._definitions[var.name] = [value] + redefineds = set() + equiv_set.define(var, redefineds, self.func_ir, typ) + + class AnalyzeResult(object): + def __init__(self, **kwargs): + self.kwargs = kwargs + + def _analyze_inst(self, label, scope, equiv_set, inst, redefined): + pre = [] + post = [] + if config.DEBUG_ARRAY_OPT >= 2: + print("analyze_inst:", inst) + if isinstance(inst, ir.Assign): + lhs = inst.target + typ = self.typemap[lhs.name] + shape = None + if isinstance(typ, types.ArrayCompatible) and typ.ndim == 0: + shape = () + elif isinstance(inst.value, ir.Expr): + result = self._analyze_expr(scope, equiv_set, inst.value, lhs) + if result: + require(isinstance(result, ArrayAnalysis.AnalyzeResult)) + if 'shape' in result.kwargs: + shape = result.kwargs['shape'] + if 'pre' in result.kwargs: + pre.extend(result.kwargs['pre']) + if 'post' in result.kwargs: + post.extend(result.kwargs['post']) + if 'rhs' in result.kwargs: + inst.value = result.kwargs['rhs'] + elif isinstance(inst.value, (ir.Var, ir.Const)): + shape = inst.value + elif isinstance(inst.value, ir.Global): + 
gvalue = inst.value.value + # only integer values can be part of shape + # TODO: support cases with some but not all integer values or + # nested tuples + if (isinstance(gvalue, tuple) + and all(isinstance(v, int) for v in gvalue)): + shape = gvalue + elif isinstance(gvalue, int): + shape = (gvalue,) + elif isinstance(inst.value, ir.Arg): + if ( + isinstance(typ, types.containers.UniTuple) + and isinstance(typ.dtype, types.Integer) + ): + shape = inst.value + elif ( + isinstance(typ, types.containers.Tuple) + and all([isinstance(x, + (types.Integer, types.IntegerLiteral)) + for x in typ.types] + ) + ): + shape = inst.value + + if isinstance(shape, ir.Const): + if isinstance(shape.value, tuple): + loc = shape.loc + shape = tuple(ir.Const(x, loc) for x in shape.value) + elif isinstance(shape.value, int): + shape = (shape,) + else: + shape = None + elif isinstance(shape, ir.Var) and isinstance( + self.typemap[shape.name], types.Integer + ): + shape = (shape,) + elif isinstance(shape, WrapIndexMeta): + """ Here we've got the special WrapIndexMeta object + back from analyzing a wrap_index call. We define + the lhs and then get it's equivalence class then + add the mapping from the tuple of slice size and + dimensional size equivalence ids to the lhs + equivalence id. 
+ """ + equiv_set.define(lhs, redefined, self.func_ir, typ) + lhs_ind = equiv_set._get_ind(lhs.name) + if lhs_ind != -1: + equiv_set.wrap_map[ + (shape.slice_size, shape.dim_size) + ] = lhs_ind + return pre, post + + if isinstance(typ, types.ArrayCompatible): + if ( + shape is not None + and isinstance(shape, ir.Var) + and isinstance( + self.typemap[shape.name], types.containers.BaseTuple + ) + ): + pass + elif ( + shape is None + or isinstance(shape, tuple) + or ( + isinstance(shape, ir.Var) + and not equiv_set.has_shape(shape) + ) + ): + shape = self._gen_shape_call( + equiv_set, lhs, typ.ndim, shape, post + ) + elif isinstance(typ, types.UniTuple): + if shape and isinstance(typ.dtype, types.Integer): + shape = self._gen_shape_call( + equiv_set, lhs, len(typ), shape, post + ) + elif ( + isinstance(typ, types.containers.Tuple) + and all([isinstance(x, + (types.Integer, types.IntegerLiteral)) + for x in typ.types] + ) + ): + shape = self._gen_shape_call( + equiv_set, lhs, len(typ), shape, post + ) + + """ See the comment on the define() function. + + We need only call define(), which will invalidate a variable + from being in the equivalence sets on multiple definitions, + if the variable was not previously defined or if the new + definition would be in a conflicting equivalence class to the + original equivalence class for the variable. + + insert_equiv() returns True if either of these conditions are + True and then we call define() in those cases. + If insert_equiv() returns False then no changes were made and + all equivalence classes are consistent upon a redefinition so + no invalidation is needed and we don't call define(). 
+ """ + needs_define = True + if shape is not None: + needs_define = equiv_set.insert_equiv(lhs, shape) + if needs_define: + equiv_set.define(lhs, redefined, self.func_ir, typ) + elif isinstance(inst, (ir.StaticSetItem, ir.SetItem)): + index = ( + inst.index if isinstance(inst, ir.SetItem) else inst.index_var + ) + result = guard( + self._index_to_shape, scope, equiv_set, inst.target, index + ) + if not result: + return [], [] + if result[0] is not None: + assert isinstance(inst, (ir.StaticSetItem, ir.SetItem)) + inst.index = result[0] + result = result[1] + target_shape = result.kwargs['shape'] + if 'pre' in result.kwargs: + pre = result.kwargs['pre'] + value_shape = equiv_set.get_shape(inst.value) + if value_shape == (): # constant + equiv_set.set_shape_setitem(inst, target_shape) + return pre, [] + elif value_shape is not None: + target_typ = self.typemap[inst.target.name] + require(isinstance(target_typ, types.ArrayCompatible)) + target_ndim = target_typ.ndim + shapes = [target_shape, value_shape] + names = [inst.target.name, inst.value.name] + broadcast_result = self._broadcast_assert_shapes( + scope, equiv_set, inst.loc, shapes, names + ) + require('shape' in broadcast_result.kwargs) + require('pre' in broadcast_result.kwargs) + shape = broadcast_result.kwargs['shape'] + asserts = broadcast_result.kwargs['pre'] + n = len(shape) + # shape dimension must be within target dimension + assert target_ndim >= n + equiv_set.set_shape_setitem(inst, shape) + return pre + asserts, [] + else: + return pre, [] + elif isinstance(inst, ir.Branch): + + def handle_call_binop(cond_def): + br = None + if cond_def.fn == operator.eq: + br = inst.truebr + otherbr = inst.falsebr + cond_val = 1 + elif cond_def.fn == operator.ne: + br = inst.falsebr + otherbr = inst.truebr + cond_val = 0 + lhs_typ = self.typemap[cond_def.lhs.name] + rhs_typ = self.typemap[cond_def.rhs.name] + if br is not None and ( + ( + isinstance(lhs_typ, types.Integer) + and isinstance(rhs_typ, types.Integer) + ) 
+ or ( + isinstance(lhs_typ, types.BaseTuple) + and isinstance(rhs_typ, types.BaseTuple) + ) + ): + loc = inst.loc + args = (cond_def.lhs, cond_def.rhs) + asserts = self._make_assert_equiv( + scope, loc, equiv_set, args + ) + asserts.append( + ir.Assign(ir.Const(cond_val, loc), cond_var, loc) + ) + self.prepends[(label, br)] = asserts + self.prepends[(label, otherbr)] = [ + ir.Assign(ir.Const(1 - cond_val, loc), cond_var, loc) + ] + + cond_var = inst.cond + cond_def = guard(get_definition, self.func_ir, cond_var) + if not cond_def: # phi variable has no single definition + # We'll use equiv_set to try to find a cond_def instead + equivs = equiv_set.get_equiv_set(cond_var) + defs = [] + for name in equivs: + if isinstance(name, str) and name in self.typemap: + var_def = guard( + get_definition, self.func_ir, name, lhs_only=True + ) + if isinstance(var_def, ir.Var): + var_def = var_def.name + if var_def: + defs.append(var_def) + else: + defs.append(name) + defvars = set(filter(lambda x: isinstance(x, str), defs)) + defconsts = set(defs).difference(defvars) + if len(defconsts) == 1: + cond_def = list(defconsts)[0] + elif len(defvars) == 1: + cond_def = guard( + get_definition, self.func_ir, list(defvars)[0] + ) + if isinstance(cond_def, ir.Expr) and cond_def.op == 'binop': + handle_call_binop(cond_def) + elif isinstance(cond_def, ir.Expr) and cond_def.op == 'call': + # this handles bool(predicate) + glbl_bool = guard(get_definition, self.func_ir, cond_def.func) + if glbl_bool is not None and glbl_bool.value is bool: + if len(cond_def.args) == 1: + condition = guard(get_definition, self.func_ir, + cond_def.args[0]) + if (condition is not None and + isinstance(condition, ir.Expr) and + condition.op == 'binop'): + handle_call_binop(condition) + else: + if isinstance(cond_def, ir.Const): + cond_def = cond_def.value + if isinstance(cond_def, int) or isinstance(cond_def, bool): + # condition is always true/false, prune the outgoing edge + pruned_br = inst.falsebr if 
cond_def else inst.truebr + if pruned_br in self.pruned_predecessors: + self.pruned_predecessors[pruned_br].append(label) + else: + self.pruned_predecessors[pruned_br] = [label] + + elif type(inst) in array_analysis_extensions: + # let external calls handle stmt if type matches + f = array_analysis_extensions[type(inst)] + pre, post = f(inst, equiv_set, self.typemap, self) + + return pre, post + + def _analyze_expr(self, scope, equiv_set, expr, lhs): + fname = "_analyze_op_{}".format(expr.op) + try: + fn = getattr(self, fname) + except AttributeError: + return None + return guard(fn, scope, equiv_set, expr, lhs) + + def _analyze_op_getattr(self, scope, equiv_set, expr, lhs): + # TODO: getattr of npytypes.Record + if expr.attr == "T" and self._isarray(expr.value.name): + return self._analyze_op_call_numpy_transpose( + scope, equiv_set, expr.loc, [expr.value], {} + ) + elif expr.attr == "shape": + shape = equiv_set.get_shape(expr.value) + return ArrayAnalysis.AnalyzeResult(shape=shape) + elif expr.attr in ("real", "imag") and self._isarray(expr.value.name): + # Shape of real or imag attr is the same as the shape of the array + # itself. 
+ return ArrayAnalysis.AnalyzeResult(shape=expr.value) + elif self._isarray(lhs.name): + canonical_value = get_canonical_alias( + expr.value.name, self.alias_map + ) + if (canonical_value, expr.attr) in self.object_attrs: + return ArrayAnalysis.AnalyzeResult( + shape=self.object_attrs[(canonical_value, expr.attr)] + ) + else: + typ = self.typemap[lhs.name] + post = [] + shape = self._gen_shape_call( + equiv_set, lhs, typ.ndim, None, post + ) + self.object_attrs[(canonical_value, expr.attr)] = shape + return ArrayAnalysis.AnalyzeResult(shape=shape, post=post) + + return None + + def _analyze_op_cast(self, scope, equiv_set, expr, lhs): + return ArrayAnalysis.AnalyzeResult(shape=expr.value) + + def _analyze_op_exhaust_iter(self, scope, equiv_set, expr, lhs): + var = expr.value + typ = self.typemap[var.name] + if isinstance(typ, types.BaseTuple): + require(len(typ) == expr.count) + require(equiv_set.has_shape(var)) + return ArrayAnalysis.AnalyzeResult(shape=var) + return None + + def gen_literal_slice_part( + self, + arg_val, + loc, + scope, + stmts, + equiv_set, + name="static_literal_slice_part", + ): + # Create var to hold the calculated slice size. + static_literal_slice_part_var = ir.Var(scope, mk_unique_var(name), loc) + static_literal_slice_part_val = ir.Const(arg_val, loc) + static_literal_slice_part_typ = types.IntegerLiteral(arg_val) + # We'll prepend this slice size calculation to the get/setitem. 
+ stmts.append( + ir.Assign( + value=static_literal_slice_part_val, + target=static_literal_slice_part_var, + loc=loc, + ) + ) + self._define( + equiv_set, + static_literal_slice_part_var, + static_literal_slice_part_typ, + static_literal_slice_part_val, + ) + return static_literal_slice_part_var, static_literal_slice_part_typ + + def gen_static_slice_size( + self, lhs_rel, rhs_rel, loc, scope, stmts, equiv_set + ): + the_var, *_ = self.gen_literal_slice_part( + rhs_rel - lhs_rel, + loc, + scope, + stmts, + equiv_set, + name="static_slice_size", + ) + return the_var + + def gen_explicit_neg( + self, + arg, + arg_rel, + arg_typ, + size_typ, + loc, + scope, + dsize, + stmts, + equiv_set, + ): + assert not isinstance(size_typ, int) + # Create var to hold the calculated slice size. + explicit_neg_var = ir.Var(scope, mk_unique_var("explicit_neg"), loc) + explicit_neg_val = ir.Expr.binop(operator.add, dsize, arg, loc=loc) + # Determine the type of that var. Can be literal if we know the + # literal size of the dimension. + explicit_neg_typ = types.intp + self.calltypes[explicit_neg_val] = signature( + explicit_neg_typ, size_typ, arg_typ + ) + # We'll prepend this slice size calculation to the get/setitem. + stmts.append( + ir.Assign(value=explicit_neg_val, target=explicit_neg_var, loc=loc) + ) + self._define( + equiv_set, explicit_neg_var, explicit_neg_typ, explicit_neg_val + ) + return explicit_neg_var, explicit_neg_typ + + def update_replacement_slice( + self, + lhs, + lhs_typ, + lhs_rel, + dsize_rel, + replacement_slice, + slice_index, + need_replacement, + loc, + scope, + stmts, + equiv_set, + size_typ, + dsize, + ): + # Do compile-time calculation of real index value if both the given + # index value and the array length are known at compile time. + known = False + if isinstance(lhs_rel, int): + # If the index and the array size are known then the real index + # can be calculated at compile time. + if lhs_rel == 0: + # Special-case 0 as nothing needing to be done. 
+ known = True + elif isinstance(dsize_rel, int): + known = True + # Calculate the real index. + wil = wrap_index_literal(lhs_rel, dsize_rel) + # If the given index value is between 0 and dsize then + # there's no need to rewrite anything. + if wil != lhs_rel: + if config.DEBUG_ARRAY_OPT >= 2: + print("Replacing slice to hard-code known slice size.") + # Indicate we will need to replace the slice var. + need_replacement = True + literal_var, literal_typ = self.gen_literal_slice_part( + wil, loc, scope, stmts, equiv_set + ) + assert slice_index == 0 or slice_index == 1 + if slice_index == 0: + replacement_slice.args = ( + literal_var, + replacement_slice.args[1], + ) + else: + replacement_slice.args = ( + replacement_slice.args[0], + literal_var, + ) + # Update lhs information with the negative removed. + lhs = replacement_slice.args[slice_index] + lhs_typ = literal_typ + lhs_rel = equiv_set.get_rel(lhs) + elif lhs_rel < 0: + # Indicate we will need to replace the slice var. + need_replacement = True + if config.DEBUG_ARRAY_OPT >= 2: + print("Replacing slice due to known negative index.") + explicit_neg_var, explicit_neg_typ = self.gen_explicit_neg( + lhs, + lhs_rel, + lhs_typ, + size_typ, + loc, + scope, + dsize, + stmts, + equiv_set, + ) + if slice_index == 0: + replacement_slice.args = ( + explicit_neg_var, + replacement_slice.args[1], + ) + else: + replacement_slice.args = ( + replacement_slice.args[0], + explicit_neg_var, + ) + # Update lhs information with the negative removed. + lhs = replacement_slice.args[slice_index] + lhs_typ = explicit_neg_typ + lhs_rel = equiv_set.get_rel(lhs) + return ( + lhs, + lhs_typ, + lhs_rel, + replacement_slice, + need_replacement, + known, + ) + + def slice_size(self, index, dsize, equiv_set, scope, stmts): + """Reason about the size of a slice represented by the "index" + variable, and return a variable that has this size data, or + raise GuardException if it cannot reason about it. 
+ + The computation takes care of negative values used in the slice + with respect to the given dimensional size ("dsize"). + + Extra statements required to produce the result are appended + to parent function's stmts list. + """ + loc = index.loc + # Get the definition of the index variable. + index_def = get_definition(self.func_ir, index) + fname, mod_name = find_callname( + self.func_ir, index_def, typemap=self.typemap + ) + require(fname == 'slice' and mod_name in ('builtins')) + require(len(index_def.args) == 2) + lhs = index_def.args[0] + rhs = index_def.args[1] + size_typ = self.typemap[dsize.name] + lhs_typ = self.typemap[lhs.name] + rhs_typ = self.typemap[rhs.name] + + if config.DEBUG_ARRAY_OPT >= 2: + print(f"slice_size index={index} dsize={dsize} " + f"index_def={index_def} lhs={lhs} rhs={rhs} " + f"size_typ={size_typ} lhs_typ={lhs_typ} rhs_typ={rhs_typ}") + + # Make a deepcopy of the original slice to use as the + # replacement slice, which we will modify as necessary + # below to convert all negative constants in the slice + # to be relative to the dimension size. + replacement_slice = copy.deepcopy(index_def) + need_replacement = False + + # Fill in the left side of the slice's ":" with 0 if it wasn't + # specified. + if isinstance(lhs_typ, types.NoneType): + zero_var = ir.Var(scope, mk_unique_var("zero"), loc) + zero = ir.Const(0, loc) + stmts.append(ir.Assign(value=zero, target=zero_var, loc=loc)) + self._define(equiv_set, zero_var, types.IntegerLiteral(0), zero) + lhs = zero_var + lhs_typ = types.IntegerLiteral(0) + replacement_slice.args = (lhs, replacement_slice.args[1]) + need_replacement = True + if config.DEBUG_ARRAY_OPT >= 2: + print("Replacing slice because lhs is None.") + + # Fill in the right side of the slice's ":" with the array + # length if it wasn't specified. 
+ if isinstance(rhs_typ, types.NoneType): + rhs = dsize + rhs_typ = size_typ + replacement_slice.args = (replacement_slice.args[0], rhs) + need_replacement = True + if config.DEBUG_ARRAY_OPT >= 2: + print("Replacing slice because lhs is None.") + + lhs_rel = equiv_set.get_rel(lhs) + rhs_rel = equiv_set.get_rel(rhs) + dsize_rel = equiv_set.get_rel(dsize) + if config.DEBUG_ARRAY_OPT >= 2: + print( + "lhs_rel", lhs_rel, "rhs_rel", rhs_rel, "dsize_rel", dsize_rel + ) + + # Update replacement slice with the real index value if we can + # compute it at compile time. + [ + lhs, + lhs_typ, + lhs_rel, + replacement_slice, + need_replacement, + lhs_known, + ] = self.update_replacement_slice( + lhs, + lhs_typ, + lhs_rel, + dsize_rel, + replacement_slice, + 0, + need_replacement, + loc, + scope, + stmts, + equiv_set, + size_typ, + dsize, + ) + [ + rhs, + rhs_typ, + rhs_rel, + replacement_slice, + need_replacement, + rhs_known, + ] = self.update_replacement_slice( + rhs, + rhs_typ, + rhs_rel, + dsize_rel, + replacement_slice, + 1, + need_replacement, + loc, + scope, + stmts, + equiv_set, + size_typ, + dsize, + ) + if config.DEBUG_ARRAY_OPT >= 2: + print("lhs_known:", lhs_known) + print("rhs_known:", rhs_known) + + # If neither of the parts of the slice were negative constants + # then we don't need to do slice replacement in the IR. + if not need_replacement: + replacement_slice_var = None + else: + # Create a new var for the replacement slice. + replacement_slice_var = ir.Var( + scope, mk_unique_var("replacement_slice"), loc + ) + # Create a deepcopy of slice calltype so that when we change it + # below the original isn't changed. Make the types of the parts of + # the slice intp. 
+ new_arg_typs = (types.intp, types.intp) + rs_calltype = self.typemap[index_def.func.name].get_call_type( + self.context, new_arg_typs, {} + ) + self.calltypes[replacement_slice] = rs_calltype + stmts.append( + ir.Assign( + value=replacement_slice, + target=replacement_slice_var, + loc=loc, + ) + ) + # The type of the replacement slice is the same type as the + # original. + self.typemap[replacement_slice_var.name] = self.typemap[index.name] + + if config.DEBUG_ARRAY_OPT >= 2: + print( + "after rewriting negatives", + "lhs_rel", + lhs_rel, + "rhs_rel", + rhs_rel, + ) + + if lhs_known and rhs_known: + if config.DEBUG_ARRAY_OPT >= 2: + print("lhs and rhs known so return static size") + return ( + self.gen_static_slice_size( + lhs_rel, rhs_rel, loc, scope, stmts, equiv_set + ), + replacement_slice_var, + ) + + if ( + lhs_rel == 0 + and isinstance(rhs_rel, tuple) + and equiv_set.is_equiv(dsize, rhs_rel[0]) + and rhs_rel[1] == 0 + ): + return dsize, None + + slice_typ = types.intp + orig_slice_typ = slice_typ + + size_var = ir.Var(scope, mk_unique_var("slice_size"), loc) + size_val = ir.Expr.binop(operator.sub, rhs, lhs, loc=loc) + self.calltypes[size_val] = signature(slice_typ, rhs_typ, lhs_typ) + self._define(equiv_set, size_var, slice_typ, size_val) + size_rel = equiv_set.get_rel(size_var) + if config.DEBUG_ARRAY_OPT >= 2: + print("size_rel", size_rel, type(size_rel)) + + wrap_var = ir.Var(scope, mk_unique_var("wrap"), loc) + wrap_def = ir.Global("wrap_index", wrap_index, loc=loc) + fnty = get_global_func_typ(wrap_index) + sig = self.context.resolve_function_type( + fnty, (orig_slice_typ, size_typ), {} + ) + self._define(equiv_set, wrap_var, fnty, wrap_def) + + def gen_wrap_if_not_known(val, val_typ, known): + if not known: + var = ir.Var(scope, mk_unique_var("var"), loc) + var_typ = types.intp + new_value = ir.Expr.call(wrap_var, [val, dsize], {}, loc) + # def_res will be False if there is something unanalyzable + # that prevents a size association from being 
created. + self._define(equiv_set, var, var_typ, new_value) + self.calltypes[new_value] = sig + return (var, var_typ, new_value) + else: + return (val, val_typ, None) + + var1, var1_typ, value1 = gen_wrap_if_not_known(lhs, lhs_typ, lhs_known) + var2, var2_typ, value2 = gen_wrap_if_not_known(rhs, rhs_typ, rhs_known) + + stmts.append(ir.Assign(value=size_val, target=size_var, loc=loc)) + stmts.append(ir.Assign(value=wrap_def, target=wrap_var, loc=loc)) + if value1 is not None: + stmts.append(ir.Assign(value=value1, target=var1, loc=loc)) + if value2 is not None: + stmts.append(ir.Assign(value=value2, target=var2, loc=loc)) + + post_wrap_size_var = ir.Var( + scope, mk_unique_var("post_wrap_slice_size"), loc + ) + post_wrap_size_val = ir.Expr.binop(operator.sub, + var2, + var1, + loc=loc) + self.calltypes[post_wrap_size_val] = signature( + slice_typ, var2_typ, var1_typ + ) + self._define( + equiv_set, post_wrap_size_var, slice_typ, post_wrap_size_val + ) + + stmts.append( + ir.Assign( + value=post_wrap_size_val, target=post_wrap_size_var, loc=loc + ) + ) + + # rel_map keeps a map of relative sizes that we have seen so + # that if we compute the same relative sizes different times + # in different ways we can associate those two instances + # of the same relative size to the same equivalence class. + if isinstance(size_rel, tuple): + if config.DEBUG_ARRAY_OPT >= 2: + print("size_rel is tuple", equiv_set.rel_map) + rel_map_entry = None + for rme, rme_tuple in equiv_set.rel_map.items(): + if rme[1] == size_rel[1] and equiv_set.is_equiv( + rme[0], size_rel[0] + ): + rel_map_entry = rme_tuple + break + + if rel_map_entry is not None: + # We have seen this relative size before so establish + # equivalence to the previous variable. 
+ if config.DEBUG_ARRAY_OPT >= 2: + print("establishing equivalence to", rel_map_entry) + equiv_set.insert_equiv(size_var, rel_map_entry[0]) + equiv_set.insert_equiv(post_wrap_size_var, rel_map_entry[1]) + else: + # The first time we've seen this relative size so + # remember the variable defining that size. + equiv_set.rel_map[size_rel] = (size_var, post_wrap_size_var) + + return post_wrap_size_var, replacement_slice_var + + def _index_to_shape(self, scope, equiv_set, var, ind_var): + """For indexing like var[index] (either write or read), see if + the index corresponds to a range/slice shape. + Returns a 2-tuple where the first item is either None or a ir.Var + to be used to replace the index variable in the outer getitem or + setitem instruction. The second item is also a tuple returning + the shape and prepending instructions. + """ + typ = self.typemap[var.name] + require(isinstance(typ, types.ArrayCompatible)) + ind_typ = self.typemap[ind_var.name] + ind_shape = equiv_set._get_shape(ind_var) + var_shape = equiv_set._get_shape(var) + if isinstance(ind_typ, types.SliceType): + seq_typs = (ind_typ,) + seq = (ind_var,) + else: + require(isinstance(ind_typ, types.BaseTuple)) + seq, op = find_build_sequence(self.func_ir, ind_var) + require(op == "build_tuple") + seq_typs = tuple(self.typemap[x.name] for x in seq) + require(len(ind_shape) == len(seq_typs) == len(var_shape)) + stmts = [] + + def to_shape(typ, index, dsize): + if isinstance(typ, types.SliceType): + return self.slice_size(index, dsize, equiv_set, scope, stmts) + elif isinstance(typ, types.Number): + return None, None + else: + # unknown dimension size for this index, + # so we'll raise GuardException + require(False) + + shape_list = [] + index_var_list = [] + replace_index = False + for (typ, size, dsize, orig_ind) in zip(seq_typs, + ind_shape, + var_shape, + seq): + # Convert the given dimension of the get/setitem index expr. 
+ shape_part, index_var_part = to_shape(typ, size, dsize) + shape_list.append(shape_part) + + # to_shape will return index_var_part as not None if a + # replacement of the slice is required to convert from + # negative indices to positive relative indices. + if index_var_part is not None: + # Remember that we need to replace the build_tuple. + replace_index = True + index_var_list.append(index_var_part) + else: + index_var_list.append(orig_ind) + + # If at least one of the dimensions required a new slice variable + # then we'll need to replace the build_tuple for this get/setitem. + if replace_index: + # Multi-dimensional array access needs a replacement tuple built. + if len(index_var_list) > 1: + # Make a variable to hold the new build_tuple. + replacement_build_tuple_var = ir.Var( + scope, + mk_unique_var("replacement_build_tuple"), + ind_shape[0].loc, + ) + # Create the build tuple from the accumulated index vars above. + new_build_tuple = ir.Expr.build_tuple( + index_var_list, ind_shape[0].loc + ) + stmts.append( + ir.Assign( + value=new_build_tuple, + target=replacement_build_tuple_var, + loc=ind_shape[0].loc, + ) + ) + # New build_tuple has same type as the original one. 
+ self.typemap[replacement_build_tuple_var.name] = ind_typ + else: + replacement_build_tuple_var = index_var_list[0] + else: + replacement_build_tuple_var = None + + shape = tuple(shape_list) + require(not all(x is None for x in shape)) + shape = tuple(x for x in shape if x is not None) + return (replacement_build_tuple_var, + ArrayAnalysis.AnalyzeResult(shape=shape, pre=stmts)) + + def _analyze_op_getitem(self, scope, equiv_set, expr, lhs): + result = self._index_to_shape(scope, equiv_set, expr.value, expr.index) + if result[0] is not None: + expr.index = result[0] + return result[1] + + def _analyze_op_static_getitem(self, scope, equiv_set, expr, lhs): + var = expr.value + typ = self.typemap[var.name] + if not isinstance(typ, types.BaseTuple): + result = self._index_to_shape( + scope, equiv_set, expr.value, expr.index_var + ) + if result[0] is not None: + expr.index_var = result[0] + return result[1] + shape = equiv_set._get_shape(var) + if isinstance(expr.index, int): + require(expr.index < len(shape)) + return ArrayAnalysis.AnalyzeResult(shape=shape[expr.index]) + elif isinstance(expr.index, slice): + return ArrayAnalysis.AnalyzeResult(shape=shape[expr.index]) + require(False) + + def _analyze_op_unary(self, scope, equiv_set, expr, lhs): + require(expr.fn in UNARY_MAP_OP) + # for scalars, only + operator results in equivalence + # for example, if "m = -n", m and n are not equivalent + if self._isarray(expr.value.name) or expr.fn == operator.add: + return ArrayAnalysis.AnalyzeResult(shape=expr.value) + return None + + def _analyze_op_binop(self, scope, equiv_set, expr, lhs): + require(expr.fn in BINARY_MAP_OP) + return self._analyze_broadcast( + scope, equiv_set, expr.loc, [expr.lhs, expr.rhs], expr.fn + ) + + def _analyze_op_inplace_binop(self, scope, equiv_set, expr, lhs): + require(expr.fn in INPLACE_BINARY_MAP_OP) + return self._analyze_broadcast( + scope, equiv_set, expr.loc, [expr.lhs, expr.rhs], expr.fn + ) + + def _analyze_op_arrayexpr(self, scope, 
equiv_set, expr, lhs): + return self._analyze_broadcast( + scope, equiv_set, expr.loc, expr.list_vars(), None + ) + + def _analyze_op_build_tuple(self, scope, equiv_set, expr, lhs): + # For the moment, we can't do anything with tuples that + # contain multi-dimensional arrays, compared to array dimensions. + # Return None to say we won't track this tuple if a part of it + # is an array. + for x in expr.items: + if ( + isinstance(x, ir.Var) + and isinstance(self.typemap[x.name], types.ArrayCompatible) + and self.typemap[x.name].ndim > 1 + ): + return None + + consts = [] + for var in expr.items: + x = guard(find_const, self.func_ir, var) + if x is not None: + consts.append(x) + else: + break + else: + out = tuple([ir.Const(x, expr.loc) for x in consts]) + return ArrayAnalysis.AnalyzeResult( + shape=out, + rhs=ir.Const(tuple(consts), expr.loc) + ) + # default return for non-const + return ArrayAnalysis.AnalyzeResult(shape=tuple(expr.items)) + + def _analyze_op_call(self, scope, equiv_set, expr, lhs): + from numba.stencils.stencil import StencilFunc + + callee = expr.func + callee_def = get_definition(self.func_ir, callee) + if isinstance( + callee_def, (ir.Global, ir.FreeVar) + ) and is_namedtuple_class(callee_def.value): + return ArrayAnalysis.AnalyzeResult(shape=tuple(expr.args)) + if isinstance(callee_def, (ir.Global, ir.FreeVar)) and isinstance( + callee_def.value, StencilFunc + ): + args = expr.args + return self._analyze_stencil( + scope, + equiv_set, + callee_def.value, + expr.loc, + args, + dict(expr.kws), + ) + + fname, mod_name = find_callname( + self.func_ir, expr, typemap=self.typemap + ) + added_mod_name = False + # call via attribute (i.e. array.func) + if isinstance(mod_name, ir.Var) and isinstance( + self.typemap[mod_name.name], types.ArrayCompatible + ): + args = [mod_name] + expr.args + mod_name = "numpy" + # Remember that args and expr.args don't alias. 
+ added_mod_name = True + else: + args = expr.args + fname = "_analyze_op_call_{}_{}".format(mod_name, fname).replace( + ".", "_" + ) + if fname in UFUNC_MAP_OP: # known numpy ufuncs + return self._analyze_broadcast(scope, equiv_set, + expr.loc, args, None) + else: + try: + fn = getattr(self, fname) + except AttributeError: + return None + result = guard( + fn, + scope=scope, + equiv_set=equiv_set, + loc=expr.loc, + args=args, + kws=dict(expr.kws), + ) + # We want the ability for function fn to modify arguments. + # If args and expr.args don't alias then we need the extra + # step of assigning back into expr.args from the args that + # was passed to fn. + if added_mod_name: + expr.args = args[1:] + return result + + def _analyze_op_call_builtins_len(self, scope, equiv_set, loc, args, kws): + # python 3 version of len() + require(len(args) == 1) + var = args[0] + typ = self.typemap[var.name] + require(isinstance(typ, types.ArrayCompatible)) + shape = equiv_set._get_shape(var) + return ArrayAnalysis.AnalyzeResult(shape=shape[0], rhs=shape[0]) + + def _analyze_op_call_numba_parfors_array_analysis_assert_equiv( + self, scope, equiv_set, loc, args, kws + ): + equiv_set.insert_equiv(*args[1:]) + return None + + def _analyze_op_call_numba_parfors_array_analysis_wrap_index( + self, scope, equiv_set, loc, args, kws + ): + """ Analyze wrap_index calls added by a previous run of + Array Analysis + """ + require(len(args) == 2) + # Two parts to wrap index, the specified slice size... + slice_size = args[0].name + # ...and the size of the dimension. + dim_size = args[1].name + # Get the equivalence class ids for both. + slice_eq = equiv_set._get_or_add_ind(slice_size) + dim_eq = equiv_set._get_or_add_ind(dim_size) + # See if a previous wrap_index calls we've analyzed maps from + # the same pair of equivalence class ids for slice and dim size. 
+ if (slice_eq, dim_eq) in equiv_set.wrap_map: + wrap_ind = equiv_set.wrap_map[(slice_eq, dim_eq)] + require(wrap_ind in equiv_set.ind_to_var) + vs = equiv_set.ind_to_var[wrap_ind] + require(vs != []) + # Return the shape of the variable from the previous wrap_index. + return ArrayAnalysis.AnalyzeResult(shape=(vs[0],)) + else: + # We haven't seen this combination of slice and dim + # equivalence class ids so return a WrapIndexMeta so that + # _analyze_inst can establish the connection to the lhs var. + return ArrayAnalysis.AnalyzeResult( + shape=WrapIndexMeta(slice_eq, dim_eq) + ) + + def _analyze_numpy_create_array(self, scope, equiv_set, loc, args, kws): + shape_var = None + if len(args) > 0: + shape_var = args[0] + elif "shape" in kws: + shape_var = kws["shape"] + if shape_var: + return ArrayAnalysis.AnalyzeResult(shape=shape_var) + raise errors.UnsupportedRewriteError( + "Must specify a shape for array creation", + loc=loc, + ) + + def _analyze_op_call_numpy_empty(self, scope, equiv_set, loc, args, kws): + return self._analyze_numpy_create_array( + scope, equiv_set, loc, args, kws + ) + + def _analyze_op_call_numba_np_unsafe_ndarray_empty_inferred( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_numpy_create_array( + scope, equiv_set, loc, args, kws + ) + + def _analyze_op_call_numpy_zeros(self, scope, equiv_set, loc, args, kws): + return self._analyze_numpy_create_array( + scope, equiv_set, loc, args, kws + ) + + def _analyze_op_call_numpy_ones(self, scope, equiv_set, loc, args, kws): + return self._analyze_numpy_create_array( + scope, equiv_set, loc, args, kws + ) + + def _analyze_op_call_numpy_eye(self, scope, equiv_set, loc, args, kws): + if len(args) > 0: + N = args[0] + elif "N" in kws: + N = kws["N"] + else: + raise errors.UnsupportedRewriteError( + "Expect one argument (or 'N') to eye function", + loc=loc, + ) + if "M" in kws: + M = kws["M"] + else: + M = N + return ArrayAnalysis.AnalyzeResult(shape=(N, M)) + + def 
_analyze_op_call_numpy_identity( + self, scope, equiv_set, loc, args, kws + ): + assert len(args) > 0 + N = args[0] + return ArrayAnalysis.AnalyzeResult(shape=(N, N)) + + def _analyze_op_call_numpy_diag(self, scope, equiv_set, loc, args, kws): + # We can only reason about the output shape when the input is 1D or + # square 2D. + assert len(args) > 0 + a = args[0] + assert isinstance(a, ir.Var) + atyp = self.typemap[a.name] + if isinstance(atyp, types.ArrayCompatible): + if atyp.ndim == 2: + if "k" in kws: # will proceed only when k = 0 or absent + k = kws["k"] + if not equiv_set.is_equiv(k, 0): + return None + (m, n) = equiv_set._get_shape(a) + if equiv_set.is_equiv(m, n): + return ArrayAnalysis.AnalyzeResult(shape=(m,)) + elif atyp.ndim == 1: + (m,) = equiv_set._get_shape(a) + return ArrayAnalysis.AnalyzeResult(shape=(m, m)) + return None + + def _analyze_numpy_array_like(self, scope, equiv_set, args, kws): + assert len(args) > 0 + var = args[0] + typ = self.typemap[var.name] + if isinstance(typ, types.Integer): + return ArrayAnalysis.AnalyzeResult(shape=(1,)) + elif isinstance(typ, types.ArrayCompatible) and equiv_set.has_shape( + var + ): + return ArrayAnalysis.AnalyzeResult(shape=var) + return None + + def _analyze_op_call_numpy_ravel(self, scope, equiv_set, loc, args, kws): + assert len(args) == 1 + var = args[0] + typ = self.typemap[var.name] + assert isinstance(typ, types.ArrayCompatible) + # output array is same shape as input if input is 1D + if typ.ndim == 1 and equiv_set.has_shape(var): + if typ.layout == "C": + # output is the same as input (no copy) for 'C' layout + # optimize out the call + return ArrayAnalysis.AnalyzeResult(shape=var, rhs=var) + else: + return ArrayAnalysis.AnalyzeResult(shape=var) + # TODO: handle multi-D input arrays (calc array size) + return None + + def _analyze_op_call_numpy_copy(self, scope, equiv_set, loc, args, kws): + return self._analyze_numpy_array_like(scope, equiv_set, args, kws) + + def 
_analyze_op_call_numpy_empty_like( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_numpy_array_like(scope, equiv_set, args, kws) + + def _analyze_op_call_numpy_zeros_like( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_numpy_array_like(scope, equiv_set, args, kws) + + def _analyze_op_call_numpy_ones_like( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_numpy_array_like(scope, equiv_set, args, kws) + + def _analyze_op_call_numpy_full_like( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_numpy_array_like(scope, equiv_set, args, kws) + + def _analyze_op_call_numpy_asfortranarray( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_numpy_array_like(scope, equiv_set, args, kws) + + def _analyze_op_call_numpy_reshape(self, scope, equiv_set, loc, args, kws): + n = len(args) + assert n > 1 + if n == 2: + typ = self.typemap[args[1].name] + if isinstance(typ, types.BaseTuple): + return ArrayAnalysis.AnalyzeResult(shape=args[1]) + + # Reshape is allowed to take one argument that has the value <0. + # This means that the size of that dimension should be inferred from + # the size of the array being reshaped and the other dimensions + # specified. Our general approach here is to see if the reshape + # has any <0 arguments. If it has more than one then throw a + # ValueError. If exactly one <0 argument is found, remember its + # argument index. 
+ stmts = [] + neg_one_index = -1 + for arg_index in range(1, len(args)): + reshape_arg = args[arg_index] + reshape_arg_def = guard(get_definition, self.func_ir, reshape_arg) + if isinstance(reshape_arg_def, ir.Const): + if reshape_arg_def.value < 0: + if neg_one_index == -1: + neg_one_index = arg_index + else: + msg = ("The reshape API may only include one negative" + " argument.") + raise errors.UnsupportedRewriteError( + msg, loc=reshape_arg.loc + ) + + if neg_one_index >= 0: + # If exactly one <0 argument to reshape was found, then we are + # going to insert code to calculate the missing dimension and then + # replace the negative with the calculated size. We do this + # because we can't let array equivalence analysis think that some + # array has a negative dimension size. + loc = args[0].loc + # Create a variable to hold the size of the array being reshaped. + calc_size_var = ir.Var(scope, mk_unique_var("calc_size_var"), loc) + self.typemap[calc_size_var.name] = types.intp + # Assign the size of the array calc_size_var. + init_calc_var = ir.Assign( + ir.Expr.getattr(args[0], "size", loc), calc_size_var, loc + ) + stmts.append(init_calc_var) + # For each other dimension, divide the current size by the + # specified dimension size. Once all such dimensions have been + # done then what is left is the size of the negative dimension. + for arg_index in range(1, len(args)): + # Skip the negative dimension. + if arg_index == neg_one_index: + continue + div_calc_size_var = ir.Var( + scope, mk_unique_var("calc_size_var"), loc + ) + self.typemap[div_calc_size_var.name] = types.intp + # Calculate the next size as current size // the current arg's + # dimension size. 
+ new_binop = ir.Expr.binop( + operator.floordiv, calc_size_var, args[arg_index], loc + ) + div_calc = ir.Assign(new_binop, div_calc_size_var, loc) + self.calltypes[new_binop] = signature( + types.intp, types.intp, types.intp + ) + stmts.append(div_calc) + calc_size_var = div_calc_size_var + # Put the calculated value back into the reshape arguments, + # replacing the negative. + args[neg_one_index] = calc_size_var + + return ArrayAnalysis.AnalyzeResult(shape=tuple(args[1:]), pre=stmts) + + def _analyze_op_call_numpy_transpose( + self, scope, equiv_set, loc, args, kws + ): + in_arr = args[0] + typ = self.typemap[in_arr.name] + assert isinstance( + typ, types.ArrayCompatible + ), "Invalid np.transpose argument" + shape = equiv_set._get_shape(in_arr) + if len(args) == 1: + return ArrayAnalysis.AnalyzeResult(shape=tuple(reversed(shape))) + axes = [guard(find_const, self.func_ir, a) for a in args[1:]] + if isinstance(axes[0], tuple): + axes = list(axes[0]) + if None in axes: + return None + ret = [shape[i] for i in axes] + return ArrayAnalysis.AnalyzeResult(shape=tuple(ret)) + + def _analyze_op_call_numpy_random_rand( + self, scope, equiv_set, loc, args, kws + ): + if len(args) > 0: + return ArrayAnalysis.AnalyzeResult(shape=tuple(args)) + return None + + def _analyze_op_call_numpy_random_randn( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_op_call_numpy_random_rand( + scope, equiv_set, loc, args, kws + ) + + def _analyze_op_numpy_random_with_size( + self, pos, scope, equiv_set, args, kws + ): + if "size" in kws: + return ArrayAnalysis.AnalyzeResult(shape=kws["size"]) + if len(args) > pos: + return ArrayAnalysis.AnalyzeResult(shape=args[pos]) + return None + + def _analyze_op_call_numpy_random_ranf( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_op_numpy_random_with_size( + 0, scope, equiv_set, args, kws + ) + + def _analyze_op_call_numpy_random_random_sample( + self, scope, equiv_set, loc, args, kws + ): + return 
self._analyze_op_numpy_random_with_size( + 0, scope, equiv_set, args, kws + ) + + def _analyze_op_call_numpy_random_sample( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_op_numpy_random_with_size( + 0, scope, equiv_set, args, kws + ) + + def _analyze_op_call_numpy_random_random( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_op_numpy_random_with_size( + 0, scope, equiv_set, args, kws + ) + + def _analyze_op_call_numpy_random_standard_normal( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_op_numpy_random_with_size( + 0, scope, equiv_set, args, kws + ) + + def _analyze_op_call_numpy_random_chisquare( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_op_numpy_random_with_size( + 1, scope, equiv_set, args, kws + ) + + def _analyze_op_call_numpy_random_weibull( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_op_numpy_random_with_size( + 1, scope, equiv_set, args, kws + ) + + def _analyze_op_call_numpy_random_power( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_op_numpy_random_with_size( + 1, scope, equiv_set, args, kws + ) + + def _analyze_op_call_numpy_random_geometric( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_op_numpy_random_with_size( + 1, scope, equiv_set, args, kws + ) + + def _analyze_op_call_numpy_random_exponential( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_op_numpy_random_with_size( + 1, scope, equiv_set, args, kws + ) + + def _analyze_op_call_numpy_random_poisson( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_op_numpy_random_with_size( + 1, scope, equiv_set, args, kws + ) + + def _analyze_op_call_numpy_random_rayleigh( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_op_numpy_random_with_size( + 1, scope, equiv_set, args, kws + ) + + def _analyze_op_call_numpy_random_normal( + self, scope, equiv_set, loc, args, kws + ): + return 
self._analyze_op_numpy_random_with_size( + 2, scope, equiv_set, args, kws + ) + + def _analyze_op_call_numpy_random_uniform( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_op_numpy_random_with_size( + 2, scope, equiv_set, args, kws + ) + + def _analyze_op_call_numpy_random_beta( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_op_numpy_random_with_size( + 2, scope, equiv_set, args, kws + ) + + def _analyze_op_call_numpy_random_binomial( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_op_numpy_random_with_size( + 2, scope, equiv_set, args, kws + ) + + def _analyze_op_call_numpy_random_f( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_op_numpy_random_with_size( + 2, scope, equiv_set, args, kws + ) + + def _analyze_op_call_numpy_random_gamma( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_op_numpy_random_with_size( + 2, scope, equiv_set, args, kws + ) + + def _analyze_op_call_numpy_random_lognormal( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_op_numpy_random_with_size( + 2, scope, equiv_set, args, kws + ) + + def _analyze_op_call_numpy_random_laplace( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_op_numpy_random_with_size( + 2, scope, equiv_set, args, kws + ) + + def _analyze_op_call_numpy_random_randint( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_op_numpy_random_with_size( + 2, scope, equiv_set, args, kws + ) + + def _analyze_op_call_numpy_random_triangular( + self, scope, equiv_set, loc, args, kws + ): + return self._analyze_op_numpy_random_with_size( + 3, scope, equiv_set, args, kws + ) + + def _analyze_op_call_numpy_concatenate( + self, scope, equiv_set, loc, args, kws + ): + assert len(args) > 0 + loc = args[0].loc + seq, op = find_build_sequence(self.func_ir, args[0]) + n = len(seq) + require(n > 0) + axis = 0 + if "axis" in kws: + if isinstance(kws["axis"], int): # internal use only 
+ axis = kws["axis"] + else: + axis = find_const(self.func_ir, kws["axis"]) + elif len(args) > 1: + axis = find_const(self.func_ir, args[1]) + require(isinstance(axis, int)) + require(op == "build_tuple") + shapes = [equiv_set._get_shape(x) for x in seq] + if axis < 0: + axis = len(shapes[0]) + axis + require(0 <= axis < len(shapes[0])) + asserts = [] + new_shape = [] + if n == 1: # from one array N-dimension to (N-1)-dimension + shape = shapes[0] + # first size is the count, pop it out of shapes + n = equiv_set.get_equiv_const(shapes[0]) + shape.pop(0) + for i in range(len(shape)): + if i == axis: + m = equiv_set.get_equiv_const(shape[i]) + size = m * n if (m and n) else None + else: + size = self._sum_size(equiv_set, shapes[0]) + new_shape.append(size) + else: # from n arrays N-dimension to N-dimension + for i in range(len(shapes[0])): + if i == axis: + size = self._sum_size( + equiv_set, [shape[i] for shape in shapes] + ) + else: + sizes = [shape[i] for shape in shapes] + asserts.append( + self._call_assert_equiv(scope, loc, equiv_set, sizes) + ) + size = sizes[0] + new_shape.append(size) + return ArrayAnalysis.AnalyzeResult( + shape=tuple(new_shape), + pre=sum(asserts, []) + ) + + def _analyze_op_call_numpy_stack(self, scope, equiv_set, loc, args, kws): + assert len(args) > 0 + loc = args[0].loc + seq, op = find_build_sequence(self.func_ir, args[0]) + n = len(seq) + require(n > 0) + axis = 0 + if "axis" in kws: + if isinstance(kws["axis"], int): # internal use only + axis = kws["axis"] + else: + axis = find_const(self.func_ir, kws["axis"]) + elif len(args) > 1: + axis = find_const(self.func_ir, args[1]) + require(isinstance(axis, int)) + # only build_tuple can give reliable count + require(op == "build_tuple") + shapes = [equiv_set._get_shape(x) for x in seq] + asserts = self._call_assert_equiv(scope, loc, equiv_set, seq) + shape = shapes[0] + if axis < 0: + axis = len(shape) + axis + 1 + require(0 <= axis <= len(shape)) + new_shape = list(shape[0:axis]) + [n] 
+ list(shape[axis:]) + return ArrayAnalysis.AnalyzeResult(shape=tuple(new_shape), pre=asserts) + + def _analyze_op_call_numpy_vstack(self, scope, equiv_set, loc, args, kws): + assert len(args) == 1 + seq, op = find_build_sequence(self.func_ir, args[0]) + n = len(seq) + require(n > 0) + typ = self.typemap[seq[0].name] + require(isinstance(typ, types.ArrayCompatible)) + if typ.ndim < 2: + return self._analyze_op_call_numpy_stack( + scope, equiv_set, loc, args, kws + ) + else: + kws["axis"] = 0 + return self._analyze_op_call_numpy_concatenate( + scope, equiv_set, loc, args, kws + ) + + def _analyze_op_call_numpy_hstack(self, scope, equiv_set, loc, args, kws): + assert len(args) == 1 + seq, op = find_build_sequence(self.func_ir, args[0]) + n = len(seq) + require(n > 0) + typ = self.typemap[seq[0].name] + require(isinstance(typ, types.ArrayCompatible)) + if typ.ndim < 2: + kws["axis"] = 0 + else: + kws["axis"] = 1 + return self._analyze_op_call_numpy_concatenate( + scope, equiv_set, loc, args, kws + ) + + def _analyze_op_call_numpy_dstack(self, scope, equiv_set, loc, args, kws): + assert len(args) == 1 + seq, op = find_build_sequence(self.func_ir, args[0]) + n = len(seq) + require(n > 0) + typ = self.typemap[seq[0].name] + require(isinstance(typ, types.ArrayCompatible)) + if typ.ndim == 1: + kws["axis"] = 1 + result = self._analyze_op_call_numpy_stack( + scope, equiv_set, loc, args, kws + ) + require(result) + result.kwargs['shape'] = tuple([1] + list(result.kwargs['shape'])) + return result + elif typ.ndim == 2: + kws["axis"] = 2 + return self._analyze_op_call_numpy_stack( + scope, equiv_set, loc, args, kws + ) + else: + kws["axis"] = 2 + return self._analyze_op_call_numpy_concatenate( + scope, equiv_set, loc, args, kws + ) + + def _analyze_op_call_numpy_cumsum(self, scope, equiv_set, loc, args, kws): + # TODO + return None + + def _analyze_op_call_numpy_cumprod(self, scope, equiv_set, loc, args, kws): + # TODO + return None + + def _analyze_op_call_numpy_linspace( + 
self, scope, equiv_set, loc, args, kws + ): + n = len(args) + num = 50 + if n > 2: + num = args[2] + elif "num" in kws: + num = kws["num"] + return ArrayAnalysis.AnalyzeResult(shape=(num,)) + + def _analyze_op_call_numpy_dot(self, scope, equiv_set, loc, args, kws): + n = len(args) + assert n >= 2 + loc = args[0].loc + require(all([self._isarray(x.name) for x in args])) + typs = [self.typemap[x.name] for x in args] + dims = [ty.ndim for ty in typs] + require(all(x > 0 for x in dims)) + if dims[0] == 1 and dims[1] == 1: + return None + shapes = [equiv_set._get_shape(x) for x in args] + if dims[0] == 1: + asserts = self._call_assert_equiv( + scope, loc, equiv_set, [shapes[0][0], shapes[1][-2]] + ) + return ArrayAnalysis.AnalyzeResult( + shape=tuple(shapes[1][0:-2] + shapes[1][-1:]), + pre=asserts + ) + if dims[1] == 1: + asserts = self._call_assert_equiv( + scope, loc, equiv_set, [shapes[0][-1], shapes[1][0]] + ) + return ArrayAnalysis.AnalyzeResult( + shape=tuple(shapes[0][0:-1]), + pre=asserts + ) + if dims[0] == 2 and dims[1] == 2: + asserts = self._call_assert_equiv( + scope, loc, equiv_set, [shapes[0][1], shapes[1][0]] + ) + return ArrayAnalysis.AnalyzeResult( + shape=(shapes[0][0], shapes[1][1]), + pre=asserts + ) + if dims[0] > 2: # TODO: handle higher dimension cases + pass + return None + + def _analyze_stencil(self, scope, equiv_set, stencil_func, loc, args, kws): + # stencil requires that all relatively indexed array arguments are + # of same size + std_idx_arrs = stencil_func.options.get("standard_indexing", ()) + kernel_arg_names = stencil_func.kernel_ir.arg_names + if isinstance(std_idx_arrs, str): + std_idx_arrs = (std_idx_arrs,) + rel_idx_arrs = [] + assert len(args) > 0 and len(args) == len(kernel_arg_names) + for arg, var in zip(kernel_arg_names, args): + typ = self.typemap[var.name] + if isinstance(typ, types.ArrayCompatible) and not ( + arg in std_idx_arrs + ): + rel_idx_arrs.append(var) + n = len(rel_idx_arrs) + require(n > 0) + asserts = 
self._call_assert_equiv(scope, loc, equiv_set, rel_idx_arrs) + shape = equiv_set.get_shape(rel_idx_arrs[0]) + return ArrayAnalysis.AnalyzeResult(shape=shape, pre=asserts) + + def _analyze_op_call_numpy_linalg_inv( + self, scope, equiv_set, loc, args, kws + ): + require(len(args) >= 1) + return ArrayAnalysis.AnalyzeResult(shape=equiv_set._get_shape(args[0])) + + def _analyze_broadcast(self, scope, equiv_set, loc, args, fn): + """Infer shape equivalence of arguments based on Numpy broadcast rules + and return shape of output + https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html + """ + tups = list(filter(lambda a: self._istuple(a.name), args)) + # Here we have a tuple concatenation. + if len(tups) == 2 and fn.__name__ == 'add': + # If either of the tuples is empty then the resulting shape + # is just the other tuple. + tup0typ = self.typemap[tups[0].name] + tup1typ = self.typemap[tups[1].name] + if tup0typ.count == 0: + return ArrayAnalysis.AnalyzeResult( + shape=equiv_set.get_shape(tups[1]) + ) + if tup1typ.count == 0: + return ArrayAnalysis.AnalyzeResult( + shape=equiv_set.get_shape(tups[0]) + ) + + try: + shapes = [equiv_set.get_shape(x) for x in tups] + if None in shapes: + return None + concat_shapes = sum(shapes, ()) + return ArrayAnalysis.AnalyzeResult( + shape=concat_shapes + ) + except GuardException: + return None + + # else arrays + arrs = list(filter(lambda a: self._isarray(a.name), args)) + require(len(arrs) > 0) + names = [x.name for x in arrs] + dims = [self.typemap[x.name].ndim for x in arrs] + max_dim = max(dims) + require(max_dim > 0) + try: + shapes = [equiv_set.get_shape(x) for x in arrs] + except GuardException: + return ArrayAnalysis.AnalyzeResult( + shape=arrs[0], + pre=self._call_assert_equiv(scope, loc, equiv_set, arrs) + ) + pre = [] + if None in shapes: + # There is at least 1 shape that we don't know, + # so we need to generate that shape now. 
+ new_shapes = [] + for i, s in enumerate(shapes): + if s is None: + var = arrs[i] + typ = self.typemap[var.name] + shape = self._gen_shape_call( + equiv_set, var, typ.ndim, None, pre + ) + new_shapes.append(shape) + else: + new_shapes.append(s) + shapes = new_shapes + + result = self._broadcast_assert_shapes( + scope, equiv_set, loc, shapes, names + ) + if pre: + # If we had to generate a shape we have to insert + # that code before the broadcast assertion. + if 'pre' in result.kwargs: + prev_pre = result.kwargs['pre'] + else: + prev_pre = [] + result.kwargs['pre'] = pre + prev_pre + return result + + def _broadcast_assert_shapes(self, scope, equiv_set, loc, shapes, names): + """Produce assert_equiv for sizes in each dimension, taking into + account of dimension coercion and constant size of 1. + """ + asserts = [] + new_shape = [] + max_dim = max([len(shape) for shape in shapes]) + const_size_one = None + for i in range(max_dim): + sizes = [] + size_names = [] + for name, shape in zip(names, shapes): + if i < len(shape): + size = shape[len(shape) - 1 - i] + const_size = equiv_set.get_equiv_const(size) + if const_size == 1: + const_size_one = size + else: + sizes.append(size) # non-1 size to front + size_names.append(name) + if sizes == []: + assert const_size_one is not None + sizes.append(const_size_one) + size_names.append("1") + asserts.append( + self._call_assert_equiv( + scope, loc, equiv_set, sizes, names=size_names + ) + ) + new_shape.append(sizes[0]) + return ArrayAnalysis.AnalyzeResult( + shape=tuple(reversed(new_shape)), + pre=sum(asserts, []) + ) + + def _call_assert_equiv(self, scope, loc, equiv_set, args, names=None): + insts = self._make_assert_equiv( + scope, loc, equiv_set, args, names=names + ) + if len(args) > 1: + equiv_set.insert_equiv(*args) + return insts + + def _make_assert_equiv(self, scope, loc, equiv_set, _args, names=None): + # filter out those that are already equivalent + if config.DEBUG_ARRAY_OPT >= 2: + print("make_assert_equiv:", 
_args, names) + if names is None: + names = [x.name for x in _args] + args = [] + arg_names = [] + for name, x in zip(names, _args): + if config.DEBUG_ARRAY_OPT >= 2: + print("name, x:", name, x) + seen = False + for y in args: + if config.DEBUG_ARRAY_OPT >= 2: + print("is equiv to?", y, equiv_set.is_equiv(x, y)) + if equiv_set.is_equiv(x, y): + seen = True + break + if not seen: + args.append(x) + arg_names.append(name) + + # no assertion necessary if there are less than two + if len(args) < 2: + if config.DEBUG_ARRAY_OPT >= 2: + print( + "Will not insert assert_equiv as args are known to be " + "equivalent." + ) + return [] + + msg = "Sizes of {} do not match on {}".format( + ", ".join(arg_names), loc + ) + msg_val = ir.Const(msg, loc) + msg_typ = types.StringLiteral(msg) + msg_var = ir.Var(scope, mk_unique_var("msg"), loc) + self.typemap[msg_var.name] = msg_typ + argtyps = tuple([msg_typ] + [self.typemap[x.name] for x in args]) + + # assert_equiv takes vararg, which requires a tuple as argument type + tup_typ = types.StarArgTuple.from_types(argtyps) + + # prepare function variable whose type may vary since it takes vararg + assert_var = ir.Var(scope, mk_unique_var("assert"), loc) + assert_def = ir.Global("assert_equiv", assert_equiv, loc=loc) + fnty = get_global_func_typ(assert_equiv) + sig = self.context.resolve_function_type(fnty, (tup_typ,), {}) + self._define(equiv_set, assert_var, fnty, assert_def) + + # The return value from assert_equiv is always of none type. 
+ var = ir.Var(scope, mk_unique_var("ret"), loc) + value = ir.Expr.call(assert_var, [msg_var] + args, {}, loc=loc) + self._define(equiv_set, var, types.none, value) + self.calltypes[value] = sig + + return [ + ir.Assign(value=msg_val, target=msg_var, loc=loc), + ir.Assign(value=assert_def, target=assert_var, loc=loc), + ir.Assign(value=value, target=var, loc=loc), + ] + + def _gen_shape_call(self, equiv_set, var, ndims, shape, post): + # attr call: A_sh_attr = getattr(A, shape) + if isinstance(shape, ir.Var): + shape = equiv_set.get_shape(shape) + + # already a tuple variable that contains size + if isinstance(shape, ir.Var): + attr_var = shape + shape_attr_call = None + shape = None + elif isinstance(shape, ir.Arg): + attr_var = var + shape_attr_call = None + shape = None + else: + shape_attr_call = ir.Expr.getattr(var, "shape", var.loc) + attr_var = ir.Var( + var.scope, mk_unique_var("{}_shape".format(var.name)), var.loc + ) + shape_attr_typ = types.containers.UniTuple(types.intp, ndims) + size_vars = [] + use_attr_var = False + # trim shape tuple if it is more than ndim + if shape: + nshapes = len(shape) + if ndims < nshapes: + shape = shape[(nshapes - ndims) :] + for i in range(ndims): + skip = False + if shape and shape[i]: + if isinstance(shape[i], ir.Var): + typ = self.typemap[shape[i].name] + if isinstance(typ, (types.Number, types.SliceType)): + size_var = shape[i] + skip = True + else: + if isinstance(shape[i], int): + size_val = ir.Const(shape[i], var.loc) + else: + size_val = shape[i] + assert isinstance(size_val, ir.Const) + size_var = ir.Var( + var.scope, + mk_unique_var("{}_size{}".format(var.name, i)), + var.loc, + ) + post.append(ir.Assign(size_val, size_var, var.loc)) + self._define(equiv_set, size_var, types.intp, size_val) + skip = True + if not skip: + # get size: Asize0 = A_sh_attr[0] + size_var = ir.Var( + var.scope, + mk_unique_var("{}_size{}".format(var.name, i)), + var.loc, + ) + getitem = ir.Expr.static_getitem(attr_var, i, None, var.loc) 
+ use_attr_var = True + self.calltypes[getitem] = None + post.append(ir.Assign(getitem, size_var, var.loc)) + self._define(equiv_set, size_var, types.intp, getitem) + size_vars.append(size_var) + if use_attr_var and shape_attr_call: + # only insert shape call if there is any getitem call + post.insert(0, ir.Assign(shape_attr_call, attr_var, var.loc)) + self._define(equiv_set, attr_var, shape_attr_typ, shape_attr_call) + return tuple(size_vars) + + def _isarray(self, varname): + typ = self.typemap[varname] + return isinstance(typ, types.npytypes.Array) and typ.ndim > 0 + + def _istuple(self, varname): + typ = self.typemap[varname] + return isinstance(typ, types.BaseTuple) + + def _sum_size(self, equiv_set, sizes): + """Return the sum of the given list of sizes if they are all equivalent + to some constant, or None otherwise. + """ + s = 0 + for size in sizes: + n = equiv_set.get_equiv_const(size) + if n is None: + return None + else: + s += n + return s + + +UNARY_MAP_OP = list(npydecl.NumpyRulesUnaryArrayOperator._op_map.keys()) + [ + operator.pos +] +BINARY_MAP_OP = npydecl.NumpyRulesArrayOperator._op_map.keys() +INPLACE_BINARY_MAP_OP = npydecl.NumpyRulesInplaceArrayOperator._op_map.keys() +UFUNC_MAP_OP = [f.__name__ for f in npydecl.supported_ufuncs] diff --git a/venv/lib/python3.10/site-packages/numba/parfors/parfor.py b/venv/lib/python3.10/site-packages/numba/parfors/parfor.py new file mode 100644 index 0000000000000000000000000000000000000000..3c71d4c3f201980b743181cc7d39f1a70739b28e --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/parfors/parfor.py @@ -0,0 +1,5249 @@ +# +# Copyright (c) 2017 Intel Corporation +# SPDX-License-Identifier: BSD-2-Clause +# + +""" +This module transforms data-parallel operations such as Numpy calls into +'Parfor' nodes, which are nested loops that can be parallelized. +It also implements optimizations such as loop fusion, and extends the rest of +compiler analysis and optimizations to support Parfors. 
+This is similar to ParallelAccelerator package in Julia: +https://github.com/IntelLabs/ParallelAccelerator.jl +'Parallelizing Julia with a Non-invasive DSL', T. Anderson et al., ECOOP'17. +""" +import types as pytypes # avoid confusion with numba.types +import sys, math +import os +import textwrap +import copy +import inspect +import linecache +from functools import reduce +from collections import defaultdict, OrderedDict, namedtuple +from contextlib import contextmanager +import operator +from dataclasses import make_dataclass +import warnings + +from llvmlite import ir as lir +from numba.core.imputils import impl_ret_untracked +import numba.core.ir +from numba.core import types, typing, utils, errors, ir, analysis, postproc, rewrites, typeinfer, config, ir_utils +from numba import prange, pndindex +from numba.np.npdatetime_helpers import datetime_minimum, datetime_maximum +from numba.np.numpy_support import as_dtype, numpy_version +from numba.core.typing.templates import infer_global, AbstractTemplate +from numba.stencils.stencilparfor import StencilPass +from numba.core.extending import register_jitable, lower_builtin + +from numba.core.ir_utils import ( + mk_unique_var, + next_label, + mk_alloc, + get_np_ufunc_typ, + mk_range_block, + mk_loop_header, + get_name_var_table, + replace_vars, + replace_vars_inner, + visit_vars, + visit_vars_inner, + remove_dead, + copy_propagate, + get_block_copies, + apply_copy_propagate, + dprint_func_ir, + find_topo_order, + get_stmt_writes, + rename_labels, + get_call_table, + simplify, + simplify_CFG, + has_no_side_effect, + canonicalize_array_math, + add_offset_to_labels, + find_callname, + find_build_sequence, + guard, + require, + GuardException, + compile_to_numba_ir, + get_definition, + build_definitions, + replace_arg_nodes, + replace_returns, + is_getitem, + is_setitem, + is_get_setitem, + index_var_of_get_setitem, + set_index_var_of_get_setitem, + find_potential_aliases, + replace_var_names, + transfer_scope, +) + 
+from numba.core.analysis import (compute_use_defs, compute_live_map, + compute_dead_maps, compute_cfg_from_blocks) +from numba.core.controlflow import CFGraph +from numba.core.typing import npydecl, signature +from numba.core.types.functions import Function +from numba.parfors.array_analysis import (random_int_args, random_1arg_size, + random_2arg_sizelast, random_3arg_sizelast, + random_calls, assert_equiv) +from numba.core.extending import overload +import copy +import numpy +import numpy as np +from numba.parfors import array_analysis +import numba.cpython.builtins +from numba.stencils import stencilparfor +# circular dependency: import numba.npyufunc.dufunc.DUFunc + +# wrapped pretty print +_termwidth = 80 +_txtwrapper = textwrap.TextWrapper(width=_termwidth, drop_whitespace=False) +def print_wrapped(x): + for l in x.splitlines(): + [print(y) for y in _txtwrapper.wrap(l)] + +sequential_parfor_lowering = False + +# init_prange is a sentinel call that specifies the start of the initialization +# code for the computation in the upcoming prange call +# This lets the prange pass to put the code in the generated parfor's init_block +def init_prange(): + return + +@overload(init_prange) +def init_prange_overload(): + def no_op(): + return + return no_op + +class internal_prange(object): + + def __new__(cls, *args): + return range(*args) + + +def min_parallel_impl(return_type, arg): + # XXX: use prange for 1D arrays since pndindex returns a 1-tuple instead of + # integer. This causes type and fusion issues. 
+ if arg.ndim == 0: + def min_1(in_arr): + return in_arr[()] + elif arg.ndim == 1: + if isinstance(arg.dtype, (types.NPDatetime, types.NPTimedelta)): + # NaT is always returned if it is in the array + def min_1(in_arr): + numba.parfors.parfor.init_prange() + min_checker(len(in_arr)) + val = numba.cpython.builtins.get_type_max_value(in_arr.dtype) + for i in numba.parfors.parfor.internal_prange(len(in_arr)): + val = datetime_minimum(val, in_arr[i]) + return val + else: + def min_1(in_arr): + numba.parfors.parfor.init_prange() + min_checker(len(in_arr)) + val = numba.cpython.builtins.get_type_max_value(in_arr.dtype) + for i in numba.parfors.parfor.internal_prange(len(in_arr)): + val = min(val, in_arr[i]) + return val + else: + def min_1(in_arr): + numba.parfors.parfor.init_prange() + min_checker(len(in_arr)) + val = numba.cpython.builtins.get_type_max_value(in_arr.dtype) + for i in numba.pndindex(in_arr.shape): + val = min(val, in_arr[i]) + return val + return min_1 + +def max_parallel_impl(return_type, arg): + if arg.ndim == 0: + def max_1(in_arr): + return in_arr[()] + elif arg.ndim == 1: + if isinstance(arg.dtype, (types.NPDatetime, types.NPTimedelta)): + # NaT is always returned if it is in the array + def max_1(in_arr): + numba.parfors.parfor.init_prange() + max_checker(len(in_arr)) + val = numba.cpython.builtins.get_type_min_value(in_arr.dtype) + for i in numba.parfors.parfor.internal_prange(len(in_arr)): + val = datetime_maximum(val, in_arr[i]) + return val + else: + def max_1(in_arr): + numba.parfors.parfor.init_prange() + max_checker(len(in_arr)) + val = numba.cpython.builtins.get_type_min_value(in_arr.dtype) + for i in numba.parfors.parfor.internal_prange(len(in_arr)): + val = max(val, in_arr[i]) + return val + else: + def max_1(in_arr): + numba.parfors.parfor.init_prange() + max_checker(len(in_arr)) + val = numba.cpython.builtins.get_type_min_value(in_arr.dtype) + for i in numba.pndindex(in_arr.shape): + val = max(val, in_arr[i]) + return val + return max_1 
+ +def argmin_parallel_impl(in_arr): + numba.parfors.parfor.init_prange() + argmin_checker(len(in_arr)) + A = in_arr.ravel() + init_val = numba.cpython.builtins.get_type_max_value(A.dtype) + ival = typing.builtins.IndexValue(0, init_val) + for i in numba.parfors.parfor.internal_prange(len(A)): + curr_ival = typing.builtins.IndexValue(i, A[i]) + ival = min(ival, curr_ival) + return ival.index + +def argmax_parallel_impl(in_arr): + numba.parfors.parfor.init_prange() + argmax_checker(len(in_arr)) + A = in_arr.ravel() + init_val = numba.cpython.builtins.get_type_min_value(A.dtype) + ival = typing.builtins.IndexValue(0, init_val) + for i in numba.parfors.parfor.internal_prange(len(A)): + curr_ival = typing.builtins.IndexValue(i, A[i]) + ival = max(ival, curr_ival) + return ival.index + +def dotvv_parallel_impl(a, b): + numba.parfors.parfor.init_prange() + l = a.shape[0] + m = b.shape[0] + # TODO: investigate assert_equiv + #assert_equiv("sizes of l, m do not match", l, m) + s = 0 + for i in numba.parfors.parfor.internal_prange(l): + s += a[i] * b[i] + return s + +def dotvm_parallel_impl(a, b): + numba.parfors.parfor.init_prange() + l = a.shape + m, n = b.shape + # TODO: investigate assert_equiv + #assert_equiv("Sizes of l, m do not match", l, m) + c = np.zeros(n, a.dtype) + # TODO: evaluate dotvm implementation options + #for i in prange(n): + # s = 0 + # for j in range(m): + # s += a[j] * b[j, i] + # c[i] = s + for i in numba.parfors.parfor.internal_prange(m): + c += a[i] * b[i, :] + return c + +def dotmv_parallel_impl(a, b): + numba.parfors.parfor.init_prange() + m, n = a.shape + l = b.shape + # TODO: investigate assert_equiv + #assert_equiv("sizes of n, l do not match", n, l) + c = np.empty(m, a.dtype) + for i in numba.parfors.parfor.internal_prange(m): + s = 0 + for j in range(n): + s += a[i, j] * b[j] + c[i] = s + return c + +def dot_parallel_impl(return_type, atyp, btyp): + # Note that matrix matrix multiply is not translated. 
    # Dispatch on operand ranks; returns None (no parallel substitution)
    # for unsupported combinations such as matrix @ matrix.
    if (isinstance(atyp, types.npytypes.Array) and
            isinstance(btyp, types.npytypes.Array)):
        if atyp.ndim == btyp.ndim == 1:
            return dotvv_parallel_impl
        # TODO: evaluate support for dotvm and enable
        #elif atyp.ndim == 1 and btyp.ndim == 2:
        #    return dotvm_parallel_impl
        elif atyp.ndim == 2 and btyp.ndim == 1:
            return dotmv_parallel_impl

def sum_parallel_impl(return_type, arg):
    """Build a parallel kernel for np.sum over array type *arg*.
    The accumulator is seeded with return_type(0) so the reduction is
    carried out in the result type rather than the element type.
    """
    zero = return_type(0)

    if arg.ndim == 0:
        def sum_1(in_arr):
            return in_arr[()]
    elif arg.ndim == 1:
        def sum_1(in_arr):
            numba.parfors.parfor.init_prange()
            val = zero
            for i in numba.parfors.parfor.internal_prange(len(in_arr)):
                val += in_arr[i]
            return val
    else:
        def sum_1(in_arr):
            numba.parfors.parfor.init_prange()
            val = zero
            for i in numba.pndindex(in_arr.shape):
                val += in_arr[i]
            return val
    return sum_1

def prod_parallel_impl(return_type, arg):
    """Build a parallel kernel for np.prod; identical in structure to
    sum_parallel_impl but seeded with return_type(1) and folded with *=.
    """
    one = return_type(1)

    if arg.ndim == 0:
        def prod_1(in_arr):
            return in_arr[()]
    elif arg.ndim == 1:
        def prod_1(in_arr):
            numba.parfors.parfor.init_prange()
            val = one
            for i in numba.parfors.parfor.internal_prange(len(in_arr)):
                val *= in_arr[i]
            return val
    else:
        def prod_1(in_arr):
            numba.parfors.parfor.init_prange()
            val = one
            for i in numba.pndindex(in_arr.shape):
                val *= in_arr[i]
            return val
    return prod_1


def mean_parallel_impl(return_type, arg):
    """Build a parallel kernel for np.mean: parallel sum divided by the
    element count.
    """
    # can't reuse sum since output type is different
    zero = return_type(0)

    if arg.ndim == 0:
        def mean_1(in_arr):
            return in_arr[()]
    elif arg.ndim == 1:
        def mean_1(in_arr):
            numba.parfors.parfor.init_prange()
            val = zero
            for i in numba.parfors.parfor.internal_prange(len(in_arr)):
                val += in_arr[i]
            return val/len(in_arr)
    else:
        def mean_1(in_arr):
            numba.parfors.parfor.init_prange()
            val = zero
            for i in numba.pndindex(in_arr.shape):
                val += in_arr[i]
            return val/in_arr.size
    return mean_1

def var_parallel_impl(return_type, arg):
    """Build a parallel kernel for np.var (population variance): mean of
    squared deviations.  np.real(val * np.conj(val)) is |val|**2, so the
    same kernel also covers complex input.
    """
    if arg.ndim == 0:
        # A 0-d array has no spread.
        def var_1(in_arr):
            return 0
    elif arg.ndim == 1:
        def var_1(in_arr):
            # Compute the mean
            m = in_arr.mean()
            # Compute the sum of square diffs
            numba.parfors.parfor.init_prange()
            ssd = 0
            for i in numba.parfors.parfor.internal_prange(len(in_arr)):
                val = in_arr[i] - m
                ssd += np.real(val * np.conj(val))
            return ssd / len(in_arr)
    else:
        def var_1(in_arr):
            # Compute the mean
            m = in_arr.mean()
            # Compute the sum of square diffs
            numba.parfors.parfor.init_prange()
            ssd = 0
            for i in numba.pndindex(in_arr.shape):
                val = in_arr[i] - m
                ssd += np.real(val * np.conj(val))
            return ssd / in_arr.size
    return var_1

def std_parallel_impl(return_type, arg):
    """np.std implemented as sqrt of the (parallel) variance."""
    def std_1(in_arr):
        return in_arr.var() ** 0.5
    return std_1

def arange_parallel_impl(return_type, *args, dtype=None):
    """Build a parallel np.arange kernel.  All arities are normalized to
    (start, stop, step, dtype); when no explicit dtype argument is
    passed, the dtype inferred in *return_type* is used.  Raises
    ValueError for unsupported argument counts.
    """
    inferred_dtype = as_dtype(return_type.dtype)

    def arange_1(stop):
        return np.arange(0, stop, 1, inferred_dtype)

    def arange_1_dtype(stop, dtype):
        return np.arange(0, stop, 1, dtype)

    def arange_2(start, stop):
        return np.arange(start, stop, 1, inferred_dtype)

    def arange_2_dtype(start, stop, dtype):
        return np.arange(start, stop, 1, dtype)

    def arange_3(start, stop, step):
        return np.arange(start, stop, step, inferred_dtype)

    def arange_3_dtype(start, stop, step, dtype):
        return np.arange(start, stop, step, dtype)

    if any(isinstance(a, types.Complex) for a in args):
        # Complex ranges: the item count is derived from the real and
        # imaginary parts separately, clamped at zero.
        def arange_4(start, stop, step, dtype):
            numba.parfors.parfor.init_prange()
            nitems_c = (stop - start) / step
            nitems_r = math.ceil(nitems_c.real)
            nitems_i = math.ceil(nitems_c.imag)
            nitems = int(max(min(nitems_i, nitems_r), 0))
            arr = np.empty(nitems, dtype)
            for i in numba.parfors.parfor.internal_prange(nitems):
                arr[i] = start + i * step
            return arr
    else:
        def arange_4(start, stop, step, dtype):
            numba.parfors.parfor.init_prange()
            nitems_r = math.ceil((stop - start) / step)
            nitems = int(max(nitems_r, 0))
            arr = np.empty(nitems, dtype)
            # NOTE(review): `val` is assigned but never used below.
            val = start
            for i in numba.parfors.parfor.internal_prange(nitems):
                arr[i] = start + i * step
            return arr

    if len(args) == 1:
        return arange_1 if dtype is None else arange_1_dtype
    elif len(args) == 2:
        return arange_2 if dtype is None else arange_2_dtype
    elif len(args) == 3:
        return arange_3 if dtype is None else arange_3_dtype
    elif len(args) == 4:
        return arange_4
    else:
        raise ValueError("parallel arange with types {}".format(args))

def linspace_parallel_impl(return_type, *args):
    """Build a parallel np.linspace kernel for 2- or 3-argument calls.
    Raises ValueError for other arities.
    """
    dtype = as_dtype(return_type.dtype)

    def linspace_2(start, stop):
        return np.linspace(start, stop, 50)

    def linspace_3(start, stop, num):
        numba.parfors.parfor.init_prange()
        arr = np.empty(num, dtype)
        # NOTE(review): div == 0 when num == 1 -- the loop below would
        # then divide by zero; verify callers cannot reach that case.
        div = num - 1
        delta = stop - start
        arr[0] = start
        for i in numba.parfors.parfor.internal_prange(num):
            arr[i] = start + delta * (i / div)
        return arr

    if len(args) == 2:
        return linspace_2
    elif len(args) == 3:
        return linspace_3
    else:
        raise ValueError("parallel linspace with types {}".format(args))

# Maps (function name, module name) of a call found in user code to the
# builder that produces its parallel replacement implementation.
swap_functions_map = {
    ('argmin', 'numpy'): lambda r,a: argmin_parallel_impl,
    ('argmax', 'numpy'): lambda r,a: argmax_parallel_impl,
    ('min', 'numpy'): min_parallel_impl,
    ('max', 'numpy'): max_parallel_impl,
    ('amin', 'numpy'): min_parallel_impl,
    ('amax', 'numpy'): max_parallel_impl,
    ('sum', 'numpy'): sum_parallel_impl,
    ('prod', 'numpy'): prod_parallel_impl,
    ('mean', 'numpy'): mean_parallel_impl,
    ('var', 'numpy'): var_parallel_impl,
    ('std', 'numpy'): std_parallel_impl,
    ('dot', 'numpy'): dot_parallel_impl,
    ('arange', 'numpy'): arange_parallel_impl,
    ('linspace', 'numpy'): linspace_parallel_impl,
}

def fill_parallel_impl(return_type, arr, val):
    """Parallel implementation of ndarray.fill. The array on
    which to operate is retrieved from get_call_name and
    is passed along with the value to fill.
+ """ + if arr.ndim == 1: + def fill_1(in_arr, val): + numba.parfors.parfor.init_prange() + for i in numba.parfors.parfor.internal_prange(len(in_arr)): + in_arr[i] = val + return None + else: + def fill_1(in_arr, val): + numba.parfors.parfor.init_prange() + for i in numba.pndindex(in_arr.shape): + in_arr[i] = val + return None + return fill_1 + +replace_functions_ndarray = { + 'fill': fill_parallel_impl, +} + +@register_jitable +def max_checker(arr_size): + if arr_size == 0: + raise ValueError(("zero-size array to reduction operation " + "maximum which has no identity")) + +@register_jitable +def min_checker(arr_size): + if arr_size == 0: + raise ValueError(("zero-size array to reduction operation " + "minimum which has no identity")) + +@register_jitable +def argmin_checker(arr_size): + if arr_size == 0: + raise ValueError("attempt to get argmin of an empty sequence") + +@register_jitable +def argmax_checker(arr_size): + if arr_size == 0: + raise ValueError("attempt to get argmax of an empty sequence") + +checker_impl = namedtuple('checker_impl', ['name', 'func']) + +replace_functions_checkers_map = { + ('argmin', 'numpy') : checker_impl('argmin_checker', argmin_checker), + ('argmax', 'numpy') : checker_impl('argmax_checker', argmax_checker), + ('min', 'numpy') : checker_impl('min_checker', min_checker), + ('max', 'numpy') : checker_impl('max_checker', max_checker), + ('amin', 'numpy') : checker_impl('min_checker', min_checker), + ('amax', 'numpy') : checker_impl('max_checker', max_checker), +} + + +class LoopNest(object): + + '''The LoopNest class holds information of a single loop including + the index variable (of a non-negative integer value), and the + range variable, e.g. range(r) is 0 to r-1 with step size 1. 
    '''

    def __init__(self, index_variable, start, stop, step):
        self.index_variable = index_variable
        self.start = start
        self.stop = stop
        self.step = step


    def __repr__(self):
        return ("LoopNest(index_variable = {}, range = ({}, {}, {}))".
                format(self.index_variable, self.start, self.stop, self.step))

    def list_vars(self):
        # Collect the index variable plus whichever of start/stop/step
        # are IR variables (they may also be constants).
        all_uses = []
        all_uses.append(self.index_variable)
        if isinstance(self.start, ir.Var):
            all_uses.append(self.start)
        if isinstance(self.stop, ir.Var):
            all_uses.append(self.stop)
        if isinstance(self.step, ir.Var):
            all_uses.append(self.step)
        return all_uses

class Parfor(ir.Expr, ir.Stmt):
    """IR node representing a parallel for-loop: a set of loop nests, an
    init block, and a body of IR blocks, usable both as an expression
    and as a statement.
    """

    # Monotonic counter used to assign each Parfor a unique id.
    id_counter = 0

    def __init__(
            self,
            loop_nests,
            init_block,
            loop_body,
            loc,
            index_var,
            equiv_set,
            pattern,
            flags,
            *,  # only specify the options below by keyword
            no_sequential_lowering=False,
            # NOTE(review): mutable default argument -- the same set
            # object is shared by every Parfor constructed without an
            # explicit `races`; confirm it is never mutated in place.
            races=set()):
        super(Parfor, self).__init__(
            op='parfor',
            loc=loc
        )

        self.id = type(self).id_counter
        type(self).id_counter += 1
        #self.input_info = input_info
        #self.output_info = output_info
        self.loop_nests = loop_nests
        self.init_block = init_block
        self.loop_body = loop_body
        self.index_var = index_var
        self.params = None  # filled right before parallel lowering
        self.equiv_set = equiv_set
        # The parallel patterns this parfor was generated from and their options
        # for example, a parfor could be from the stencil pattern with
        # the neighborhood option
        assert len(pattern) > 1
        self.patterns = [pattern]
        self.flags = flags
        # if True, this parfor shouldn't be lowered sequentially even with the
        # sequential lowering option
        self.no_sequential_lowering = no_sequential_lowering
        self.races = races
        self.redvars = []
        self.reddict = {}
        # If the lowerer is None then the standard lowerer will be used.
        # This can be set to a function to have that function act as the lowerer
        # for this parfor.  This lowerer field will also prevent parfors from
        # being fused unless they have use the same lowerer.
        self.lowerer = None
        if config.DEBUG_ARRAY_OPT_STATS:
            fmt = 'Parallel for-loop #{} is produced from pattern \'{}\' at {}'
            print(fmt.format(
                self.id, pattern, loc))

    def __repr__(self):
        return "id=" + str(self.id) + repr(self.loop_nests) + \
               repr(self.loop_body) + repr(self.index_var)

    def get_loop_nest_vars(self):
        # Index variables of every loop nest, outermost first.
        return [x.index_variable for x in self.loop_nests]

    def list_vars(self):
        """list variables used (read/written) in this parfor by
        traversing the body and combining block uses.
        """
        all_uses = []
        for l, b in self.loop_body.items():
            for stmt in b.body:
                all_uses += stmt.list_vars()

        for loop in self.loop_nests:
            all_uses += loop.list_vars()

        for stmt in self.init_block.body:
            all_uses += stmt.list_vars()

        return all_uses

    def get_shape_classes(self, var, typemap=None):
        """get the shape classes for a given variable.
        If a typemap is specified then use it for type resolution
        """
        # We get shape classes from the equivalence set but that
        # keeps its own typemap at a time prior to lowering.  So
        # if something is added during lowering then we can pass
        # in a type map to use.  We temporarily replace the
        # equivalence set typemap, do the work and then restore
        # the original on the way out.
        if typemap is not None:
            save_typemap = self.equiv_set.typemap
            self.equiv_set.typemap = typemap
        res = self.equiv_set.get_shape_classes(var)
        if typemap is not None:
            self.equiv_set.typemap = save_typemap
        return res

    def dump(self, file=None):
        # Debug dump of the parfor structure; defaults to stdout.
        file = file or sys.stdout
        print(("begin parfor {}".format(self.id)).center(20, '-'), file=file)
        print("index_var = ", self.index_var, file=file)
        print("params = ", self.params, file=file)
        print("races = ", self.races, file=file)
        for loopnest in self.loop_nests:
            print(loopnest, file=file)
        print("init block:", file=file)
        self.init_block.dump(file)
        for offset, block in sorted(self.loop_body.items()):
            print('label %s:' % (offset,), file=file)
            block.dump(file)
        print(("end parfor {}".format(self.id)).center(20, '-'), file=file)

    def validate_params(self, typemap):
        """
        Check that Parfors params are of valid types.
        """
        if self.params is None:
            msg = ("Cannot run parameter validation on a Parfor with params "
                   "not set")
            raise ValueError(msg)
        for p in self.params:
            ty = typemap.get(p)
            if ty is None:
                # NOTE(review): msg contains an unfilled %s placeholder;
                # it is raised without `% p`, so the offending parameter
                # name never appears in the error text.
                msg = ("Cannot validate parameter %s, there is no type "
                       "information available")
                raise ValueError(msg)
            if isinstance(ty, types.BaseTuple):
                if ty.count > config.PARFOR_MAX_TUPLE_SIZE:
                    msg = ("Use of a tuple (%s) of length %d in a parallel region "
                           "exceeds the maximum supported tuple size.  Since "
                           "Generalized Universal Functions back parallel regions "
                           "and those do not support tuples, tuples passed to "
                           "parallel regions are unpacked if their size is below "
                           "a certain threshold, currently configured to be %d. "
                           "This threshold can be modified using the Numba "
                           "environment variable NUMBA_PARFOR_MAX_TUPLE_SIZE.")
                    raise errors.UnsupportedParforsError(msg %
                        (p, ty.count, config.PARFOR_MAX_TUPLE_SIZE),
                        self.loc)


def _analyze_parfor(parfor, equiv_set, typemap, array_analysis):
    """Recursive array analysis for parfor nodes.
+ """ + func_ir = array_analysis.func_ir + parfor_blocks = wrap_parfor_blocks(parfor) + # Since init_block get label 0 after wrap, we need to save + # the equivset for the real block label 0. + backup_equivset = array_analysis.equiv_sets.get(0, None) + array_analysis.run(parfor_blocks, equiv_set) + unwrap_parfor_blocks(parfor, parfor_blocks) + parfor.equiv_set = array_analysis.equiv_sets[0] + # Restore equivset for block 0 after parfor is unwrapped + if backup_equivset: + array_analysis.equiv_sets[0] = backup_equivset + return [], [] + +array_analysis.array_analysis_extensions[Parfor] = _analyze_parfor + +class ParforDiagnostics(object): + """Holds parfor diagnostic info, this is accumulated throughout the + PreParforPass and ParforPass, also in the closure inlining! + """ + def __init__(self): + # holds ref to the function for which this is providing diagnostics + self.func = None + # holds a map of the replaced functions + self.replaced_fns = dict() + # used to identify "internal" parfor functions + self.internal_name = '__numba_parfor_gufunc' + self.fusion_info = defaultdict(list) + self.nested_fusion_info = defaultdict(list) + self.fusion_reports = [] + self.hoist_info = {} + self.has_setup = False + + def setup(self, func_ir, fusion_enabled): + self.func_ir = func_ir + self.name = self.func_ir.func_id.func_qualname + self.line = self.func_ir.loc + self.fusion_enabled = fusion_enabled + if self.internal_name in self.name: + self.purpose = 'Internal parallel function' + else: + self.purpose = 'Function %s, %s' % (self.name, self.line) + # we store a reference to the parfors prior to fusion etc, the parfors + # do get mangled in the fusion process but in a predetermined manner + # and by holding a reference here the "before" state can be printed + self.initial_parfors = self.get_parfors() + self.has_setup = True + + @property + def has_setup(self): + return self._has_setup + + @has_setup.setter + def has_setup(self, state): + self._has_setup = state + + def 
count_parfors(self, blocks=None): + return len(self.get_parfors()) + + def _get_nested_parfors(self, parfor, parfors_list): + blocks = wrap_parfor_blocks(parfor) + self._get_parfors(blocks, parfors_list) + unwrap_parfor_blocks(parfor) + + def _get_parfors(self, blocks, parfors_list): + for label, blk in blocks.items(): + for stmt in blk.body: + if isinstance(stmt, Parfor): + parfors_list.append(stmt) + self._get_nested_parfors(stmt, parfors_list) + + def get_parfors(self): + parfors_list = [] + self._get_parfors(self.func_ir.blocks, parfors_list) + return parfors_list + + def hoisted_allocations(self): + allocs = [] + for pf_id, data in self.hoist_info.items(): + stmt = data.get('hoisted', []) + for inst in stmt: + if isinstance(inst.value, ir.Expr): + if inst.value.op == 'call': + call = guard(find_callname, self.func_ir, inst.value) + if call is not None and call == ('empty', 'numpy'): + allocs.append(inst) + return allocs + + def compute_graph_info(self, _a): + """ + compute adjacency list of the fused loops + and find the roots in of the lists + """ + a = copy.deepcopy(_a) + if a == {}: + return [], set() + + vtx = set() + for v in a.values(): + for x in v: + vtx.add(x) + + # find roots + potential_roots = set(a.keys()) + roots = potential_roots - vtx + if roots is None: + roots = set() + + # populate rest of adjacency list + not_roots = set() + for x in range(max(set(a.keys()).union(vtx)) + 1): + val = a.get(x) + if val is not None: + a[x] = val + elif val == []: + not_roots.add(x) # debug only + else: + a[x] = [] + + + # fold adjacency list into an actual list ordered + # by vtx + l = [] + for x in sorted(a.keys()): + l.append(a[x]) + + return l, roots #, not_roots + + def get_stats(self, fadj, nadj, root): + """ + Computes the number of fused and serialized loops + based on a fusion adjacency list `fadj` and a nested + parfors adjacency list `nadj` for the root, `root` + """ + def count_root(fadj, nadj, root, nfused, nserial): + for k in nadj[root]: + 
nserial += 1 + if nadj[k] == []: + nfused += len(fadj[k]) + else: + nf, ns = count_root(fadj, nadj, k, nfused, nserial) + nfused += nf + nserial = ns + return nfused, nserial + nfused, nserial = count_root(fadj, nadj, root, 0, 0) + return nfused, nserial + + def reachable_nodes(self, adj, root): + """ + returns a list of nodes reachable in an adjacency list from a + specified root + """ + fusers = [] + fusers.extend(adj[root]) + for k in adj[root]: + if adj[k] != []: + fusers.extend(self.reachable_nodes(adj, k)) + return fusers + + def sort_pf_by_line(self, pf_id, parfors_simple): + """ + pd_id - the parfors id + parfors_simple - the simple parfors map + """ + # this sorts parfors by source line number + pf = parfors_simple[pf_id][0] + pattern = pf.patterns[0] + line = max(0, pf.loc.line - 1) # why are these out by 1 ?! + filename = self.func_ir.loc.filename + nadj, nroots = self.compute_graph_info(self.nested_fusion_info) + fadj, froots = self.compute_graph_info(self.fusion_info) + graphs = [nadj, fadj] + + # If the parfor is internal, like internal prange, then the + # default line number is from its location in the numba source + # To get a more accurate line number, this first checks the + # adjacency graph for fused parfors that might not be internal + # and uses the minimum line number from there. If that fails + # (case where there's just a single internal parfor) the IR + # is walked backwards from the parfor location and the first non + # parfor statement line number is used. + if isinstance(pattern, tuple): + if pattern[1] == 'internal': + reported_loc = pattern[2][1] + if reported_loc.filename == filename: + return max(0, reported_loc.line - 1) + else: + # first recurse and check the adjacency list for + # something that is not an in internal parfor + tmp = [] + for adj in graphs: + if adj: # graph may be empty, e.g. 
no nesting + for k in adj[pf_id]: + tmp.append(self.sort_pf_by_line(k, parfors_simple)) + if tmp: + return max(0, min(tmp) - 1) + # second run through the parfor block to see if there's + # and reference to a line number in the user source + for blk in pf.loop_body.values(): + for stmt in blk.body: + if stmt.loc.filename == filename: + return max(0, stmt.loc.line - 1) + # finally run through the func_ir and look for the + # first non-parfor statement prior to this one and + # grab the line from that + for blk in self.func_ir.blocks.values(): + try: + idx = blk.body.index(pf) + for i in range(idx - 1, 0, -1): + stmt = blk.body[i] + if not isinstance(stmt, Parfor): + line = max(0, stmt.loc.line - 1) + break + except ValueError: + pass + return line + + def get_parfors_simple(self, print_loop_search): + parfors_simple = dict() + + # print in line order, parfors loop id is based on discovery order + for pf in sorted(self.initial_parfors, key=lambda x: x.loc.line): + # use 0 here, the parfors are mutated by the time this routine + # is called, however, fusion appends the patterns so we can just + # pull in the first as a "before fusion" emulation + r_pattern = pf.patterns[0] + pattern = pf.patterns[0] + loc = pf.loc + if isinstance(pattern, tuple): + if pattern[0] == 'prange': + if pattern[1] == 'internal': + replfn = '.'.join(reversed(list(pattern[2][0]))) + loc = pattern[2][1] + r_pattern = '%s %s' % (replfn, '(internal parallel version)') + elif pattern[1] == 'user': + r_pattern = "user defined prange" + elif pattern[1] == 'pndindex': + r_pattern = "internal pndindex" #FIXME: trace this! 
+ else: + assert 0 + fmt = 'Parallel for-loop #%s: is produced from %s:\n %s\n \n' + if print_loop_search: + print_wrapped(fmt % (pf.id, loc, r_pattern)) + parfors_simple[pf.id] = (pf, loc, r_pattern) + return parfors_simple + + def get_all_lines(self, parfors_simple): + # ensure adjacency lists are the same size for both sets of info + # (nests and fusion may not traverse the same space, for + # convenience [] is used as a condition to halt recursion) + fadj, froots = self.compute_graph_info(self.fusion_info) + nadj, _nroots = self.compute_graph_info(self.nested_fusion_info) + + if len(fadj) > len(nadj): + lim = len(fadj) + tmp = nadj + else: + lim = len(nadj) + tmp = fadj + for x in range(len(tmp), lim): + tmp.append([]) + + # This computes the roots of true loop nests (i.e. loops containing + # loops opposed to just a loop that's a root). + nroots = set() + if _nroots: + for r in _nroots: + if nadj[r] != []: + nroots.add(r) + all_roots = froots ^ nroots + + # This computes all the parfors at the top level that are either: + # - roots of loop fusion + # - roots of true loop nests + # it then combines these based on source line number for ease of + # producing output ordered in a manner similar to the code structure + froots_lines = {} + for x in froots: + line = self.sort_pf_by_line(x, parfors_simple) + froots_lines[line] = 'fuse', x, fadj + + nroots_lines = {} + for x in nroots: + line = self.sort_pf_by_line(x, parfors_simple) + nroots_lines[line] = 'nest', x, nadj + + all_lines = froots_lines.copy() + all_lines.update(nroots_lines) + return all_lines + + def source_listing(self, parfors_simple, purpose_str): + filename = self.func_ir.loc.filename + count = self.count_parfors() + func_name = self.func_ir.func_id.func + try: + lines = inspect.getsource(func_name).splitlines() + except OSError: # generated function + lines = None + if lines and parfors_simple: + src_width = max([len(x) for x in lines]) + map_line_to_pf = defaultdict(list) # parfors can alias lines 
+ for k, v in parfors_simple.items(): + # TODO: do a better job of tracking parfors that are not in + # this file but are referred to, e.g. np.arange() + if parfors_simple[k][1].filename == filename: + match_line = self.sort_pf_by_line(k, parfors_simple) + map_line_to_pf[match_line].append(str(k)) + + max_pf_per_line = max([1] + [len(x) for x in map_line_to_pf.values()]) + width = src_width + (1 + max_pf_per_line * (len(str(count)) + 2)) + newlines = [] + newlines.append('\n') + newlines.append('Parallel loop listing for %s' % purpose_str) + newlines.append(width * '-' + '|loop #ID') + fmt = '{0:{1}}| {2}' + # why are these off by 1? + lstart = max(0, self.func_ir.loc.line - 1) + for no, line in enumerate(lines, lstart): + pf_ids = map_line_to_pf.get(no, None) + if pf_ids is not None: + pfstr = '#' + ', '.join(pf_ids) + else: + pfstr = '' + stripped = line.strip('\n') + srclen = len(stripped) + if pf_ids: + l = fmt.format(width * '-', width, pfstr) + else: + l = fmt.format(width * ' ', width, pfstr) + newlines.append(stripped + l[srclen:]) + print('\n'.join(newlines)) + else: + print("No source available") + + def print_unoptimised(self, lines): + # This prints the unoptimised parfors state + sword = '+--' + fac = len(sword) + fadj, froots = self.compute_graph_info(self.fusion_info) + nadj, _nroots = self.compute_graph_info(self.nested_fusion_info) + + if len(fadj) > len(nadj): + lim = len(fadj) + tmp = nadj + else: + lim = len(nadj) + tmp = fadj + for x in range(len(tmp), lim): + tmp.append([]) + + def print_nest(fadj_, nadj_, theroot, reported, region_id): + def print_g(fadj_, nadj_, nroot, depth): + print_wrapped(fac * depth * ' ' + '%s%s %s' % (sword, nroot, '(parallel)')) + for k in nadj_[nroot]: + if nadj_[k] == []: + msg = [] + msg.append(fac * (depth + 1) * ' ' + '%s%s %s' % (sword, k, '(parallel)')) + if fadj_[k] != [] and k not in reported: + fused = self.reachable_nodes(fadj_, k) + for i in fused: + msg.append(fac * (depth + 1) * ' ' + '%s%s %s' % 
(sword, i, '(parallel)')) + reported.append(k) + print_wrapped('\n'.join(msg)) + else: + print_g(fadj_, nadj_, k, depth + 1) + + if nadj_[theroot] != []: + print_wrapped("Parallel region %s:" % region_id) + print_g(fadj_, nadj_, theroot, 0) + print("\n") + region_id = region_id + 1 + return region_id + + def print_fuse(ty, pf_id, adj, depth, region_id): + msg = [] + print_wrapped("Parallel region %s:" % region_id) + msg.append(fac * depth * ' ' + '%s%s %s' % (sword, pf_id, '(parallel)')) + if adj[pf_id] != []: + fused = sorted(self.reachable_nodes(adj, pf_id)) + for k in fused: + msg.append(fac * depth * ' ' + '%s%s %s' % (sword, k, '(parallel)')) + region_id = region_id + 1 + print_wrapped('\n'.join(msg)) + print("\n") + return region_id + + # Walk the parfors by src line and print optimised structure + region_id = 0 + reported = [] + for line, info in sorted(lines.items()): + opt_ty, pf_id, adj = info + if opt_ty == 'fuse': + if pf_id not in reported: + region_id = print_fuse('f', pf_id, adj, 0, region_id) + elif opt_ty == 'nest': + region_id = print_nest(fadj, nadj, pf_id, reported, region_id) + else: + assert 0 + + def print_optimised(self, lines): + # This prints the optimised output based on the transforms that + # occurred during loop fusion and rewriting of loop nests + sword = '+--' + fac = len(sword) + fadj, froots = self.compute_graph_info(self.fusion_info) + nadj, _nroots = self.compute_graph_info(self.nested_fusion_info) + + if len(fadj) > len(nadj): + lim = len(fadj) + tmp = nadj + else: + lim = len(nadj) + tmp = fadj + for x in range(len(tmp), lim): + tmp.append([]) + + summary = dict() + # region : {fused, serialized} + + def print_nest(fadj_, nadj_, theroot, reported, region_id): + def print_g(fadj_, nadj_, nroot, depth): + for k in nadj_[nroot]: + msg = fac * depth * ' ' + '%s%s %s' % (sword, k, '(serial') + if nadj_[k] == []: + fused = [] + if fadj_[k] != [] and k not in reported: + fused = sorted(self.reachable_nodes(fadj_, k)) + msg += ", fused 
with loop(s): " + msg += ', '.join([str(x) for x in fused]) + msg += ')' + reported.append(k) + print_wrapped(msg) + summary[region_id]['fused'] += len(fused) + else: + print_wrapped(msg + ')') + print_g(fadj_, nadj_, k, depth + 1) + summary[region_id]['serialized'] += 1 + + if nadj_[theroot] != []: + print_wrapped("Parallel region %s:" % region_id) + print_wrapped('%s%s %s' % (sword, theroot, '(parallel)')) + summary[region_id] = {'root': theroot, 'fused': 0, 'serialized': 0} + print_g(fadj_, nadj_, theroot, 1) + print("\n") + region_id = region_id + 1 + return region_id + + def print_fuse(ty, pf_id, adj, depth, region_id): + print_wrapped("Parallel region %s:" % region_id) + msg = fac * depth * ' ' + '%s%s %s' % (sword, pf_id, '(parallel') + fused = [] + if adj[pf_id] != []: + fused = sorted(self.reachable_nodes(adj, pf_id)) + msg += ", fused with loop(s): " + msg += ', '.join([str(x) for x in fused]) + + summary[region_id] = {'root': pf_id, 'fused': len(fused), 'serialized': 0} + msg += ')' + print_wrapped(msg) + print("\n") + region_id = region_id + 1 + return region_id + + # Walk the parfors by src line and print optimised structure + region_id = 0 + reported = [] + for line, info in sorted(lines.items()): + opt_ty, pf_id, adj = info + if opt_ty == 'fuse': + if pf_id not in reported: + region_id = print_fuse('f', pf_id, adj, 0, region_id) + elif opt_ty == 'nest': + region_id = print_nest(fadj, nadj, pf_id, reported, region_id) + else: + assert 0 + + # print the summary of the fuse/serialize rewrite + if summary: + for k, v in sorted(summary.items()): + msg = ('\n \nParallel region %s (loop #%s) had %s ' + 'loop(s) fused') + root = v['root'] + fused = v['fused'] + serialized = v['serialized'] + if serialized != 0: + msg += (' and %s loop(s) ' + 'serialized as part of the larger ' + 'parallel loop (#%s).') + print_wrapped(msg % (k, root, fused, serialized, root)) + else: + msg += '.' 
+ print_wrapped(msg % (k, root, fused)) + else: + print_wrapped("Parallel structure is already optimal.") + + def allocation_hoist(self): + found = False + print('Allocation hoisting:') + for pf_id, data in self.hoist_info.items(): + stmt = data.get('hoisted', []) + for inst in stmt: + if isinstance(inst.value, ir.Expr): + try: + attr = inst.value.attr + if attr == 'empty': + msg = ("The memory allocation derived from the " + "instruction at %s is hoisted out of the " + "parallel loop labelled #%s (it will be " + "performed before the loop is executed and " + "reused inside the loop):") + loc = inst.loc + print_wrapped(msg % (loc, pf_id)) + try: + path = os.path.relpath(loc.filename) + except ValueError: + path = os.path.abspath(loc.filename) + lines = linecache.getlines(path) + if lines and loc.line: + print_wrapped(" Allocation:: " + lines[0 if loc.line < 2 else loc.line - 1].strip()) + print_wrapped(" - numpy.empty() is used for the allocation.\n") + found = True + except (KeyError, AttributeError): + pass + if not found: + print_wrapped('No allocation hoisting found') + + def instruction_hoist(self): + print("") + print('Instruction hoisting:') + hoist_info_printed = False + if self.hoist_info: + for pf_id, data in self.hoist_info.items(): + hoisted = data.get('hoisted', None) + not_hoisted = data.get('not_hoisted', None) + if not hoisted and not not_hoisted: + print("loop #%s has nothing to hoist." 
% pf_id) + continue + + print("loop #%s:" % pf_id) + if hoisted: + print(" Has the following hoisted:") + [print(" %s" % y) for y in hoisted] + hoist_info_printed = True + if not_hoisted: + print(" Failed to hoist the following:") + [print(" %s: %s" % (y, x)) for x, y in not_hoisted] + hoist_info_printed = True + if not hoist_info_printed: + print_wrapped('No instruction hoisting found') + print_wrapped(80 * '-') + + def dump(self, level=1): + if not self.has_setup: + raise RuntimeError("self.setup has not been called") + name = self.func_ir.func_id.func_qualname + line = self.func_ir.loc + if self.internal_name in name: + purpose_str = 'Internal parallel functions ' + purpose = 'internal' + else: + purpose_str = ' Function %s, %s ' % (name, line) + purpose = 'user' + + print_loop_search = False + print_source_listing = False + print_fusion_search = False + print_fusion_summary = False + print_loopnest_rewrite = False + print_pre_optimised = False + print_post_optimised = False + print_allocation_hoist = False + print_instruction_hoist = False + print_internal = False + + # each level switches on progressively more output + if level in (1, 2, 3, 4): + print_source_listing = True + print_post_optimised = True + else: + raise ValueError("Report level unknown, should be one of 1, 2, 3, 4") + + if level in (2, 3, 4): + print_pre_optimised = True + + if level in (3, 4): + print_allocation_hoist = True + + if level == 3: + print_fusion_summary = True + print_loopnest_rewrite = True + + if level == 4: + print_fusion_search = True + print_instruction_hoist = True + print_internal = True + + if purpose == 'internal' and not print_internal: + return + + print_wrapped('\n ') + print_wrapped(_termwidth * "=") + print_wrapped((" Parallel Accelerator Optimizing: %s " % purpose_str).center(_termwidth, '=')) + print_wrapped(_termwidth * "=") + print_wrapped("") + +#----------- search section + if print_loop_search: + print_wrapped('Looking for parallel loops'.center(_termwidth, 
'-')) + parfors_simple = self.get_parfors_simple(print_loop_search) + + count = self.count_parfors() + if print_loop_search: + print_wrapped("\nFound %s parallel loops." % count) + print_wrapped('-' * _termwidth) + +#----------- augmented source section + filename = self.func_ir.loc.filename + try: + # Try to get a relative path + # ipython/jupyter input just returns as filename + path = os.path.relpath(filename) + except ValueError: + # Fallback to absolute path if error occurred in getting the + # relative path. + # This may happen on windows if the drive is different + path = os.path.abspath(filename) + + if print_source_listing: + self.source_listing(parfors_simple, purpose_str) + +#---------- these are used a lot here on in + sword = '+--' + parfors = self.get_parfors() # this is the mutated parfors + parfor_ids = [x.id for x in parfors] + n_parfors = len(parfor_ids) + +#----------- loop fusion section + if print_fusion_search or print_fusion_summary: + if not sequential_parfor_lowering: + print_wrapped(' Fusing loops '.center(_termwidth, '-')) + msg = ("Attempting fusion of parallel loops (combines loops " + "with similar properties)...\n") + print_wrapped(msg) + else: + msg = "Performing sequential lowering of loops...\n" + print_wrapped(msg) + print_wrapped(_termwidth * '-') + # if there are some parfors, print information about them! 
+ if n_parfors > -1: + def dump_graph_indented(a, root_msg, node_msg): + fac = len(sword) + def print_graph(adj, roots): + def print_g(adj, root, depth): + for k in adj[root]: + print_wrapped(fac * depth * ' ' + '%s%s %s' % (sword, k, node_msg)) + if adj[k] != []: + print_g(adj, k, depth + 1) + for r in roots: + print_wrapped('%s%s %s' % (sword, r, root_msg)) + print_g(l, r, 1) + print_wrapped("") + l, roots = self.compute_graph_info(a) + print_graph(l, roots) + + if print_fusion_search: + for report in self.fusion_reports: + l1, l2, msg = report + print_wrapped(" Trying to fuse loops #%s and #%s:" % (l1, l2)) + print_wrapped(" %s" % msg) + + if self.fusion_info != {}: + if print_fusion_summary: + print_wrapped("\n \nFused loop summary:\n") + + dump_graph_indented(self.fusion_info, 'has the following loops fused into it:', '(fused)') + + if print_fusion_summary: + if self.fusion_enabled: + after_fusion = "Following the attempted fusion of parallel for-loops" + else: + after_fusion = "With fusion disabled" + + print_wrapped(('\n{} there are {} parallel for-loop(s) (originating from loops labelled: {}).').format( + after_fusion, n_parfors, ', '.join(['#%s' % x for x in parfor_ids]))) + print_wrapped(_termwidth * '-') + print_wrapped("") + +#----------- loop nest section + if print_loopnest_rewrite: + if self.nested_fusion_info != {}: + print_wrapped((" Optimising loop nests ").center(_termwidth, '-')) + print_wrapped("Attempting loop nest rewrites (optimising for the largest parallel loops)...\n ") + root_msg = 'is a parallel loop' + node_msg = '--> rewritten as a serial loop' + dump_graph_indented(self.nested_fusion_info, root_msg, node_msg) + print_wrapped(_termwidth * '-') + print_wrapped("") + +#---------- compute various properties and orderings in the data for subsequent use + all_lines = self.get_all_lines(parfors_simple) + + if print_pre_optimised: + print(' Before Optimisation '.center(_termwidth,'-')) + self.print_unoptimised(all_lines) + print(_termwidth * 
'-') + + if print_post_optimised: + print(' After Optimisation '.center(_termwidth,'-')) + self.print_optimised(all_lines) + print(_termwidth * '-') + print_wrapped("") + print_wrapped(_termwidth * '-') + print_wrapped("\n ") + +#----------- LICM section + if print_allocation_hoist or print_instruction_hoist: + print_wrapped('Loop invariant code motion'.center(80, '-')) + + if print_allocation_hoist: + self.allocation_hoist() + + if print_instruction_hoist: + self.instruction_hoist() + + else: # there are no parfors + print_wrapped('Function %s, %s, has no parallel for-loops.'.format(name, line)) + + def __str__(self): + r = "ParforDiagnostics:\n" + r += repr(self.replaced_fns) + return r + + def __repr__(self): + r = "ParforDiagnostics" + return r + + +class PreParforPass(object): + """Preprocessing for the Parfor pass. It mostly inlines parallel + implementations of numpy functions if available. + """ + def __init__(self, func_ir, typemap, calltypes, typingctx, targetctx, + options, swapped={}, replace_functions_map=None): + self.func_ir = func_ir + self.typemap = typemap + self.calltypes = calltypes + self.typingctx = typingctx + self.targetctx = targetctx + self.options = options + # diagnostics + self.swapped = swapped + if replace_functions_map is None: + replace_functions_map = swap_functions_map + self.replace_functions_map = replace_functions_map + self.stats = { + 'replaced_func': 0, + 'replaced_dtype': 0, + } + + def run(self): + """Run pre-parfor processing pass. + """ + # e.g. convert A.sum() to np.sum(A) for easier match and optimization + canonicalize_array_math(self.func_ir, self.typemap, + self.calltypes, self.typingctx) + if self.options.numpy: + self._replace_parallel_functions(self.func_ir.blocks) + self.func_ir.blocks = simplify_CFG(self.func_ir.blocks) + + def _replace_parallel_functions(self, blocks): + """ + Replace functions with their parallel implementation in + replace_functions_map if available. 
+ The implementation code is inlined to enable more optimization. + """ + swapped = self.swapped + from numba.core.inline_closurecall import inline_closure_call + work_list = list(blocks.items()) + while work_list: + label, block = work_list.pop() + for i, instr in enumerate(block.body): + if isinstance(instr, ir.Assign): + lhs = instr.target + lhs_typ = self.typemap[lhs.name] + expr = instr.value + if isinstance(expr, ir.Expr) and expr.op == 'call': + # Try and inline known calls with their parallel implementations + def replace_func(): + func_def = get_definition(self.func_ir, expr.func) + callname = find_callname(self.func_ir, expr) + repl_func = self.replace_functions_map.get(callname, None) + # Handle method on array type + if (repl_func is None and + len(callname) == 2 and + isinstance(callname[1], ir.Var) and + isinstance(self.typemap[callname[1].name], + types.npytypes.Array)): + repl_func = replace_functions_ndarray.get(callname[0], None) + if repl_func is not None: + # Add the array that the method is on to the arg list. 
+ expr.args.insert(0, callname[1]) + + require(repl_func is not None) + typs = tuple(self.typemap[x.name] for x in expr.args) + kws_typs = {k: self.typemap[x.name] for k, x in expr.kws} + try: + new_func = repl_func(lhs_typ, *typs, **kws_typs) + except: + new_func = None + require(new_func is not None) + # bind arguments to the new_func + typs = utils.pysignature(new_func).bind(*typs, **kws_typs).args + + g = copy.copy(self.func_ir.func_id.func.__globals__) + g['numba'] = numba + g['np'] = numpy + g['math'] = math + # if the function being inlined has a function + # checking the inputs, find it and add it to globals + check = replace_functions_checkers_map.get(callname, + None) + if check is not None: + g[check.name] = check.func + # inline the parallel implementation + new_blocks, _ = inline_closure_call(self.func_ir, g, + block, i, new_func, self.typingctx, self.targetctx, + typs, self.typemap, self.calltypes, work_list) + call_table = get_call_table(new_blocks, topological_ordering=False) + + # find the prange in the new blocks and record it for use in diagnostics + for call in call_table: + for k, v in call.items(): + if v[0] == 'internal_prange': + swapped[k] = [callname, repl_func.__name__, func_def, block.body[i].loc] + break + return True + if guard(replace_func): + self.stats['replaced_func'] += 1 + break + elif (isinstance(expr, ir.Expr) and expr.op == 'getattr' and + expr.attr == 'dtype'): + # Replace getattr call "A.dtype" with numpy.dtype(). + # This helps remove superfluous dependencies from parfor. + typ = self.typemap[expr.value.name] + if isinstance(typ, types.npytypes.Array): + # Convert A.dtype to four statements. + # 1) Get numpy global. + # 2) Create var for known type of array as string + # constant. e.g. 'float64' + # 3) Get dtype function from numpy module. + # 4) Create var for numpy.dtype(var from #2). + + # Create var for numpy module. 
+ dtype = typ.dtype + scope = block.scope + loc = instr.loc + g_np_var = ir.Var(scope, mk_unique_var("$np_g_var"), loc) + self.typemap[g_np_var.name] = types.misc.Module(numpy) + g_np = ir.Global('np', numpy, loc) + g_np_assign = ir.Assign(g_np, g_np_var, loc) + + # Create var for the inferred type of the array + # e.g., 'float64' + dtype_str = str(dtype) + if dtype_str == 'bool': + dtype_str = 'bool_' + typ_var = ir.Var( + scope, mk_unique_var("$np_typ_var"), loc) + self.typemap[typ_var.name] = types.StringLiteral( + dtype_str) + typ_var_assign = ir.Assign( + ir.Const(dtype_str, loc), typ_var, loc) + + # Get the dtype function from the numpy module. + dtype_attr_var = ir.Var(scope, mk_unique_var("$dtype_attr_var"), loc) + temp = find_template(numpy.dtype) + tfunc = numba.core.types.Function(temp) + tfunc.get_call_type(self.typingctx, (self.typemap[typ_var.name],), {}) + self.typemap[dtype_attr_var.name] = types.functions.Function(temp) + dtype_attr_getattr = ir.Expr.getattr(g_np_var, 'dtype', loc) + dtype_attr_assign = ir.Assign(dtype_attr_getattr, dtype_attr_var, loc) + + # Call numpy.dtype on the statically coded type two steps above. + dtype_var = ir.Var(scope, mk_unique_var("$dtype_var"), loc) + self.typemap[dtype_var.name] = types.npytypes.DType(dtype) + dtype_getattr = ir.Expr.call(dtype_attr_var, [typ_var], (), loc) + dtype_assign = ir.Assign(dtype_getattr, dtype_var, loc) + self.calltypes[dtype_getattr] = signature( + self.typemap[dtype_var.name], self.typemap[typ_var.name]) + + # The original A.dtype rhs is replaced with result of this call. + instr.value = dtype_var + # Add statements to body of the code. 
+ block.body.insert(0, dtype_assign) + block.body.insert(0, dtype_attr_assign) + block.body.insert(0, typ_var_assign) + block.body.insert(0, g_np_assign) + self.stats['replaced_dtype'] += 1 + break + +def find_template(op): + for ft in numba.core.typing.templates.builtin_registry.functions: + if ft.key == op: + return ft + + +class ParforPassStates: + """This class encapsulates all internal states of the ParforPass. + """ + + def __init__(self, func_ir, typemap, calltypes, return_type, typingctx, + targetctx, options, flags, metadata, + diagnostics=ParforDiagnostics()): + self.func_ir = func_ir + self.typemap = typemap + self.calltypes = calltypes + self.typingctx = typingctx + self.targetctx = targetctx + self.return_type = return_type + self.options = options + self.diagnostics = diagnostics + self.swapped_fns = diagnostics.replaced_fns + self.fusion_info = diagnostics.fusion_info + self.nested_fusion_info = diagnostics.nested_fusion_info + + self.array_analysis = array_analysis.ArrayAnalysis( + self.typingctx, self.func_ir, self.typemap, self.calltypes, + ) + + ir_utils._the_max_label.update(max(func_ir.blocks.keys())) + self.flags = flags + self.metadata = metadata + if "parfors" not in metadata: + metadata["parfors"] = {} + + +class ConvertInplaceBinop: + """Parfor subpass to convert setitem on Arrays + """ + def __init__(self, pass_states): + """ + Parameters + ---------- + pass_states : ParforPassStates + """ + self.pass_states = pass_states + self.rewritten = [] + + def run(self, blocks): + pass_states = self.pass_states + # convert expressions like A += ... where A is an array. 
+ topo_order = find_topo_order(blocks) + # variables available in the program so far (used for finding map + # functions in array_expr lowering) + for label in topo_order: + block = blocks[label] + new_body = [] + equiv_set = pass_states.array_analysis.get_equiv_set(label) + for instr in block.body: + if isinstance(instr, ir.Assign): + lhs = instr.target + expr = instr.value + if isinstance(expr, ir.Expr) and expr.op == 'inplace_binop': + loc = expr.loc + target = expr.lhs + value = expr.rhs + target_typ = pass_states.typemap[target.name] + value_typ = pass_states.typemap[value.name] + # Handle A op= ... + if isinstance(target_typ, types.npytypes.Array): + # RHS is an array + if isinstance(value_typ, types.npytypes.Array): + new_instr = self._inplace_binop_to_parfor(equiv_set, + loc, expr.immutable_fn, target, value) + self.rewritten.append( + dict(old=instr, new=new_instr, + reason='inplace_binop'), + ) + instr = [new_instr, ir.Assign(target, lhs, loc)] + if isinstance(instr, list): + new_body.extend(instr) + else: + new_body.append(instr) + block.body = new_body + + def _inplace_binop_to_parfor(self, equiv_set, loc, op, target, value): + """generate parfor from setitem node with a boolean or slice array indices. + The value can be either a scalar or an array variable, and if a boolean index + is used for the latter case, the same index must be used for the value too. 
+ """ + pass_states = self.pass_states + scope = target.scope + arr_typ = pass_states.typemap[target.name] + el_typ = arr_typ.dtype + init_block = ir.Block(scope, loc) + value_typ = pass_states.typemap[value.name] + + size_vars = equiv_set.get_shape(target) + + # generate loopnests and size variables from target correlations + index_vars, loopnests = _mk_parfor_loops(pass_states.typemap, size_vars, scope, loc) + + # generate body + body_label = next_label() + body_block = ir.Block(scope, loc) + index_var, index_var_typ = _make_index_var( + pass_states.typemap, scope, index_vars, body_block) + + # Read value. + value_var = ir.Var(scope, mk_unique_var("$value_var"), loc) + pass_states.typemap[value_var.name] = value_typ.dtype + getitem_call = ir.Expr.getitem(value, index_var, loc) + pass_states.calltypes[getitem_call] = signature( + value_typ.dtype, value_typ, index_var_typ) + body_block.body.append(ir.Assign(getitem_call, value_var, loc)) + + # Read target + target_var = ir.Var(scope, mk_unique_var("$target_var"), loc) + pass_states.typemap[target_var.name] = el_typ + getitem_call = ir.Expr.getitem(target, index_var, loc) + pass_states.calltypes[getitem_call] = signature( + el_typ, arr_typ, index_var_typ) + body_block.body.append(ir.Assign(getitem_call, target_var, loc)) + + # Create temp to hold result. + expr_out_var = ir.Var(scope, mk_unique_var("$expr_out_var"), loc) + pass_states.typemap[expr_out_var.name] = el_typ + + # Create binop and assign result to temporary. 
+ binop_expr = ir.Expr.binop(op, target_var, value_var, loc) + body_block.body.append(ir.Assign(binop_expr, expr_out_var, loc)) + unified_type = self.pass_states.typingctx.unify_pairs(el_typ, value_typ.dtype) + pass_states.calltypes[binop_expr] = signature( + unified_type, unified_type, unified_type) + + # Write to target + setitem_node = ir.SetItem(target, index_var, expr_out_var, loc) + pass_states.calltypes[setitem_node] = signature( + types.none, arr_typ, index_var_typ, el_typ) + body_block.body.append(setitem_node) + + parfor = Parfor(loopnests, init_block, {}, loc, index_var, equiv_set, + ('inplace_binop', ''), pass_states.flags) + parfor.loop_body = {body_label: body_block} + if config.DEBUG_ARRAY_OPT >= 1: + print("parfor from inplace_binop") + parfor.dump() + return parfor + + def _type_getitem(self, args): + fnty = operator.getitem + return self.pass_states.typingctx.resolve_function_type(fnty, tuple(args), {}) + + +def get_index_var(x): + return x.index if isinstance(x, ir.SetItem) else x.index_var + + +class ConvertSetItemPass: + """Parfor subpass to convert setitem on Arrays + """ + def __init__(self, pass_states): + """ + Parameters + ---------- + pass_states : ParforPassStates + """ + self.pass_states = pass_states + self.rewritten = [] + + def run(self, blocks): + pass_states = self.pass_states + # convert setitem expressions like A[C] = c or A[C] = B[C] to parfor, + # where C is a boolean array. 
+ topo_order = find_topo_order(blocks) + # variables available in the program so far (used for finding map + # functions in array_expr lowering) + for label in topo_order: + block = blocks[label] + new_body = [] + equiv_set = pass_states.array_analysis.get_equiv_set(label) + for instr in block.body: + if isinstance(instr, (ir.StaticSetItem, ir.SetItem)): + loc = instr.loc + target = instr.target + index = get_index_var(instr) + value = instr.value + target_typ = pass_states.typemap[target.name] + index_typ = pass_states.typemap[index.name] + value_typ = pass_states.typemap[value.name] + # Handle A[boolean_array] = + if isinstance(target_typ, types.npytypes.Array): + if (isinstance(index_typ, types.npytypes.Array) and + isinstance(index_typ.dtype, types.Boolean) and + target_typ.ndim == index_typ.ndim): + # RHS is a scalar number + if isinstance(value_typ, types.Number): + new_instr = self._setitem_to_parfor(equiv_set, + loc, target, index, value) + self.rewritten.append( + dict(old=instr, new=new_instr, + reason='masked_assign_broadcast_scalar'), + ) + instr = new_instr + # RHS is an array + elif isinstance(value_typ, types.npytypes.Array): + val_def = guard(get_definition, pass_states.func_ir, + value.name) + if (isinstance(val_def, ir.Expr) and + val_def.op == 'getitem' and + val_def.index.name == index.name): + new_instr = self._setitem_to_parfor(equiv_set, + loc, target, index, val_def.value) + self.rewritten.append( + dict(old=instr, new=new_instr, + reason='masked_assign_array'), + ) + instr = new_instr + else: + # Handle A[:] = x + shape = equiv_set.get_shape(instr) + # Don't converted broadcasted setitems into parfors. + if isinstance(index_typ, types.BaseTuple): + # The sliced dims are those in the index that + # are made of slices. Count the numbers of slices + # in the index tuple. 
+ sliced_dims = len(list(filter( + lambda x: isinstance(x, types.misc.SliceType), + index_typ.types))) + elif isinstance(index_typ, types.misc.SliceType): + # For singular indices there can be a bare slice + # and if so there is one dimension being set. + sliced_dims = 1 + else: + sliced_dims = 0 + + # Only create a parfor for this setitem if we know the + # shape of the output and number of dimensions set is + # equal to the number of dimensions on the right side. + if (shape is not None and + (not isinstance(value_typ, types.npytypes.Array) or + sliced_dims == value_typ.ndim)): + new_instr = self._setitem_to_parfor(equiv_set, + loc, target, index, value, shape=shape) + self.rewritten.append( + dict(old=instr, new=new_instr, + reason='slice'), + ) + instr = new_instr + new_body.append(instr) + block.body = new_body + + def _setitem_to_parfor(self, equiv_set, loc, target, index, value, shape=None): + """generate parfor from setitem node with a boolean or slice array indices. + The value can be either a scalar or an array variable, and if a boolean index + is used for the latter case, the same index must be used for the value too. + """ + pass_states = self.pass_states + scope = target.scope + arr_typ = pass_states.typemap[target.name] + el_typ = arr_typ.dtype + index_typ = pass_states.typemap[index.name] + init_block = ir.Block(scope, loc) + + if shape: + # Slice index is being used on the target array, we'll have to create + # a sub-array so that the target dimension matches the given shape. 
+ assert(isinstance(index_typ, types.BaseTuple) or + isinstance(index_typ, types.SliceType)) + # setitem has a custom target shape + size_vars = shape + # create a new target array via getitem + subarr_var = ir.Var(scope, mk_unique_var("$subarr"), loc) + getitem_call = ir.Expr.getitem(target, index, loc) + subarr_typ = typing.arraydecl.get_array_index_type( arr_typ, index_typ).result + pass_states.typemap[subarr_var.name] = subarr_typ + pass_states.calltypes[getitem_call] = self._type_getitem((arr_typ, index_typ)) + init_block.append(ir.Assign(getitem_call, subarr_var, loc)) + target = subarr_var + else: + # Otherwise it is a boolean array that is used as index. + assert(isinstance(index_typ, types.ArrayCompatible)) + size_vars = equiv_set.get_shape(target) + bool_typ = index_typ.dtype + + # generate loopnests and size variables from lhs correlations + loopnests = [] + index_vars = [] + for size_var in size_vars: + index_var = ir.Var(scope, mk_unique_var("parfor_index"), loc) + index_vars.append(index_var) + pass_states.typemap[index_var.name] = types.uintp + loopnests.append(LoopNest(index_var, 0, size_var, 1)) + + # generate body + body_label = next_label() + body_block = ir.Block(scope, loc) + index_var, index_var_typ = _make_index_var( + pass_states.typemap, scope, index_vars, body_block) + parfor = Parfor(loopnests, init_block, {}, loc, index_var, equiv_set, + ('setitem', ''), pass_states.flags) + if shape: + # slice subarray + parfor.loop_body = {body_label: body_block} + true_block = body_block + end_label = None + else: + # boolean mask + true_label = next_label() + true_block = ir.Block(scope, loc) + end_label = next_label() + end_block = ir.Block(scope, loc) + parfor.loop_body = {body_label: body_block, + true_label: true_block, + end_label: end_block, + } + mask_var = ir.Var(scope, mk_unique_var("$mask_var"), loc) + pass_states.typemap[mask_var.name] = bool_typ + mask_val = ir.Expr.getitem(index, index_var, loc) + body_block.body.extend([ + 
ir.Assign(mask_val, mask_var, loc), + ir.Branch(mask_var, true_label, end_label, loc) + ]) + + value_typ = pass_states.typemap[value.name] + if isinstance(value_typ, types.npytypes.Array): + value_var = ir.Var(scope, mk_unique_var("$value_var"), loc) + pass_states.typemap[value_var.name] = value_typ.dtype + getitem_call = ir.Expr.getitem(value, index_var, loc) + pass_states.calltypes[getitem_call] = signature( + value_typ.dtype, value_typ, index_var_typ) + true_block.body.append(ir.Assign(getitem_call, value_var, loc)) + else: + value_var = value + setitem_node = ir.SetItem(target, index_var, value_var, loc) + pass_states.calltypes[setitem_node] = signature( + types.none, pass_states.typemap[target.name], index_var_typ, el_typ) + true_block.body.append(setitem_node) + if end_label: + true_block.body.append(ir.Jump(end_label, loc)) + + if config.DEBUG_ARRAY_OPT >= 1: + print("parfor from setitem") + parfor.dump() + return parfor + + def _type_getitem(self, args): + fnty = operator.getitem + return self.pass_states.typingctx.resolve_function_type(fnty, tuple(args), {}) + + +def _make_index_var(typemap, scope, index_vars, body_block, force_tuple=False): + """ When generating a SetItem call to an array in a parfor, the general + strategy is to generate a tuple if the array is more than 1 dimension. + If it is 1 dimensional then you can use a simple variable. This routine + is also used when converting pndindex to parfor but pndindex requires a + tuple even if the iteration space is 1 dimensional. The pndindex use of + this function will use force_tuple to make the output index a tuple even + if it is one dimensional. 
+ """ + ndims = len(index_vars) + loc = body_block.loc + if ndims > 1 or force_tuple: + tuple_var = ir.Var(scope, mk_unique_var( + "$parfor_index_tuple_var"), loc) + typemap[tuple_var.name] = types.containers.UniTuple( + types.uintp, ndims) + tuple_call = ir.Expr.build_tuple(list(index_vars), loc) + tuple_assign = ir.Assign(tuple_call, tuple_var, loc) + body_block.body.append(tuple_assign) + return tuple_var, types.containers.UniTuple(types.uintp, ndims) + elif ndims == 1: + return index_vars[0], types.uintp + else: + raise errors.UnsupportedRewriteError( + "Parfor does not handle arrays of dimension 0", + loc=loc, + ) + + +def _mk_parfor_loops(typemap, size_vars, scope, loc): + """ + Create loop index variables and build LoopNest objects for a parfor. + """ + loopnests = [] + index_vars = [] + for size_var in size_vars: + index_var = ir.Var(scope, mk_unique_var("parfor_index"), loc) + index_vars.append(index_var) + typemap[index_var.name] = types.uintp + loopnests.append(LoopNest(index_var, 0, size_var, 1)) + return index_vars, loopnests + +class ConvertNumpyPass: + """ + Convert supported Numpy functions, as well as arrayexpr nodes, to + parfor nodes. 
+ """ + def __init__(self, pass_states): + self.pass_states = pass_states + self.rewritten = [] + + def run(self, blocks): + pass_states = self.pass_states + topo_order = find_topo_order(blocks) + # variables available in the program so far (used for finding map + # functions in array_expr lowering) + avail_vars = [] + for label in topo_order: + block = blocks[label] + new_body = [] + equiv_set = pass_states.array_analysis.get_equiv_set(label) + for instr in block.body: + if isinstance(instr, ir.Assign): + expr = instr.value + lhs = instr.target + lhs_typ = self.pass_states.typemap[lhs.name] + if self._is_C_or_F_order(lhs_typ): + if guard(self._is_supported_npycall, expr): + new_instr = self._numpy_to_parfor(equiv_set, lhs, expr) + if new_instr is not None: + self.rewritten.append(dict( + old=instr, + new=new_instr, + reason='numpy_allocator', + )) + instr = new_instr + elif isinstance(expr, ir.Expr) and expr.op == 'arrayexpr': + new_instr = self._arrayexpr_to_parfor( + equiv_set, lhs, expr, avail_vars) + self.rewritten.append(dict( + old=instr, + new=new_instr, + reason='arrayexpr', + )) + instr = new_instr + avail_vars.append(lhs.name) + new_body.append(instr) + block.body = new_body + + def _is_C_order(self, arr_name): + if isinstance(arr_name, types.npytypes.Array): + return arr_name.layout == 'C' and arr_name.ndim > 0 + elif arr_name is str: + typ = self.pass_states.typemap[arr_name] + return (isinstance(typ, types.npytypes.Array) and + typ.layout == 'C' and + typ.ndim > 0) + else: + return False + + def _is_C_or_F_order(self, arr_name): + if isinstance(arr_name, types.npytypes.Array): + return (arr_name.layout == 'C' or arr_name.layout == 'F') and arr_name.ndim > 0 + elif arr_name is str: + typ = self.pass_states.typemap[arr_name] + return (isinstance(typ, types.npytypes.Array) and + (typ.layout == 'C' or typ.layout == 'F') and + typ.ndim > 0) + else: + return False + + def _arrayexpr_to_parfor(self, equiv_set, lhs, arrayexpr, avail_vars): + """generate 
parfor from arrayexpr node, which is essentially a + map with recursive tree. + """ + pass_states = self.pass_states + scope = lhs.scope + loc = lhs.loc + expr = arrayexpr.expr + arr_typ = pass_states.typemap[lhs.name] + el_typ = arr_typ.dtype + + # generate loopnests and size variables from lhs correlations + size_vars = equiv_set.get_shape(lhs) + index_vars, loopnests = _mk_parfor_loops(pass_states.typemap, size_vars, scope, loc) + + # generate init block and body + init_block = ir.Block(scope, loc) + init_block.body = mk_alloc( + pass_states.typingctx, + pass_states.typemap, pass_states.calltypes, lhs, + tuple(size_vars), el_typ, scope, loc, + pass_states.typemap[lhs.name]) + body_label = next_label() + body_block = ir.Block(scope, loc) + expr_out_var = ir.Var(scope, mk_unique_var("$expr_out_var"), loc) + pass_states.typemap[expr_out_var.name] = el_typ + + index_var, index_var_typ = _make_index_var( + pass_states.typemap, scope, index_vars, body_block) + + body_block.body.extend( + _arrayexpr_tree_to_ir( + pass_states.func_ir, + pass_states.typingctx, + pass_states.typemap, + pass_states.calltypes, + equiv_set, + init_block, + expr_out_var, + expr, + index_var, + index_vars, + avail_vars)) + + pat = ('array expression {}'.format(repr_arrayexpr(arrayexpr.expr)),) + + parfor = Parfor(loopnests, init_block, {}, loc, index_var, equiv_set, pat[0], pass_states.flags) + + setitem_node = ir.SetItem(lhs, index_var, expr_out_var, loc) + pass_states.calltypes[setitem_node] = signature( + types.none, pass_states.typemap[lhs.name], index_var_typ, el_typ) + body_block.body.append(setitem_node) + parfor.loop_body = {body_label: body_block} + if config.DEBUG_ARRAY_OPT >= 1: + print("parfor from arrayexpr") + parfor.dump() + return parfor + + def _is_supported_npycall(self, expr): + """check if we support parfor translation for + this Numpy call. 
+ """ + call_name, mod_name = find_callname(self.pass_states.func_ir, expr) + if not (isinstance(mod_name, str) and mod_name.startswith('numpy')): + return False + if call_name in ['zeros', 'ones']: + return True + if mod_name == 'numpy.random' and call_name in random_calls: + return True + # TODO: add more calls + return False + + def _numpy_to_parfor(self, equiv_set, lhs, expr): + call_name, mod_name = find_callname(self.pass_states.func_ir, expr) + args = expr.args + kws = dict(expr.kws) + if call_name in ['zeros', 'ones'] or mod_name == 'numpy.random': + return self._numpy_map_to_parfor(equiv_set, call_name, lhs, args, kws, expr) + # return error if we couldn't handle it (avoid rewrite infinite loop) + raise errors.UnsupportedRewriteError( + f"parfor translation failed for {expr}", loc=expr.loc, + ) + + def _numpy_map_to_parfor(self, equiv_set, call_name, lhs, args, kws, expr): + """generate parfor from Numpy calls that are maps. + """ + pass_states = self.pass_states + scope = lhs.scope + loc = lhs.loc + arr_typ = pass_states.typemap[lhs.name] + el_typ = arr_typ.dtype + + # generate loopnests and size variables from lhs correlations + size_vars = equiv_set.get_shape(lhs) + if size_vars is None: + if config.DEBUG_ARRAY_OPT >= 1: + print("Could not convert numpy map to parfor, unknown size") + return None + + index_vars, loopnests = _mk_parfor_loops(pass_states.typemap, size_vars, scope, loc) + + # generate init block and body + init_block = ir.Block(scope, loc) + init_block.body = mk_alloc( + pass_states.typingctx, + pass_states.typemap, pass_states.calltypes, lhs, + tuple(size_vars), el_typ, scope, loc, + pass_states.typemap[lhs.name]) + body_label = next_label() + body_block = ir.Block(scope, loc) + expr_out_var = ir.Var(scope, mk_unique_var("$expr_out_var"), loc) + pass_states.typemap[expr_out_var.name] = el_typ + + index_var, index_var_typ = _make_index_var( + pass_states.typemap, scope, index_vars, body_block) + + if call_name == 'zeros': + value = 
ir.Const(el_typ(0), loc) + elif call_name == 'ones': + value = ir.Const(el_typ(1), loc) + elif call_name in random_calls: + # remove size arg to reuse the call expr for single value + _remove_size_arg(call_name, expr) + # update expr type + new_arg_typs, new_kw_types = _get_call_arg_types( + expr, pass_states.typemap) + pass_states.calltypes.pop(expr) + pass_states.calltypes[expr] = pass_states.typemap[expr.func.name].get_call_type( + typing.Context(), new_arg_typs, new_kw_types) + value = expr + else: + raise NotImplementedError( + "Map of numpy.{} to parfor is not implemented".format(call_name)) + + value_assign = ir.Assign(value, expr_out_var, loc) + body_block.body.append(value_assign) + + setitem_node = ir.SetItem(lhs, index_var, expr_out_var, loc) + pass_states.calltypes[setitem_node] = signature( + types.none, pass_states.typemap[lhs.name], index_var_typ, el_typ) + body_block.body.append(setitem_node) + + parfor = Parfor(loopnests, init_block, {}, loc, index_var, equiv_set, + ('{} function'.format(call_name,), 'NumPy mapping'), + pass_states.flags) + parfor.loop_body = {body_label: body_block} + if config.DEBUG_ARRAY_OPT >= 1: + print("generated parfor for numpy map:") + parfor.dump() + return parfor + + +class ConvertReducePass: + """ + Find reduce() calls and convert them to parfors. 
+ """ + def __init__(self, pass_states): + self.pass_states = pass_states + self.rewritten = [] + + def run(self, blocks): + pass_states = self.pass_states + + topo_order = find_topo_order(blocks) + for label in topo_order: + block = blocks[label] + new_body = [] + equiv_set = pass_states.array_analysis.get_equiv_set(label) + for instr in block.body: + parfor = None + if isinstance(instr, ir.Assign): + loc = instr.loc + lhs = instr.target + expr = instr.value + callname = guard(find_callname, pass_states.func_ir, expr) + if (callname == ('reduce', 'builtins') + or callname == ('reduce', '_functools')): + # reduce function with generic function + parfor = guard(self._reduce_to_parfor, equiv_set, lhs, + expr.args, loc) + if parfor: + self.rewritten.append(dict( + new=parfor, + old=instr, + reason='reduce', + )) + instr = parfor + new_body.append(instr) + block.body = new_body + return + + def _reduce_to_parfor(self, equiv_set, lhs, args, loc): + """ + Convert a reduce call to a parfor. + The call arguments should be (call_name, array, init_value). 
+ """ + pass_states = self.pass_states + + scope = lhs.scope + call_name = args[0] + in_arr = args[1] + arr_def = get_definition(pass_states.func_ir, in_arr.name) + + mask_var = None + mask_indices = None + + # Search for array[boolean_mask] + mask_query_result = guard(_find_mask, pass_states.typemap, pass_states.func_ir, arr_def) + if mask_query_result: + in_arr, mask_var, mask_typ, mask_indices = mask_query_result + + init_val = args[2] + size_vars = equiv_set.get_shape(in_arr if mask_indices is None else mask_var) + if size_vars is None: + return None + + index_vars, loopnests = _mk_parfor_loops(pass_states.typemap, size_vars, scope, loc) + mask_index = index_vars + if mask_indices: + # the following is never tested + raise AssertionError("unreachable") + index_vars = tuple(x if x else index_vars[0] for x in mask_indices) + acc_var = lhs + + # init block has to init the reduction variable + init_block = ir.Block(scope, loc) + init_block.body.append(ir.Assign(init_val, acc_var, loc)) + + # produce loop body + body_label = next_label() + index_var, loop_body = self._mk_reduction_body(call_name, + scope, loc, index_vars, in_arr, acc_var) + if mask_indices: + # the following is never tested + raise AssertionError("unreachable") + index_var = mask_index[0] + + if mask_var is not None: + true_label = min(loop_body.keys()) + false_label = max(loop_body.keys()) + body_block = ir.Block(scope, loc) + loop_body[body_label] = body_block + mask = ir.Var(scope, mk_unique_var("$mask_val"), loc) + pass_states.typemap[mask.name] = mask_typ + mask_val = ir.Expr.getitem(mask_var, index_var, loc) + body_block.body.extend([ + ir.Assign(mask_val, mask, loc), + ir.Branch(mask, true_label, false_label, loc) + ]) + + parfor = Parfor(loopnests, init_block, loop_body, loc, index_var, + equiv_set, ('{} function'.format(call_name), + 'reduction'), pass_states.flags) + if config.DEBUG_ARRAY_OPT >= 1: + print("parfor from reduction") + parfor.dump() + return parfor + + def 
_mk_reduction_body(self, call_name, scope, loc, + index_vars, in_arr, acc_var): + """ + Produce the body blocks for a reduction function indicated by call_name. + """ + from numba.core.inline_closurecall import check_reduce_func + + pass_states = self.pass_states + reduce_func = get_definition(pass_states.func_ir, call_name) + fcode = check_reduce_func(pass_states.func_ir, reduce_func) + + arr_typ = pass_states.typemap[in_arr.name] + in_typ = arr_typ.dtype + body_block = ir.Block(scope, loc) + index_var, index_var_type = _make_index_var( + pass_states.typemap, scope, index_vars, body_block) + + tmp_var = ir.Var(scope, mk_unique_var("$val"), loc) + pass_states.typemap[tmp_var.name] = in_typ + getitem_call = ir.Expr.getitem(in_arr, index_var, loc) + pass_states.calltypes[getitem_call] = signature( + in_typ, arr_typ, index_var_type) + body_block.append(ir.Assign(getitem_call, tmp_var, loc)) + + reduce_f_ir = compile_to_numba_ir(fcode, + pass_states.func_ir.func_id.func.__globals__, + pass_states.typingctx, + pass_states.targetctx, + (in_typ, in_typ), + pass_states.typemap, + pass_states.calltypes) + loop_body = reduce_f_ir.blocks + end_label = next_label() + end_block = ir.Block(scope, loc) + loop_body[end_label] = end_block + first_reduce_label = min(reduce_f_ir.blocks.keys()) + first_reduce_block = reduce_f_ir.blocks[first_reduce_label] + body_block.body.extend(first_reduce_block.body) + first_reduce_block.body = body_block.body + replace_arg_nodes(first_reduce_block, [acc_var, tmp_var]) + replace_returns(loop_body, acc_var, end_label) + return index_var, loop_body + + +class ConvertLoopPass: + """Build Parfor nodes from prange loops. 
+ """ + def __init__(self, pass_states): + self.pass_states = pass_states + self.rewritten = [] + + def run(self, blocks): + pass_states = self.pass_states + + call_table, _ = get_call_table(blocks) + cfg = compute_cfg_from_blocks(blocks) + usedefs = compute_use_defs(blocks) + live_map = compute_live_map(cfg, blocks, usedefs.usemap, usedefs.defmap) + loops = cfg.loops() + sized_loops = [(loops[k], len(loops[k].body)) for k in loops.keys()] + moved_blocks = [] + # We go over all loops, smaller loops first (inner first) + for loop, s in sorted(sized_loops, key=lambda tup: tup[1]): + if len(loop.entries) != 1 or len(loop.exits) != 1: + if not config.DISABLE_PERFORMANCE_WARNINGS: + for entry in loop.entries: + for inst in blocks[entry].body: + # if prange or pndindex call + if ( + isinstance(inst, ir.Assign) + and isinstance(inst.value, ir.Expr) + and inst.value.op == "call" + and self._is_parallel_loop( + inst.value.func.name, call_table) + ): + msg = "\nprange or pndindex loop " \ + "will not be executed in " \ + "parallel due to there being more than one " \ + "entry to or exit from the loop (e.g., an " \ + "assertion)." + warnings.warn( + errors.NumbaPerformanceWarning( + msg, inst.loc)) + continue + + entry = list(loop.entries)[0] + for inst in blocks[entry].body: + # if prange or pndindex call + if (isinstance(inst, ir.Assign) + and isinstance(inst.value, ir.Expr) + and inst.value.op == 'call' + and self._is_parallel_loop(inst.value.func.name, call_table)): + # Here we've found a parallel loop, either prange or pndindex. + # We create a parfor from this loop and then overwrite the contents + # of the original loop header block to contain this parfor and then + # a jump to the original loop exit block. Other blocks in the + # original loop are discarded. 
+ body_labels = [ l for l in loop.body if + l in blocks and l != loop.header ] + args = inst.value.args + loop_kind, loop_replacing = self._get_loop_kind(inst.value.func.name, + call_table) + # Get the body of the header of the loops minus the branch terminator + # The general approach is to prepend the header block to the first + # body block and then let dead code removal handle removing unneeded + # statements. Not all statements in the header block are unnecessary. + header_body = blocks[loop.header].body[:-1] + # find loop index variable (pair_first in header block) + loop_index = None + for hbi, stmt in enumerate(header_body): + if (isinstance(stmt, ir.Assign) + and isinstance(stmt.value, ir.Expr) + and stmt.value.op == 'pair_first'): + loop_index = stmt.target.name + li_index = hbi + break + assert(loop_index is not None) + # Remove pair_first from header. + # We have to remove the pair_first by hand since it causes problems + # for some code below if we don't. + header_body = header_body[:li_index] + header_body[li_index+1:] + + # loop_index may be assigned to other vars + # get header copies to find all of them + cps, _ = get_block_copies({0: blocks[loop.header]}, + pass_states.typemap) + cps = cps[0] + loop_index_vars = set(t for t, v in cps if v == loop_index) + loop_index_vars.add(loop_index) + + scope = blocks[entry].scope + loc = inst.loc + equiv_set = pass_states.array_analysis.get_equiv_set(loop.header) + init_block = ir.Block(scope, loc) + init_block.body = self._get_prange_init_block(blocks[entry], + call_table, args) + loop_body = {l: blocks[l] for l in body_labels} + # Add an empty block to the end of loop body + end_label = next_label() + loop_body[end_label] = ir.Block(scope, loc) + + # Detect races in the prange. + # Races are defs in the parfor body that are live at the exit block. 
+ bodydefs = set() + for bl in body_labels: + bodydefs = bodydefs.union(usedefs.defmap[bl]) + exit_lives = set() + for bl in loop.exits: + exit_lives = exit_lives.union(live_map[bl]) + races = bodydefs.intersection(exit_lives) + # It is possible for the result of an ir.Global to be flagged + # as a race if it is defined in this Parfor and then used in + # a subsequent Parfor. push_call_vars() in the Parfor pass + # copies such ir.Global nodes into the Parfors in which they + # are used so no need to treat things of type Module as a race. + races = races.intersection({x for x in races + if not isinstance(pass_states.typemap[x], types.misc.Module)}) + + # replace jumps to header block with the end block + for l in body_labels: + last_inst = loop_body[l].body[-1] + if (isinstance(last_inst, ir.Jump) and + last_inst.target == loop.header): + last_inst.target = end_label + + def find_indexed_arrays(): + """find expressions that involve getitem using the + index variable. Return both the arrays and expressions. + """ + indices = copy.copy(loop_index_vars) + for block in loop_body.values(): + for inst in block.find_insts(ir.Assign): + if (isinstance(inst.value, ir.Var) and + inst.value.name in indices): + indices.add(inst.target.name) + arrs = [] + exprs = [] + for block in loop_body.values(): + for inst in block.body: + lv = set(x.name for x in inst.list_vars()) + if lv & indices: + if lv.issubset(indices): + continue + require(isinstance(inst, ir.Assign)) + expr = inst.value + require(isinstance(expr, ir.Expr) and + expr.op in ['getitem', 'static_getitem']) + arrs.append(expr.value.name) + exprs.append(expr) + return arrs, exprs + + mask_var = None + mask_indices = None + def find_mask_from_size(size_var): + """Find the case where size_var is defined by A[M].shape, + where M is a boolean array. 
+ """ + size_def = get_definition(pass_states.func_ir, size_var) + require(size_def and isinstance(size_def, ir.Expr) and + size_def.op == 'getattr' and size_def.attr == 'shape') + arr_var = size_def.value + live_vars = set.union(*[live_map[l] for l in loop.exits]) + index_arrs, index_exprs = find_indexed_arrays() + require([arr_var.name] == list(index_arrs)) + # input array has to be dead after loop + require(arr_var.name not in live_vars) + # loop for arr's definition, where size = arr.shape + arr_def = get_definition(pass_states.func_ir, size_def.value) + result = _find_mask(pass_states.typemap, pass_states.func_ir, arr_def) + + # The following is never tested. + raise AssertionError("unreachable") + # Found the mask. + # Replace B[i] with A[i], where B = A[M] + for expr in index_exprs: + expr.value = result[0] + return result + + # pndindex and prange are provably positive except when + # user provides negative start to prange() + unsigned_index = True + # TODO: support array mask optimization for prange + # TODO: refactor and simplify array mask optimization + if loop_kind == 'pndindex': + assert(equiv_set.has_shape(args[0])) + # see if input array to pndindex is output of array + # mask like B = A[M] + result = guard(find_mask_from_size, args[0]) + if result: + in_arr, mask_var, mask_typ, mask_indices = result + else: + in_arr = args[0] + assert(isinstance(in_arr, ir.Var)) + in_arr_typ = pass_states.typemap[in_arr.name] + if isinstance(in_arr_typ, types.Integer): + index_var = ir.Var(scope, mk_unique_var("parfor_index"), loc) + pass_states.typemap[index_var.name] = types.uintp + loops = [LoopNest(index_var, 0, in_arr, 1)] + index_vars = [index_var] + else: + size_vars = equiv_set.get_shape(in_arr + if mask_indices is None else mask_var) + index_vars, loops = _mk_parfor_loops( + pass_states.typemap, size_vars, scope, loc, + ) + assert(len(loops) > 0) + orig_index = index_vars + if mask_indices: + # replace mask indices if required; + # integer indices of 
original array should be used + # instead of parfor indices + index_vars = tuple(x if x else index_vars[0] + for x in mask_indices) + first_body_block = loop_body[min(loop_body.keys())] + body_block = ir.Block(scope, loc) + index_var, index_var_typ = _make_index_var( + pass_states.typemap, scope, index_vars, body_block, + force_tuple=True + ) + body = body_block.body + first_body_block.body + first_body_block.body = body + if mask_indices: + orig_index_var = orig_index[0] + else: + orig_index_var = index_var + + # if masked array optimization is being applied, create + # the branch for array selection + if mask_var is not None: + # The following code are not tested + raise AssertionError("unreachable") + body_label = next_label() + # loop_body needs new labels greater than body_label + loop_body = add_offset_to_labels(loop_body, + body_label - min(loop_body.keys()) + 1) + labels = loop_body.keys() + true_label = min(labels) + false_label = max(labels) + body_block = ir.Block(scope, loc) + loop_body[body_label] = body_block + mask = ir.Var(scope, mk_unique_var("$mask_val"), loc) + pass_states.typemap[mask.name] = mask_typ + mask_val = ir.Expr.getitem(mask_var, orig_index_var, loc) + body_block.body.extend([ + ir.Assign(mask_val, mask, loc), + ir.Branch(mask, true_label, false_label, loc) + ]) + else: # prange + start = 0 + step = 1 + size_var = args[0] + if len(args) == 2: + start = args[0] + size_var = args[1] + if len(args) == 3: + start = args[0] + size_var = args[1] + try: + step = pass_states.func_ir.get_definition(args[2]) + except KeyError: + raise errors.UnsupportedRewriteError( + "Only known step size is supported for prange", + loc=inst.loc, + ) + if not isinstance(step, ir.Const): + raise errors.UnsupportedRewriteError( + "Only constant step size is supported for prange", + loc=inst.loc, + ) + step = step.value + if step != 1: + raise errors.UnsupportedRewriteError( + "Only constant step size of 1 is supported for prange", + loc=inst.loc, + ) + index_var 
= ir.Var(scope, mk_unique_var("parfor_index"), loc) + # assume user-provided start to prange can be negative + # this is the only case parfor can have negative index + if isinstance(start, int) and start >= 0: + index_var_typ = types.uintp + else: + index_var_typ = types.intp + unsigned_index = False + loops = [LoopNest(index_var, start, size_var, step)] + pass_states.typemap[index_var.name] = index_var_typ + + # We can't just drop the header block since there can be things + # in there other than the prange looping infrastructure. + # So we just add the header to the first loop body block (minus the + # branch) and let dead code elimination remove the unnecessary parts. + first_body_label = min(loop_body.keys()) + loop_body[first_body_label].body = header_body + loop_body[first_body_label].body + + index_var_map = {v: index_var for v in loop_index_vars} + replace_vars(loop_body, index_var_map) + if unsigned_index: + # need to replace signed array access indices to enable + # optimizations (see #2846) + self._replace_loop_access_indices( + loop_body, loop_index_vars, index_var) + parfor = Parfor(loops, init_block, loop_body, loc, + orig_index_var if mask_indices else index_var, + equiv_set, + ("prange", loop_kind, loop_replacing), + pass_states.flags, races=races) + + blocks[loop.header].body = [parfor] + # We have to insert the header_body after the parfor because in + # a Numba loop this will be executed one more times before the + # branch and may contain instructions such as variable renamings + # that are relied upon later. 
+ blocks[loop.header].body.extend(header_body) + blocks[loop.header].body.append(ir.Jump(list(loop.exits)[0], loc)) + self.rewritten.append(dict( + old_loop=loop, + new=parfor, + reason='loop', + )) + # remove loop blocks from top level dict + for l in body_labels: + if l != loop.header: + blocks.pop(l) + if config.DEBUG_ARRAY_OPT >= 1: + print("parfor from loop") + parfor.dump() + + def _is_parallel_loop(self, func_var, call_table): + # prange can be either getattr (numba.prange) or global (prange) + if func_var not in call_table: + return False + call = call_table[func_var] + return len(call) > 0 and (call[0] == 'prange' or call[0] == prange + or call[0] == 'internal_prange' or call[0] == internal_prange + or call[0] == 'pndindex' or call[0] == pndindex) + + def _get_loop_kind(self, func_var, call_table): + """see if prange is user prange or internal""" + pass_states = self.pass_states + # prange can be either getattr (numba.prange) or global (prange) + assert func_var in call_table + call = call_table[func_var] + assert len(call) > 0 + kind = 'user', '' + if call[0] == 'internal_prange' or call[0] == internal_prange: + try: + kind = 'internal', (pass_states.swapped_fns[func_var][0], pass_states.swapped_fns[func_var][-1]) + except KeyError: + # FIXME: Fix this issue... the code didn't manage to trace the + # swapout for func_var so set the kind as internal so that the + # transform can occur, it's just not tracked + kind = 'internal', ('', '') + elif call[0] == 'pndindex' or call[0] == pndindex: + kind = 'pndindex', '' + return kind + + def _get_prange_init_block(self, entry_block, call_table, prange_args): + """ + If there is init_prange, find the code between init_prange and prange + calls. Remove the code from entry_block and return it. 
+ """ + init_call_ind = -1 + prange_call_ind = -1 + init_body = [] + for i, inst in enumerate(entry_block.body): + # if init_prange call + if (isinstance(inst, ir.Assign) and isinstance(inst.value, ir.Expr) + and inst.value.op == 'call' + and self._is_prange_init(inst.value.func.name, call_table)): + init_call_ind = i + if (isinstance(inst, ir.Assign) and isinstance(inst.value, ir.Expr) + and inst.value.op == 'call' + and self._is_parallel_loop(inst.value.func.name, call_table)): + prange_call_ind = i + if init_call_ind != -1 and prange_call_ind != -1: + # we save instructions that are used to calculate prange call args + # in the entry block. The rest go to parfor init_block + arg_related_vars = {v.name for v in prange_args} + saved_nodes = [] + for i in reversed(range(init_call_ind+1, prange_call_ind)): + inst = entry_block.body[i] + inst_vars = {v.name for v in inst.list_vars()} + if arg_related_vars & inst_vars: + arg_related_vars |= inst_vars + saved_nodes.append(inst) + else: + init_body.append(inst) + + init_body.reverse() + saved_nodes.reverse() + entry_block.body = (entry_block.body[:init_call_ind] + + saved_nodes + entry_block.body[prange_call_ind+1:]) + + return init_body + + def _is_prange_init(self, func_var, call_table): + if func_var not in call_table: + return False + call = call_table[func_var] + return len(call) > 0 and (call[0] == 'init_prange' or call[0] == init_prange) + + def _replace_loop_access_indices(self, loop_body, index_set, new_index): + """ + Replace array access indices in a loop body with a new index. + index_set has all the variables that are equivalent to loop index. + """ + # treat new index like others since replacing it with itself is ok + index_set.add(new_index.name) + + with dummy_return_in_loop_body(loop_body): + labels = find_topo_order(loop_body) + + first_label = labels[0] + added_indices = set() + + # traverse loop body and replace indices in getitem/setitem with + # new_index if possible. 
+ # also, find equivalent indices defined in first block. + for l in labels: + block = loop_body[l] + for stmt in block.body: + if (isinstance(stmt, ir.Assign) + and isinstance(stmt.value, ir.Var)): + # the first block dominates others so we can use copies + # of indices safely + if (l == first_label and stmt.value.name in index_set + and stmt.target.name not in index_set): + index_set.add(stmt.target.name) + added_indices.add(stmt.target.name) + # make sure parallel index is not overwritten + else: + scope = block.scope + + def unver(name): + from numba.core import errors + try: + return scope.get_exact(name).unversioned_name + except errors.NotDefinedError: + return name + + if unver(stmt.target.name) in map(unver, index_set) and unver(stmt.target.name) != unver(stmt.value.name): + raise errors.UnsupportedRewriteError( + "Overwrite of parallel loop index", + loc=stmt.target.loc, + ) + + + + if is_get_setitem(stmt): + index = index_var_of_get_setitem(stmt) + # statics can have none indices + if index is None: + continue + ind_def = guard(get_definition, self.pass_states.func_ir, + index, lhs_only=True) + if (index.name in index_set + or (ind_def is not None + and ind_def.name in index_set)): + set_index_var_of_get_setitem(stmt, new_index) + # corner case where one dimension of a multi-dim access + # should be replaced + guard(self._replace_multi_dim_ind, ind_def, index_set, + new_index) + + if isinstance(stmt, Parfor): + self._replace_loop_access_indices(stmt.loop_body, index_set, new_index) + + # remove added indices for correct recursive parfor handling + index_set -= added_indices + return + + def _replace_multi_dim_ind(self, ind_var, index_set, new_index): + """ + replace individual indices in multi-dimensional access variable, which + is a build_tuple + """ + pass_states = self.pass_states + require(ind_var is not None) + # check for Tuple instead of UniTuple since some dims could be slices + require(isinstance(pass_states.typemap[ind_var.name], + 
(types.Tuple, types.UniTuple))) + ind_def_node = get_definition(pass_states.func_ir, ind_var) + require(isinstance(ind_def_node, ir.Expr) + and ind_def_node.op == 'build_tuple') + ind_def_node.items = [new_index if v.name in index_set else v + for v in ind_def_node.items] + + +def _find_mask(typemap, func_ir, arr_def): + """check if an array is of B[...M...], where M is a + boolean array, and other indices (if available) are ints. + If found, return B, M, M's type, and a tuple representing mask indices. + Otherwise, raise GuardException. + """ + require(isinstance(arr_def, ir.Expr) and arr_def.op == 'getitem') + value = arr_def.value + index = arr_def.index + value_typ = typemap[value.name] + index_typ = typemap[index.name] + ndim = value_typ.ndim + require(isinstance(value_typ, types.npytypes.Array)) + if (isinstance(index_typ, types.npytypes.Array) and + isinstance(index_typ.dtype, types.Boolean) and + ndim == index_typ.ndim): + return value, index, index_typ.dtype, None + elif isinstance(index_typ, types.BaseTuple): + # Handle multi-dimension differently by requiring + # all indices to be constant except the one for mask. 
+ seq, op = find_build_sequence(func_ir, index) + require(op == 'build_tuple' and len(seq) == ndim) + count_consts = 0 + mask_indices = [] + mask_var = None + for ind in seq: + index_typ = typemap[ind.name] + # Handle boolean mask + if (isinstance(index_typ, types.npytypes.Array) and + isinstance(index_typ.dtype, types.Boolean)): + mask_var = ind + mask_typ = index_typ.dtype + mask_indices.append(None) + # Handle integer array selector + elif (isinstance(index_typ, types.npytypes.Array) and + isinstance(index_typ.dtype, types.Integer)): + mask_var = ind + mask_typ = index_typ.dtype + mask_indices.append(None) + # Handle integer index + elif isinstance(index_typ, types.Integer): + count_consts += 1 + mask_indices.append(ind) + + require(mask_var and count_consts == ndim - 1) + return value, mask_var, mask_typ, mask_indices + raise GuardException + + +class ParforPass(ParforPassStates): + + """ParforPass class is responsible for converting NumPy + calls in Numba intermediate representation to Parfors, which + will lower into either sequential or parallel loops during lowering + stage. + """ + + def _pre_run(self): + # run array analysis, a pre-requisite for parfor translation + self.array_analysis.run(self.func_ir.blocks) + # NOTE: Prepare _the_max_label. 
See #6102 + ir_utils._the_max_label.update( + ir_utils.find_max_label(self.func_ir.blocks)) + + def run(self): + """run parfor conversion pass: replace Numpy calls + with Parfors when possible and optimize the IR.""" + self._pre_run() + # run stencil translation to parfor + if self.options.stencil: + stencil_pass = StencilPass(self.func_ir, self.typemap, + self.calltypes, self.array_analysis, + self.typingctx, self.targetctx, + self.flags) + stencil_pass.run() + if self.options.setitem: + ConvertSetItemPass(self).run(self.func_ir.blocks) + if self.options.numpy: + ConvertNumpyPass(self).run(self.func_ir.blocks) + if self.options.reduction: + ConvertReducePass(self).run(self.func_ir.blocks) + if self.options.prange: + ConvertLoopPass(self).run(self.func_ir.blocks) + if self.options.inplace_binop: + ConvertInplaceBinop(self).run(self.func_ir.blocks) + + # setup diagnostics now parfors are found + self.diagnostics.setup(self.func_ir, self.options.fusion) + + dprint_func_ir(self.func_ir, "after parfor pass") + + def _find_mask(self, arr_def): + """check if an array is of B[...M...], where M is a + boolean array, and other indices (if available) are ints. + If found, return B, M, M's type, and a tuple representing mask indices. + Otherwise, raise GuardException. + """ + return _find_mask(self.typemap, self.func_ir, arr_def) + + def _mk_parfor_loops(self, size_vars, scope, loc): + """ + Create loop index variables and build LoopNest objects for a parfor. 
+ """ + return _mk_parfor_loops(self.typemap, size_vars, scope, loc) + + +class ParforFusionPass(ParforPassStates): + + """ParforFusionPass class is responsible for fusing parfors + """ + + def run(self): + """run parfor fusion pass""" + + # simplify CFG of parfor body loops since nested parfors with extra + # jumps can be created with prange conversion + n_parfors = simplify_parfor_body_CFG(self.func_ir.blocks) + # simplify before fusion + simplify(self.func_ir, self.typemap, self.calltypes, self.metadata["parfors"]) + # need two rounds of copy propagation to enable fusion of long sequences + # of parfors like test_fuse_argmin (some PYTHONHASHSEED values since + # apply_copies_parfor depends on set order for creating dummy assigns) + simplify(self.func_ir, self.typemap, self.calltypes, self.metadata["parfors"]) + + if self.options.fusion and n_parfors >= 2: + self.func_ir._definitions = build_definitions(self.func_ir.blocks) + self.array_analysis.equiv_sets = dict() + self.array_analysis.run(self.func_ir.blocks) + + # Get parfor params to calculate reductions below. + _, parfors = get_parfor_params(self.func_ir.blocks, + self.options.fusion, + self.nested_fusion_info) + + # Find reductions so that fusion can be disallowed if a + # subsequent parfor read a reduction variable. 
+ for p in parfors: + p.redvars, p.reddict = get_parfor_reductions(self.func_ir, + p, + p.params, + self.calltypes) + + # reorder statements to maximize fusion + # push non-parfors down + maximize_fusion(self.func_ir, self.func_ir.blocks, self.typemap, + up_direction=False) + dprint_func_ir(self.func_ir, "after maximize fusion down") + self.fuse_parfors(self.array_analysis, + self.func_ir.blocks, + self.func_ir, + self.typemap) + dprint_func_ir(self.func_ir, "after first fuse") + # push non-parfors up + maximize_fusion(self.func_ir, self.func_ir.blocks, self.typemap) + dprint_func_ir(self.func_ir, "after maximize fusion up") + # try fuse again after maximize + self.fuse_parfors(self.array_analysis, + self.func_ir.blocks, + self.func_ir, + self.typemap) + dprint_func_ir(self.func_ir, "after fusion") + # remove dead code after fusion to remove extra arrays and variables + simplify(self.func_ir, self.typemap, self.calltypes, self.metadata["parfors"]) + + def fuse_parfors(self, array_analysis, blocks, func_ir, typemap): + for label, block in blocks.items(): + equiv_set = array_analysis.get_equiv_set(label) + fusion_happened = True + while fusion_happened: + fusion_happened = False + new_body = [] + i = 0 + while i < len(block.body) - 1: + stmt = block.body[i] + next_stmt = block.body[i + 1] + if isinstance(stmt, Parfor) and isinstance(next_stmt, Parfor): + # we have to update equiv_set since they have changed due to + # variables being renamed before fusion. 
+ equiv_set = array_analysis.get_equiv_set(label) + stmt.equiv_set = equiv_set + next_stmt.equiv_set = equiv_set + fused_node, fuse_report = try_fuse(equiv_set, stmt, next_stmt, + self.metadata["parfors"], func_ir, typemap) + # accumulate fusion reports + self.diagnostics.fusion_reports.append(fuse_report) + if fused_node is not None: + fusion_happened = True + self.diagnostics.fusion_info[stmt.id].extend([next_stmt.id]) + new_body.append(fused_node) + self.fuse_recursive_parfor(fused_node, equiv_set, func_ir, typemap) + i += 2 + continue + new_body.append(stmt) + if isinstance(stmt, Parfor): + self.fuse_recursive_parfor(stmt, equiv_set, func_ir, typemap) + i += 1 + new_body.append(block.body[-1]) + block.body = new_body + return + + def fuse_recursive_parfor(self, parfor, equiv_set, func_ir, typemap): + blocks = wrap_parfor_blocks(parfor) + maximize_fusion(self.func_ir, blocks, self.typemap) + dprint_func_ir(self.func_ir, "after recursive maximize fusion down", blocks) + arr_analysis = array_analysis.ArrayAnalysis(self.typingctx, self.func_ir, + self.typemap, self.calltypes) + arr_analysis.run(blocks, equiv_set) + self.fuse_parfors(arr_analysis, blocks, func_ir, typemap) + unwrap_parfor_blocks(parfor) + + +class ParforPreLoweringPass(ParforPassStates): + + """ParforPreLoweringPass class is responsible for preparing parfors for lowering. 
+ """ + + def run(self): + """run parfor prelowering pass""" + + # push function call variables inside parfors so gufunc function + # wouldn't need function variables as argument + push_call_vars(self.func_ir.blocks, {}, {}, self.typemap) + dprint_func_ir(self.func_ir, "after push call vars") + # simplify again + simplify(self.func_ir, self.typemap, self.calltypes, self.metadata["parfors"]) + dprint_func_ir(self.func_ir, "after optimization") + if config.DEBUG_ARRAY_OPT >= 1: + print("variable types: ", sorted(self.typemap.items())) + print("call types: ", self.calltypes) + + if config.DEBUG_ARRAY_OPT >= 3: + for(block_label, block) in self.func_ir.blocks.items(): + new_block = [] + scope = block.scope + for stmt in block.body: + new_block.append(stmt) + if isinstance(stmt, ir.Assign): + loc = stmt.loc + lhs = stmt.target + rhs = stmt.value + lhs_typ = self.typemap[lhs.name] + print("Adding print for assignment to ", lhs.name, lhs_typ, type(lhs_typ)) + if lhs_typ in types.number_domain or isinstance(lhs_typ, types.Literal): + str_var = ir.Var(scope, mk_unique_var("str_var"), loc) + self.typemap[str_var.name] = types.StringLiteral(lhs.name) + lhs_const = ir.Const(lhs.name, loc) + str_assign = ir.Assign(lhs_const, str_var, loc) + new_block.append(str_assign) + str_print = ir.Print([str_var], None, loc) + self.calltypes[str_print] = signature(types.none, self.typemap[str_var.name]) + new_block.append(str_print) + ir_print = ir.Print([lhs], None, loc) + self.calltypes[ir_print] = signature(types.none, lhs_typ) + new_block.append(ir_print) + block.body = new_block + + if self.func_ir.is_generator: + fix_generator_types(self.func_ir.generator_info, self.return_type, + self.typemap) + if sequential_parfor_lowering: + lower_parfor_sequential( + self.typingctx, self.func_ir, self.typemap, self.calltypes, self.metadata) + else: + # prepare for parallel lowering + # add parfor params to parfors here since lowering is destructive + # changing the IR after this is not allowed + 
parfor_ids, parfors = get_parfor_params(self.func_ir.blocks, + self.options.fusion, + self.nested_fusion_info) + + # Validate reduction in parfors. + for p in parfors: + p.redvars, p.reddict = get_parfor_reductions(self.func_ir, + p, + p.params, + self.calltypes) + + # Validate parameters: + for p in parfors: + p.validate_params(self.typemap) + + if config.DEBUG_ARRAY_OPT_STATS: + name = self.func_ir.func_id.func_qualname + n_parfors = len(parfor_ids) + if n_parfors > 0: + after_fusion = ("After fusion" if self.options.fusion + else "With fusion disabled") + print(('{}, function {} has ' + '{} parallel for-loop(s) #{}.').format( + after_fusion, name, n_parfors, parfor_ids)) + else: + print('Function {} has no Parfor.'.format(name)) + + +def _remove_size_arg(call_name, expr): + "remove size argument from args or kws" + # remove size kwarg + kws = dict(expr.kws) + kws.pop('size', '') + expr.kws = tuple(kws.items()) + + # remove size arg if available + if call_name in random_1arg_size + random_int_args: + # these calls have only a "size" argument or list of ints + # so remove all args + expr.args = [] + + if call_name in random_3arg_sizelast: + # normal, uniform, ... 
have 3 args, last one is size + if len(expr.args) == 3: + expr.args.pop() + + if call_name in random_2arg_sizelast: + # have 2 args, last one is size + if len(expr.args) == 2: + expr.args.pop() + + if call_name == 'randint': + # has 4 args, 3rd one is size + if len(expr.args) == 3: + expr.args.pop() + if len(expr.args) == 4: + dt_arg = expr.args.pop() + expr.args.pop() # remove size + expr.args.append(dt_arg) + + if call_name == 'triangular': + # has 4 args, last one is size + if len(expr.args) == 4: + expr.args.pop() + + return + + +def _get_call_arg_types(expr, typemap): + new_arg_typs = [] + for arg in expr.args: + new_arg_typs.append(typemap[arg.name]) + + new_kw_types = {} + for name, arg in expr.kws: + new_kw_types[name] = typemap[arg.name] + + return tuple(new_arg_typs), new_kw_types + + +def _arrayexpr_tree_to_ir( + func_ir, + typingctx, + typemap, + calltypes, + equiv_set, + init_block, + expr_out_var, + expr, + parfor_index_tuple_var, + all_parfor_indices, + avail_vars): + """generate IR from array_expr's expr tree recursively. Assign output to + expr_out_var and returns the whole IR as a list of Assign nodes. 
+ """ + el_typ = typemap[expr_out_var.name] + scope = expr_out_var.scope + loc = expr_out_var.loc + out_ir = [] + + if isinstance(expr, tuple): + op, arr_expr_args = expr + arg_vars = [] + for arg in arr_expr_args: + arg_out_var = ir.Var(scope, mk_unique_var("$arg_out_var"), loc) + typemap[arg_out_var.name] = el_typ + out_ir += _arrayexpr_tree_to_ir(func_ir, + typingctx, + typemap, + calltypes, + equiv_set, + init_block, + arg_out_var, + arg, + parfor_index_tuple_var, + all_parfor_indices, + avail_vars) + arg_vars.append(arg_out_var) + if op in npydecl.supported_array_operators: + el_typ1 = typemap[arg_vars[0].name] + if len(arg_vars) == 2: + el_typ2 = typemap[arg_vars[1].name] + func_typ = typingctx.resolve_function_type(op, (el_typ1, + el_typ2), {}) + ir_expr = ir.Expr.binop(op, arg_vars[0], arg_vars[1], loc) + if op == operator.truediv: + func_typ, ir_expr = _gen_np_divide( + arg_vars[0], arg_vars[1], out_ir, typemap) + else: + func_typ = typingctx.resolve_function_type(op, (el_typ1,), {}) + ir_expr = ir.Expr.unary(op, arg_vars[0], loc) + calltypes[ir_expr] = func_typ + el_typ = func_typ.return_type + out_ir.append(ir.Assign(ir_expr, expr_out_var, loc)) + for T in array_analysis.MAP_TYPES: + if isinstance(op, T): + # elif isinstance(op, (np.ufunc, DUFunc)): + # function calls are stored in variables which are not removed + # op is typing_key to the variables type + func_var_name = _find_func_var(typemap, op, avail_vars, loc=loc) + func_var = ir.Var(scope, mk_unique_var(func_var_name), loc) + typemap[func_var.name] = typemap[func_var_name] + func_var_def = copy.deepcopy(func_ir.get_definition(func_var_name)) + if isinstance(func_var_def, ir.Expr) and func_var_def.op == 'getattr' and func_var_def.attr == 'sqrt': + g_math_var = ir.Var(scope, mk_unique_var("$math_g_var"), loc) + typemap[g_math_var.name] = types.misc.Module(math) + g_math = ir.Global('math', math, loc) + g_math_assign = ir.Assign(g_math, g_math_var, loc) + func_var_def = ir.Expr.getattr(g_math_var, 
'sqrt', loc) + out_ir.append(g_math_assign) +# out_ir.append(func_var_def) + ir_expr = ir.Expr.call(func_var, arg_vars, (), loc) + call_typ = typemap[func_var.name].get_call_type( + typingctx, tuple(typemap[a.name] for a in arg_vars), {}) + calltypes[ir_expr] = call_typ + el_typ = call_typ.return_type + #signature(el_typ, el_typ) + out_ir.append(ir.Assign(func_var_def, func_var, loc)) + out_ir.append(ir.Assign(ir_expr, expr_out_var, loc)) + elif isinstance(expr, ir.Var): + var_typ = typemap[expr.name] + if isinstance(var_typ, types.Array): + el_typ = var_typ.dtype + ir_expr = _gen_arrayexpr_getitem( + equiv_set, + expr, + parfor_index_tuple_var, + all_parfor_indices, + el_typ, + calltypes, + typingctx, + typemap, + init_block, + out_ir) + else: + # assert typemap[expr.name]==el_typ + el_typ = var_typ + ir_expr = expr + out_ir.append(ir.Assign(ir_expr, expr_out_var, loc)) + elif isinstance(expr, ir.Const): + el_typ = typing.Context().resolve_value_type(expr.value) + out_ir.append(ir.Assign(expr, expr_out_var, loc)) + + if len(out_ir) == 0: + raise errors.UnsupportedRewriteError( + f"Don't know how to translate array expression '{expr:r}'", + loc=expr.loc, + ) + typemap.pop(expr_out_var.name, None) + typemap[expr_out_var.name] = el_typ + return out_ir + + +def _gen_np_divide(arg1, arg2, out_ir, typemap): + """generate np.divide() instead of / for array_expr to get numpy error model + like inf for division by zero (test_division_by_zero). 
+ """ + scope = arg1.scope + loc = arg1.loc + # g_np_var = Global(numpy) + g_np_var = ir.Var(scope, mk_unique_var("$np_g_var"), loc) + typemap[g_np_var.name] = types.misc.Module(numpy) + g_np = ir.Global('np', numpy, loc) + g_np_assign = ir.Assign(g_np, g_np_var, loc) + # attr call: div_attr = getattr(g_np_var, divide) + div_attr_call = ir.Expr.getattr(g_np_var, "divide", loc) + attr_var = ir.Var(scope, mk_unique_var("$div_attr"), loc) + func_var_typ = get_np_ufunc_typ(numpy.divide) + typemap[attr_var.name] = func_var_typ + attr_assign = ir.Assign(div_attr_call, attr_var, loc) + # divide call: div_attr(arg1, arg2) + div_call = ir.Expr.call(attr_var, [arg1, arg2], (), loc) + func_typ = func_var_typ.get_call_type( + typing.Context(), [typemap[arg1.name], typemap[arg2.name]], {}) + out_ir.extend([g_np_assign, attr_assign]) + return func_typ, div_call + + +def _gen_arrayexpr_getitem( + equiv_set, + var, + parfor_index_tuple_var, + all_parfor_indices, + el_typ, + calltypes, + typingctx, + typemap, + init_block, + out_ir): + """if there is implicit dimension broadcast, generate proper access variable + for getitem. For example, if indices are (i1,i2,i3) but shape is (c1,0,c3), + generate a tuple with (i1,0,i3) for access. Another example: for (i1,i2,i3) + and (c1,c2) generate (i2,i3). 
+ """ + loc = var.loc + index_var = parfor_index_tuple_var + var_typ = typemap[var.name] + ndims = typemap[var.name].ndim + num_indices = len(all_parfor_indices) + size_vars = equiv_set.get_shape(var) or [] + size_consts = [equiv_set.get_equiv_const(x) for x in size_vars] + # Handle array-scalar + if ndims == 0: + # call np.ravel + ravel_var = ir.Var(var.scope, mk_unique_var("$ravel"), loc) + ravel_typ = types.npytypes.Array(dtype=var_typ.dtype, ndim=1, layout='C') + typemap[ravel_var.name] = ravel_typ + stmts = ir_utils.gen_np_call('ravel', numpy.ravel, ravel_var, [var], typingctx, typemap, calltypes) + init_block.body.extend(stmts) + var = ravel_var + # Const(0) + const_node = ir.Const(0, var.loc) + const_var = ir.Var(var.scope, mk_unique_var("$const_ind_0"), loc) + typemap[const_var.name] = types.uintp + const_assign = ir.Assign(const_node, const_var, loc) + out_ir.append(const_assign) + index_var = const_var + # Handle 1d array + elif ndims == 1: + # Use last index for 1D arrays + index_var = all_parfor_indices[-1] + # Handle known constant size + elif any([x is not None for x in size_consts]): + # Need a tuple as index + ind_offset = num_indices - ndims + tuple_var = ir.Var(var.scope, mk_unique_var( + "$parfor_index_tuple_var_bcast"), loc) + typemap[tuple_var.name] = types.containers.UniTuple(types.uintp, ndims) + # Just in case, const var for size 1 dim access index: $const0 = + # Const(0) + const_node = ir.Const(0, var.loc) + const_var = ir.Var(var.scope, mk_unique_var("$const_ind_0"), loc) + typemap[const_var.name] = types.uintp + const_assign = ir.Assign(const_node, const_var, loc) + out_ir.append(const_assign) + index_vars = [] + for i in reversed(range(ndims)): + size_var = size_vars[i] + size_const = size_consts[i] + if size_const == 1: + index_vars.append(const_var) + else: + index_vars.append(all_parfor_indices[ind_offset + i]) + index_vars = list(reversed(index_vars)) + tuple_call = ir.Expr.build_tuple(index_vars, loc) + tuple_assign = 
ir.Assign(tuple_call, tuple_var, loc) + out_ir.append(tuple_assign) + index_var = tuple_var + + ir_expr = ir.Expr.getitem(var, index_var, loc) + calltypes[ir_expr] = signature(el_typ, typemap[var.name], + typemap[index_var.name]) + return ir_expr + + +def _find_func_var(typemap, func, avail_vars, loc): + """find variable in typemap which represents the function func. + """ + for v in avail_vars: + t = typemap[v] + # Function types store actual functions in typing_key. + if isinstance(t, Function) and t.typing_key == func: + return v + raise errors.UnsupportedRewriteError("ufunc call variable not found", loc=loc) + + +def lower_parfor_sequential(typingctx, func_ir, typemap, calltypes, metadata): + ir_utils._the_max_label.update(ir_utils.find_max_label(func_ir.blocks)) + parfor_found = False + new_blocks = {} + scope = next(iter(func_ir.blocks.values())).scope + for (block_label, block) in func_ir.blocks.items(): + block_label, parfor_found = _lower_parfor_sequential_block( + block_label, block, new_blocks, typemap, calltypes, parfor_found, + scope=scope) + # old block stays either way + new_blocks[block_label] = block + func_ir.blocks = new_blocks + # rename only if parfor found and replaced (avoid test_flow_control error) + if parfor_found: + func_ir.blocks = rename_labels(func_ir.blocks) + dprint_func_ir(func_ir, "after parfor sequential lowering") + simplify(func_ir, typemap, calltypes, metadata["parfors"]) + dprint_func_ir(func_ir, "after parfor sequential simplify") + + +def _lower_parfor_sequential_block( + block_label, + block, + new_blocks, + typemap, + calltypes, + parfor_found, + scope): + i = _find_first_parfor(block.body) + while i != -1: + parfor_found = True + inst = block.body[i] + loc = inst.init_block.loc + # split block across parfor + prev_block = ir.Block(scope, loc) + prev_block.body = block.body[:i] + block.body = block.body[i + 1:] + # previous block jump to parfor init block + init_label = next_label() + 
prev_block.body.append(ir.Jump(init_label, loc)) + new_blocks[init_label] = transfer_scope(inst.init_block, scope) + new_blocks[block_label] = prev_block + block_label = next_label() + + ndims = len(inst.loop_nests) + for i in range(ndims): + loopnest = inst.loop_nests[i] + # create range block for loop + range_label = next_label() + header_label = next_label() + range_block = mk_range_block( + typemap, + loopnest.start, + loopnest.stop, + loopnest.step, + calltypes, + scope, + loc) + range_block.body[-1].target = header_label # fix jump target + phi_var = range_block.body[-2].target + new_blocks[range_label] = range_block + header_block = mk_loop_header(typemap, phi_var, calltypes, + scope, loc) + header_block.body[-2].target = loopnest.index_variable + new_blocks[header_label] = header_block + # jump to this new inner loop + if i == 0: + inst.init_block.body.append(ir.Jump(range_label, loc)) + header_block.body[-1].falsebr = block_label + else: + new_blocks[prev_header_label].body[-1].truebr = range_label + header_block.body[-1].falsebr = prev_header_label + prev_header_label = header_label # to set truebr next loop + + # last body block jump to inner most header + body_last_label = max(inst.loop_body.keys()) + inst.loop_body[body_last_label].body.append( + ir.Jump(header_label, loc)) + # inner most header jumps to first body block + body_first_label = min(inst.loop_body.keys()) + header_block.body[-1].truebr = body_first_label + # add parfor body to blocks + for (l, b) in inst.loop_body.items(): + l, parfor_found = _lower_parfor_sequential_block( + l, b, new_blocks, typemap, calltypes, parfor_found, + scope=scope) + new_blocks[l] = transfer_scope(b, scope) + i = _find_first_parfor(block.body) + return block_label, parfor_found + + +def _find_first_parfor(body): + for (i, inst) in enumerate(body): + if isinstance(inst, Parfor) and not inst.no_sequential_lowering: + return i + return -1 + + +def get_parfor_params(blocks, options_fusion, fusion_info): + """find 
variables used in body of parfors from outside and save them. + computed as live variables at entry of first block. + """ + + # since parfor wrap creates a back-edge to first non-init basic block, + # live_map[first_non_init_block] contains variables defined in parfor body + # that could be undefined before. So we only consider variables that are + # actually defined before the parfor body in the program. + parfor_ids = set() + parfors = [] + pre_defs = set() + _, all_defs = compute_use_defs(blocks) + topo_order = find_topo_order(blocks) + for label in topo_order: + block = blocks[label] + for i, parfor in _find_parfors(block.body): + # find variable defs before the parfor in the same block + dummy_block = ir.Block(block.scope, block.loc) + dummy_block.body = block.body[:i] + before_defs = compute_use_defs({0: dummy_block}).defmap[0] + pre_defs |= before_defs + params = get_parfor_params_inner( + parfor, pre_defs, options_fusion, fusion_info, + ) + parfor.params, parfor.races = _combine_params_races_for_ssa_names( + block.scope, params, parfor.races, + ) + parfor_ids.add(parfor.id) + parfors.append(parfor) + + pre_defs |= all_defs[label] + return parfor_ids, parfors + + +def _combine_params_races_for_ssa_names(scope, params, races): + """Returns `(params|races1, races1)`, where `races1` contains all variables + in `races` are NOT referring to the same unversioned (SSA) variables in + `params`. 
+ """ + def unversion(k): + try: + return scope.get_exact(k).unversioned_name + except ir.NotDefinedError: + # XXX: it's a bug that something references an undefined name + return k + + races1 = set(races) + unver_params = list(map(unversion, params)) + + for rv in races: + if any(unversion(rv) == pv for pv in unver_params): + races1.discard(rv) + else: + break + + return params | races1, races1 + + +def get_parfor_params_inner(parfor, pre_defs, options_fusion, fusion_info): + blocks = wrap_parfor_blocks(parfor) + cfg = compute_cfg_from_blocks(blocks) + usedefs = compute_use_defs(blocks) + live_map = compute_live_map(cfg, blocks, usedefs.usemap, usedefs.defmap) + parfor_ids, _ = get_parfor_params(blocks, options_fusion, fusion_info) + n_parfors = len(parfor_ids) + if n_parfors > 0: + if config.DEBUG_ARRAY_OPT_STATS: + after_fusion = ("After fusion" if options_fusion + else "With fusion disabled") + print(('{}, parallel for-loop {} has ' + 'nested Parfor(s) #{}.').format( + after_fusion, parfor.id, n_parfors, parfor_ids)) + fusion_info[parfor.id] = list(parfor_ids) + + unwrap_parfor_blocks(parfor) + keylist = sorted(live_map.keys()) + init_block = keylist[0] + first_non_init_block = keylist[1] + + before_defs = usedefs.defmap[init_block] | pre_defs + params = live_map[first_non_init_block] & before_defs + return params + + +def _find_parfors(body): + for i, inst in enumerate(body): + if isinstance(inst, Parfor): + yield i, inst + + +def _is_indirect_index(func_ir, index, nest_indices): + index_def = guard(get_definition, func_ir, index.name) + if isinstance(index_def, ir.Expr) and index_def.op == 'build_tuple': + if [x.name for x in index_def.items] == [x.name for x in nest_indices]: + return True + + return False + + +def get_array_indexed_with_parfor_index_internal(loop_body, + index, + ret_indexed, + ret_not_indexed, + nest_indices, + func_ir): + for blk in loop_body: + for stmt in blk.body: + if isinstance(stmt, (ir.StaticSetItem, ir.SetItem)): + setarray_index 
= get_index_var(stmt) + if (isinstance(setarray_index, ir.Var) and + (setarray_index.name == index or + _is_indirect_index( + func_ir, + setarray_index, + nest_indices))): + ret_indexed.add(stmt.target.name) + else: + ret_not_indexed.add(stmt.target.name) + elif (isinstance(stmt, ir.Assign) and + isinstance(stmt.value, ir.Expr) and + stmt.value.op in ['getitem', 'static_getitem']): + getarray_index = stmt.value.index + getarray_name = stmt.value.value.name + if (isinstance(getarray_index, ir.Var) and + (getarray_index.name == index or + _is_indirect_index( + func_ir, + getarray_index, + nest_indices))): + ret_indexed.add(getarray_name) + else: + ret_not_indexed.add(getarray_name) + elif isinstance(stmt, Parfor): + get_array_indexed_with_parfor_index_internal( + stmt.loop_body.values(), + index, + ret_indexed, + ret_not_indexed, + nest_indices, + func_ir) + + +def get_array_indexed_with_parfor_index(loop_body, + index, + nest_indices, + func_ir): + ret_indexed = set() + ret_not_indexed = set() + get_array_indexed_with_parfor_index_internal( + loop_body, + index, + ret_indexed, + ret_not_indexed, + nest_indices, + func_ir) + return ret_indexed, ret_not_indexed + + +def get_parfor_outputs(parfor, parfor_params): + """get arrays that are written to inside the parfor and need to be passed + as parameters to gufunc. + """ + # FIXME: The following assumes the target of all SetItem are outputs, + # which is wrong! 
+ last_label = max(parfor.loop_body.keys()) + outputs = [] + for blk in parfor.loop_body.values(): + for stmt in blk.body: + if (isinstance(stmt, (ir.StaticSetItem, ir.SetItem)) and + get_index_var(stmt).name == parfor.index_var.name): + outputs.append(stmt.target.name) + # make sure these written arrays are in parfor parameters (live coming in) + outputs = list(set(outputs) & set(parfor_params)) + return sorted(outputs) + + +_RedVarInfo = make_dataclass( + "_RedVarInfo", + ["init_val", "reduce_nodes", "redop"], + frozen=True, +) + + +def get_parfor_reductions(func_ir, parfor, parfor_params, calltypes, reductions=None, + reduce_varnames=None, param_uses=None, param_nodes=None, + var_to_param=None): + """find variables that are updated using their previous values and an array + item accessed with parfor index, e.g. s = s+A[i] + """ + if reductions is None: + reductions = {} + if reduce_varnames is None: + reduce_varnames = [] + + # for each param variable, find what other variables are used to update it + # also, keep the related nodes + if param_uses is None: + param_uses = defaultdict(list) + if param_nodes is None: + param_nodes = defaultdict(list) + if var_to_param is None: + var_to_param = {} + + blocks = wrap_parfor_blocks(parfor) + topo_order = find_topo_order(blocks) + topo_order = topo_order[1:] # ignore init block + unwrap_parfor_blocks(parfor) + + for label in reversed(topo_order): + for stmt in reversed(parfor.loop_body[label].body): + if (isinstance(stmt, ir.Assign) + and (stmt.target.name in parfor_params + or stmt.target.name in var_to_param)): + lhs = stmt.target + rhs = stmt.value + cur_param = lhs if lhs.name in parfor_params else var_to_param[lhs.name] + used_vars = [] + if isinstance(rhs, ir.Var): + used_vars = [rhs.name] + elif isinstance(rhs, ir.Expr): + used_vars = [v.name for v in stmt.value.list_vars()] + param_uses[cur_param].extend(used_vars) + for v in used_vars: + var_to_param[v] = cur_param + # save copy of dependent stmt + stmt_cp = 
copy.deepcopy(stmt) + if stmt.value in calltypes: + calltypes[stmt_cp.value] = calltypes[stmt.value] + param_nodes[cur_param].append(stmt_cp) + if isinstance(stmt, Parfor): + # recursive parfors can have reductions like test_prange8 + get_parfor_reductions(func_ir, stmt, parfor_params, calltypes, + reductions, reduce_varnames, None, param_nodes, var_to_param) + + for param, used_vars in param_uses.items(): + # a parameter is a reduction variable if its value is used to update it + # check reduce_varnames since recursive parfors might have processed + # param already + param_name = param.name + if param_name in used_vars and param_name not in reduce_varnames: + param_nodes[param].reverse() + reduce_nodes = get_reduce_nodes(param, param_nodes[param], func_ir) + # Certain kinds of ill-formed Python (like potentially undefined + # variables) in combination with SSA can make things look like + # reductions except that they don't have reduction operators. + # If we get to this point but don't find a reduction operator + # then assume it is this situation and just don't treat this + # variable as a reduction. + if reduce_nodes is not None: + reduce_varnames.append(param_name) + check_conflicting_reduction_operators(param, reduce_nodes) + gri_out = guard(get_reduction_init, reduce_nodes) + if gri_out is not None: + init_val, redop = gri_out + else: + init_val = None + redop = None + reductions[param_name] = _RedVarInfo( + init_val=init_val, + reduce_nodes=reduce_nodes, + redop=redop, + ) + + return reduce_varnames, reductions + +def check_conflicting_reduction_operators(param, nodes): + """In prange, a user could theoretically specify conflicting + reduction operators. For example, in one spot it is += and + another spot *=. Here, we raise an exception if multiple + different reduction operators are used in one prange. 
+ """ + first_red_func = None + for node in nodes: + if (isinstance(node, ir.Assign) and + isinstance(node.value, ir.Expr) and + node.value.op=='inplace_binop'): + if first_red_func is None: + first_red_func = node.value.fn + else: + if first_red_func != node.value.fn: + msg = ("Reduction variable %s has multiple conflicting " + "reduction operators." % param.unversioned_name) + raise errors.UnsupportedRewriteError(msg, node.loc) + +def get_reduction_init(nodes): + """ + Get initial value for known reductions. + Currently, only += and *= are supported. + """ + require(len(nodes) >= 1) + # there could be multiple extra assignments after the reduce node + # See: test_reduction_var_reuse + acc_expr = list(filter(lambda x: isinstance(x.value, ir.Expr), nodes))[-1].value + require(isinstance(acc_expr, ir.Expr) and acc_expr.op in ['inplace_binop', 'binop']) + acc_expr_fn = acc_expr.fn + if acc_expr.op == 'binop': + if acc_expr_fn == operator.add: + acc_expr_fn = operator.iadd + elif acc_expr_fn == operator.sub: + acc_expr_fn = operator.isub + elif acc_expr_fn == operator.mul: + acc_expr_fn = operator.imul + elif acc_expr_fn == operator.truediv: + acc_expr_fn = operator.itruediv + if acc_expr_fn == operator.iadd or acc_expr_fn == operator.isub: + return 0, acc_expr_fn + if ( acc_expr_fn == operator.imul + or acc_expr_fn == operator.itruediv ): + return 1, acc_expr_fn + return None, None + +def supported_reduction(x, func_ir): + if x.op == 'inplace_binop' or x.op == 'binop': + if x.fn == operator.ifloordiv or x.fn == operator.floordiv: + raise errors.NumbaValueError(("Parallel floordiv reductions are not supported. 
" + "If all divisors are integers then a floordiv " + "reduction can in some cases be parallelized as " + "a multiply reduction followed by a floordiv of " + "the resulting product."), x.loc) + supps = [operator.iadd, + operator.isub, + operator.imul, + operator.itruediv, + operator.add, + operator.sub, + operator.mul, + operator.truediv] + return x.fn in supps + if x.op == 'call': + callname = guard(find_callname, func_ir, x) + if callname in [ + ('max', 'builtins'), + ('min', 'builtins'), + ('datetime_minimum', 'numba.np.npdatetime_helpers'), + ('datetime_maximum', 'numba.np.npdatetime_helpers'), + ]: + return True + return False + +def get_reduce_nodes(reduction_node, nodes, func_ir): + """ + Get nodes that combine the reduction variable with a sentinel variable. + Recognizes the first node that combines the reduction variable with another + variable. + """ + reduce_nodes = None + defs = {} + + def cyclic_lookup(var, varonly=True, start=None): + """Lookup definition of ``var``. + Returns ``None`` if variable definition forms a cycle. + """ + lookedup_var = defs.get(var.name, None) + if isinstance(lookedup_var, ir.Var): + if start is None: + start = lookedup_var + elif start == lookedup_var: + # cycle detected + return None + return cyclic_lookup(lookedup_var, start=start) + else: + return var if (varonly or lookedup_var is None) else lookedup_var + + def noncyclic_lookup(*args, **kwargs): + """Similar to cyclic_lookup but raise AssertionError if a cycle is + detected. 
+ """ + res = cyclic_lookup(*args, **kwargs) + if res is None: + raise AssertionError("unexpected cycle in lookup()") + return res + + name = reduction_node.name + unversioned_name = reduction_node.unversioned_name + for i, stmt in enumerate(nodes): + lhs = stmt.target + rhs = stmt.value + defs[lhs.name] = rhs + if isinstance(rhs, ir.Var) and rhs.name in defs: + rhs = cyclic_lookup(rhs) + if isinstance(rhs, ir.Expr): + in_vars = set(noncyclic_lookup(v, True).name + for v in rhs.list_vars()) + if name in in_vars: + # reductions like sum have an assignment afterwards + # e.g. $2 = a + $1; a = $2 + # reductions that are functions calls like max() don't have an + # extra assignment afterwards + + # This code was created when Numba had an IR generation strategy + # where a binop for a reduction would be followed by an + # assignment as follows: + #$c.4.15 = inplace_binop(fn=, ...>, lhs=c.3, rhs=$const20) + #c.4 = $c.4.15 + + # With Python 3.12 changes, Numba may separate that assignment + # to a new basic block. The code below looks and sees if an + # assignment to the reduction var follows the reduction operator + # and if not it searches the rest of the reduction nodes to find + # the assignment that should follow the reduction operator + # and then reorders the reduction nodes so that assignment + # follows the reduction operator. + if (i + 1 < len(nodes) and + ((not isinstance(nodes[i + 1], ir.Assign)) or + nodes[i + 1].target.unversioned_name != unversioned_name)): + foundj = None + # Iterate through the rest of the reduction nodes. + for j, jstmt in enumerate(nodes[i + 1:]): + # If this stmt is an assignment where the right-hand + # side of the assignment is the output of the reduction + # operator. + if isinstance(jstmt, ir.Assign) and jstmt.value == lhs: + # Remember the index of this node. Because of + # nodes[i+1] above, we have to add i + 1 to j below + # to get the index in the original nodes list. 
+ foundj = i + j + 1 + break + if foundj is not None: + # If we found the correct assignment then move it to + # after the reduction operator. + nodes = (nodes[:i + 1] + # nodes up to operator + nodes[foundj:foundj + 1] + # assignment node + nodes[i + 1:foundj] + # between op and assign + nodes[foundj + 1:]) # after assignment node + + if (not (i+1 < len(nodes) and isinstance(nodes[i+1], ir.Assign) + and nodes[i+1].target.unversioned_name == unversioned_name) + and lhs.unversioned_name != unversioned_name): + raise ValueError( + f"Use of reduction variable {unversioned_name!r} other " + "than in a supported reduction function is not " + "permitted." + ) + + if not supported_reduction(rhs, func_ir): + raise ValueError(("Use of reduction variable " + unversioned_name + + " in an unsupported reduction function.")) + args = [(x.name, noncyclic_lookup(x, True)) + for x in get_expr_args(rhs) ] + non_red_args = [ x for (x, y) in args if y.name != name ] + assert len(non_red_args) == 1 + args = [ (x, y) for (x, y) in args if x != y.name ] + replace_dict = dict(args) + replace_dict[non_red_args[0]] = ir.Var(lhs.scope, name+"#init", lhs.loc) + replace_vars_inner(rhs, replace_dict) + reduce_nodes = nodes[i:] + break + return reduce_nodes + +def get_expr_args(expr): + """ + Get arguments of an expression node + """ + if expr.op in ['binop', 'inplace_binop']: + return [expr.lhs, expr.rhs] + if expr.op == 'call': + return [v for v in expr.args] + raise NotImplementedError("get arguments for expression {}".format(expr)) + +def visit_parfor_pattern_vars(parfor, callback, cbdata): + # currently, only stencil pattern has variables + for pattern in parfor.patterns: + if pattern[0] == 'stencil': + left_lengths = pattern[1][0] + for i in range(len(left_lengths)): + if isinstance(left_lengths[i], ir.Var): + left_lengths[i] = visit_vars_inner(left_lengths[i], + callback, cbdata) + right_lengths = pattern[1][1] + for i in range(len(right_lengths)): + if isinstance(right_lengths[i], 
ir.Var): + right_lengths[i] = visit_vars_inner(right_lengths[i], + callback, cbdata) + +def visit_vars_parfor(parfor, callback, cbdata): + if config.DEBUG_ARRAY_OPT >= 1: + print("visiting parfor vars for:", parfor) + print("cbdata: ", sorted(cbdata.items())) + for l in parfor.loop_nests: + l.index_variable = visit_vars_inner(l.index_variable, callback, cbdata) + if isinstance(l.start, ir.Var): + l.start = visit_vars_inner(l.start, callback, cbdata) + if isinstance(l.stop, ir.Var): + l.stop = visit_vars_inner(l.stop, callback, cbdata) + if isinstance(l.step, ir.Var): + l.step = visit_vars_inner(l.step, callback, cbdata) + visit_vars({-1: parfor.init_block}, callback, cbdata) + visit_parfor_pattern_vars(parfor, callback, cbdata) + visit_vars(parfor.loop_body, callback, cbdata) + return + + +# add call to visit parfor variable +ir_utils.visit_vars_extensions[Parfor] = visit_vars_parfor + + +def parfor_defs(parfor, use_set=None, def_set=None): + """list variables written in this parfor by recursively + calling compute_use_defs() on body and combining block defs. + """ + if use_set is None: + use_set = set() + if def_set is None: + def_set = set() + blocks = wrap_parfor_blocks(parfor) + uses, defs = compute_use_defs(blocks) + cfg = compute_cfg_from_blocks(blocks) + last_label = max(blocks.keys()) + unwrap_parfor_blocks(parfor) + + # Conservatively, only add defs for blocks that are definitely executed + # Go through blocks in order, as if they are statements of the block that + # includes the parfor, and update uses/defs. 
+ + # no need for topo order of ir_utils + topo_order = cfg.topo_order() + # blocks that dominate last block are definitely executed + definitely_executed = cfg.dominators()[last_label] + # except loop bodies that might not execute + for loop in cfg.loops().values(): + definitely_executed -= loop.body + for label in topo_order: + if label in definitely_executed: + # see compute_use_defs() in analysis.py + # variables defined in the block that includes the parfor are not + # uses of that block (are not potentially live in the beginning of + # the block) + use_set.update(uses[label] - def_set) + def_set.update(defs[label]) + else: + use_set.update(uses[label] - def_set) + + # treat loop variables and size variables as use + loop_vars = { + l.start.name for l in parfor.loop_nests if isinstance( + l.start, ir.Var)} + loop_vars |= { + l.stop.name for l in parfor.loop_nests if isinstance( + l.stop, ir.Var)} + loop_vars |= { + l.step.name for l in parfor.loop_nests if isinstance( + l.step, ir.Var)} + use_set.update(loop_vars - def_set) + use_set |= get_parfor_pattern_vars(parfor) + + return analysis._use_defs_result(usemap=use_set, defmap=def_set) + + +analysis.ir_extension_usedefs[Parfor] = parfor_defs + + +def _parfor_use_alloca(parfor, alloca_set): + """ + Reduction variables for parfors and the reduction variables within + nested parfors must be stack allocated. + """ + alloca_set |= set(parfor.redvars) + + blocks = wrap_parfor_blocks(parfor) + alloca_set |= analysis.must_use_alloca(blocks) + + unwrap_parfor_blocks(parfor) + + +analysis.ir_extension_use_alloca[Parfor] = _parfor_use_alloca + + +def parfor_insert_dels(parfor, curr_dead_set): + """insert dels in parfor. input: dead variable set right after parfor. + returns the variables for which del was inserted. 
+ """ + blocks = wrap_parfor_blocks(parfor) + cfg = compute_cfg_from_blocks(blocks) + usedefs = compute_use_defs(blocks) + live_map = compute_live_map(cfg, blocks, usedefs.usemap, usedefs.defmap) + dead_map = compute_dead_maps(cfg, blocks, live_map, usedefs.defmap) + + # treat loop variables and size variables as live + loop_vars = { + l.start.name for l in parfor.loop_nests if isinstance( + l.start, ir.Var)} + loop_vars |= { + l.stop.name for l in parfor.loop_nests if isinstance( + l.stop, ir.Var)} + loop_vars |= { + l.step.name for l in parfor.loop_nests if isinstance( + l.step, ir.Var)} + loop_vars |= {l.index_variable.name for l in parfor.loop_nests} + # for var_list in parfor.array_analysis.array_size_vars.values(): + # loop_vars |= {v.name for v in var_list if isinstance(v, ir.Var)} + + dead_set = set() + for label in blocks.keys(): + # only kill vars that are actually dead at the parfor's block + dead_map.internal[label] &= curr_dead_set + dead_map.internal[label] -= loop_vars + dead_set |= dead_map.internal[label] + dead_map.escaping[label] &= curr_dead_set + dead_map.escaping[label] -= loop_vars + dead_set |= dead_map.escaping[label] + + # dummy class to replace func_ir. _patch_var_dels only accesses blocks + class DummyFuncIR(object): + + def __init__(self, blocks): + self.blocks = blocks + post_proc = postproc.PostProcessor(DummyFuncIR(blocks)) + post_proc._patch_var_dels(dead_map.internal, dead_map.escaping) + unwrap_parfor_blocks(parfor) + + return dead_set | loop_vars + + +postproc.ir_extension_insert_dels[Parfor] = parfor_insert_dels + + +def maximize_fusion(func_ir, blocks, typemap, up_direction=True): + """ + Reorder statements to maximize parfor fusion. Push all parfors up or down + so they are adjacent. 
+ """ + call_table, _ = get_call_table(blocks) + alias_map, arg_aliases = find_potential_aliases( + blocks, + func_ir.arg_names, + typemap, + func_ir + ) + for block in blocks.values(): + order_changed = True + while order_changed: + order_changed = maximize_fusion_inner( + func_ir, + block, + call_table, + alias_map, + arg_aliases, + up_direction + ) + +def maximize_fusion_inner(func_ir, block, call_table, alias_map, + arg_aliases, up_direction=True): + order_changed = False + i = 0 + # i goes to body[-3] (i+1 to body[-2]) since body[-1] is terminator and + # shouldn't be reordered + while i < len(block.body) - 2: + stmt = block.body[i] + next_stmt = block.body[i+1] + can_reorder = (_can_reorder_stmts(stmt, next_stmt, func_ir, + call_table, alias_map, arg_aliases) + if up_direction else _can_reorder_stmts(next_stmt, stmt, + func_ir, call_table, alias_map, arg_aliases)) + if can_reorder: + block.body[i] = next_stmt + block.body[i+1] = stmt + order_changed = True + i += 1 + return order_changed + +def expand_aliases(the_set, alias_map, arg_aliases): + ret = set() + for i in the_set: + if i in alias_map: + ret = ret.union(alias_map[i]) + if i in arg_aliases: + ret = ret.union(arg_aliases) + ret.add(i) + return ret + +def _can_reorder_stmts(stmt, next_stmt, func_ir, call_table, + alias_map, arg_aliases): + """ + Check dependencies to determine if a parfor can be reordered in the IR block + with a non-parfor statement. + """ + # swap only parfors with non-parfors + # don't reorder calls with side effects (e.g. 
file close) + # only read-read dependencies are OK + # make sure there is no write-write, write-read dependencies + if (isinstance(stmt, Parfor) + and not isinstance(next_stmt, Parfor) + and not isinstance(next_stmt, ir.Print) + and (not isinstance(next_stmt, ir.Assign) + or has_no_side_effect(next_stmt.value, set(), call_table) + or guard(is_assert_equiv, func_ir, next_stmt.value))): + stmt_accesses = expand_aliases({v.name for v in stmt.list_vars()}, + alias_map, arg_aliases) + stmt_writes = expand_aliases(get_parfor_writes(stmt), + alias_map, arg_aliases) + next_accesses = expand_aliases({v.name for v in next_stmt.list_vars()}, + alias_map, arg_aliases) + next_writes = expand_aliases(get_stmt_writes(next_stmt), + alias_map, arg_aliases) + if len((stmt_writes & next_accesses) + | (next_writes & stmt_accesses)) == 0: + return True + return False + +def is_assert_equiv(func_ir, expr): + func_name, mod_name = find_callname(func_ir, expr) + return func_name == 'assert_equiv' + + +def get_parfor_writes(parfor): + assert isinstance(parfor, Parfor) + writes = set() + blocks = parfor.loop_body.copy() + blocks[-1] = parfor.init_block + for block in blocks.values(): + for stmt in block.body: + writes.update(get_stmt_writes(stmt)) + if isinstance(stmt, Parfor): + writes.update(get_parfor_writes(stmt)) + return writes + +FusionReport = namedtuple('FusionReport', ['first', 'second', 'message']) + +def try_fuse(equiv_set, parfor1, parfor2, metadata, func_ir, typemap): + """try to fuse parfors and return a fused parfor, otherwise return None + """ + dprint("try_fuse: trying to fuse \n", parfor1, "\n", parfor2) + + # default report is None + report = None + + # fusion of parfors with different lowerers is not possible + if parfor1.lowerer != parfor2.lowerer: + dprint("try_fuse: parfors different lowerers") + msg = "- fusion failed: lowerer mismatch" + report = FusionReport(parfor1.id, parfor2.id, msg) + return None, report + + # fusion of parfors with different dimensions not 
supported yet + if len(parfor1.loop_nests) != len(parfor2.loop_nests): + dprint("try_fuse: parfors number of dimensions mismatch") + msg = "- fusion failed: number of loops mismatched, %s, %s." + fmt = "parallel loop #%s has a nest of %s loops" + l1 = fmt % (parfor1.id, len(parfor1.loop_nests)) + l2 = fmt % (parfor2.id, len(parfor2.loop_nests)) + report = FusionReport(parfor1.id, parfor2.id, msg % (l1, l2)) + return None, report + + ndims = len(parfor1.loop_nests) + # all loops should be equal length + + def is_equiv(x, y): + return x == y or equiv_set.is_equiv(x, y) + + def get_user_varname(v): + """get original variable name by user if possible""" + if not isinstance(v, ir.Var): + return v + v = v.name + if "var_rename_map" in metadata and v in metadata["var_rename_map"]: + user_varname = metadata["var_rename_map"][v] + return user_varname + return v + + for i in range(ndims): + nest1 = parfor1.loop_nests[i] + nest2 = parfor2.loop_nests[i] + if not (is_equiv(nest1.start, nest2.start) and + is_equiv(nest1.stop, nest2.stop) and + is_equiv(nest1.step, nest2.step)): + dprint("try_fuse: parfor dimension correlation mismatch", i) + msg = "- fusion failed: loop dimension mismatched in axis %s. 
" + msg += "slice(%s, %s, %s) != " % (get_user_varname(nest1.start), + get_user_varname(nest1.stop), get_user_varname(nest1.step)) + msg += "slice(%s, %s, %s)" % (get_user_varname(nest2.start), + get_user_varname(nest2.stop), get_user_varname(nest2.step)) + report = FusionReport(parfor1.id, parfor2.id, msg % i) + return None, report + + func_ir._definitions = build_definitions(func_ir.blocks) + p1_cross_dep, p1_ip, p1_ia, p1_non_ia = has_cross_iter_dep(parfor1, func_ir, typemap) + if not p1_cross_dep: + p2_cross_dep = has_cross_iter_dep(parfor2, func_ir, typemap, p1_ip, p1_ia, p1_non_ia)[0] + else: + p2_cross_dep = True + + if p1_cross_dep or p2_cross_dep: + dprint("try_fuse: parfor cross iteration dependency found") + msg = ("- fusion failed: cross iteration dependency found " + "between loops #%s and #%s") + report = FusionReport(parfor1.id, parfor2.id, + msg % (parfor1.id, parfor2.id)) + return None, report + + + # find parfor1's defs, only body is considered since init_block will run + # first after fusion as well + p1_body_usedefs = compute_use_defs(parfor1.loop_body) + p1_body_defs = set() + for defs in p1_body_usedefs.defmap.values(): + p1_body_defs |= defs + p1_body_defs |= get_parfor_writes(parfor1) + # Add reduction variables from parfor1 to the set of body defs + # so that if parfor2 reads the reduction variable it won't fuse. 
+ p1_body_defs |= set(parfor1.redvars) + + p2_usedefs = compute_use_defs(parfor2.loop_body) + p2_uses = compute_use_defs({0: parfor2.init_block}).usemap[0] + for uses in p2_usedefs.usemap.values(): + p2_uses |= uses + + overlap = p1_body_defs.intersection(p2_uses) + # overlap are those variable defined in first parfor and used in the second + if len(overlap) != 0: + # Get all the arrays + _, p2arraynotindexed = get_array_indexed_with_parfor_index( + parfor2.loop_body.values(), + parfor2.index_var.name, + parfor2.get_loop_nest_vars(), + func_ir) + + unsafe_var = (not isinstance(typemap[x], types.ArrayCompatible) or x in p2arraynotindexed for x in overlap) + + if any(unsafe_var): + dprint("try_fuse: parfor2 depends on parfor1 body") + msg = ("- fusion failed: parallel loop %s has a dependency on the " + "body of parallel loop %s. ") + report = FusionReport(parfor1.id, parfor2.id, + msg % (parfor1.id, parfor2.id)) + return None, report + + return fuse_parfors_inner(parfor1, parfor2) + + +def fuse_parfors_inner(parfor1, parfor2): + # fuse parfor2 into parfor1 + # append parfor2's init block on parfor1's + parfor1.init_block.body.extend(parfor2.init_block.body) + + # append parfor2's first block to parfor1's last block + parfor2_first_label = min(parfor2.loop_body.keys()) + parfor2_first_block = parfor2.loop_body[parfor2_first_label].body + parfor1_first_label = min(parfor1.loop_body.keys()) + parfor1_last_label = max(parfor1.loop_body.keys()) + parfor1.loop_body[parfor1_last_label].body.extend(parfor2_first_block) + + # add parfor2 body blocks to parfor1's except first + parfor1.loop_body.update(parfor2.loop_body) + parfor1.loop_body.pop(parfor2_first_label) + + # replace parfor2 indices with parfor1's + ndims = len(parfor1.loop_nests) + index_dict = {parfor2.index_var.name: parfor1.index_var} + for i in range(ndims): + index_dict[parfor2.loop_nests[i].index_variable.name] = parfor1.loop_nests[ + i].index_variable + replace_vars(parfor1.loop_body, index_dict) + + # 
re-order labels from min to max + blocks = wrap_parfor_blocks(parfor1, entry_label=parfor1_first_label) + blocks = rename_labels(blocks) + unwrap_parfor_blocks(parfor1, blocks) + + nameset = set(x.name for x in index_dict.values()) + remove_duplicate_definitions(parfor1.loop_body, nameset) + parfor1.patterns.extend(parfor2.patterns) + if config.DEBUG_ARRAY_OPT_STATS: + print('Parallel for-loop #{} is fused into for-loop #{}.'.format( + parfor2.id, parfor1.id)) + + msg = '- fusion succeeded: parallel for-loop #{} is fused into for-loop #{}.' + msg = msg.format(parfor2.id, parfor1.id) + report = FusionReport(parfor1.id, parfor2.id, msg) + + return parfor1, report + + +def remove_duplicate_definitions(blocks, nameset): + """Remove duplicated definition for variables in the given nameset, which + is often a result of parfor fusion. + """ + for label, block in blocks.items(): + body = block.body + new_body = [] + defined = set() + for inst in body: + if isinstance(inst, ir.Assign): + name = inst.target.name + if name in nameset: + if name in defined: + continue + defined.add(name) + new_body.append(inst) + block.body = new_body + return + + +def has_cross_iter_dep( + parfor, + func_ir, + typemap, + index_positions=None, + indexed_arrays=None, + non_indexed_arrays=None): + # We should assume there is cross iteration dependency unless we can + # prove otherwise. Return True if there is a cross-iter dependency + # that should prevent fusion, False if fusion is okay. + # Also returns index_positions, indexed_arrays, and non_indexed_arrays + # who purpose is described below so that subsequent additional + # has_cross_iter_dep calls for other parfors can build on the same + # data structures to make sure that the array accesses generate no + # cross-iter dependencies both within a parfor but also across parfors. + + # TODO: make it more accurate using ud-chains + + # Get the index variable used by this parfor. 
+ # This will hold other variables with equivalent value, e.g., a = index_var + indices = {l.index_variable.name for l in parfor.loop_nests} + # This set will store variables that are (potentially recursively) + # defined in relation to an index variable, e.g., a = index_var + 1. + # A getitem that uses an index variable from this set will be considered + # as potentially having a cross-iter dependency and so won't fuse. + derived_from_indices = set() + # For the first parfor considered for fusion, the latter 3 args will be None + # and initialized to empty. For the second parfor, the structures from the + # previous parfor are passed in so that cross-parfor violations of the + # below comments can prevent fusion. + # + # index_positions keeps track of which index positions have had an index + # variable used for them and which ones haven't for each possible array + # dimensionality. After the first array access is seen, if subsequent + # ones use a parfor index for a different dimension then we conservatively + # say that we can't fuse. For example, if a 2D array is accessed with + # a[parfor_index, 0] then index_positions[2] will be (True, False) and + # if a[0, parfor_index] happens later which is (False, True) then this + # conflicts with the previous value and will prevent fusion. + # + # indexed_arrays records arrays that are accessed with at least one + # parfor index. If such an array is later accessed with indices that + # don't include a parfor index then conservatively assume we can't fuse. + # + # non_indexed_arrays holds arrays that are indexed without a parfor index. + # If an array first accessed without a parfor index is later indexed + # with one then conservatively assume we can't fuse. 
+ if index_positions is None: + index_positions = {} + if indexed_arrays is None: + indexed_arrays = set() + if non_indexed_arrays is None: + non_indexed_arrays = set() + + def add_check_position(new_position, + array_accessed, + index_positions, + indexed_arrays, + non_indexed_arrays): + """Returns True if there is a reason to prevent fusion based + on the rules described above. + new_position will be a list or tuple of booleans that + says whether the index in that spot is a parfor index + or not. array_accessed is the array on which the access + is occurring.""" + + # Convert list indices to tuple for generality. + if isinstance(new_position, list): + new_position = tuple(new_position) + + # If none of the indices are based on a parfor index. + if True not in new_position: + # See if this array has been accessed before with a + # a parfor index and if so say that we can't fuse. + if array_accessed in indexed_arrays: + return True + else: + # Either array is already in non_indexed arrays or we + # will add it. Either way, this index usage can fuse. + non_indexed_arrays.add(array_accessed) + return False + + # Fallthrough for cases where one of the indices is a parfor index. + # If this array was previously accessed without a parfor index then + # conservatively say we can't fuse. + if array_accessed in non_indexed_arrays: + return True + + indexed_arrays.add(array_accessed) + + npsize = len(new_position) + # See if we have not seen a npsize dimensioned array accessed before. + if npsize not in index_positions: + # If not then add current set of parfor/non-parfor indices and + # indicate it is safe as it is the first usage. + index_positions[npsize] = new_position + return False + + # Here we have a subsequent access to a npsize-dimensioned array. + # Make sure we see the same combination of parfor/non-parfor index + # indices that we've seen before. If not then return True saying + # that we can't fuse. 
+ return index_positions[npsize] != new_position + + def check_index(stmt_index, + array_accessed, + index_positions, + indexed_arrays, + non_indexed_arrays, + derived_from_indices): + """Looks at the indices of a getitem or setitem to see if there + is a reason that they would prevent fusion. + Returns True if fusion should be prohibited, False otherwise. + """ + if isinstance(stmt_index, ir.Var): + # If the array is 2+ dimensions then the index should be a tuple. + if isinstance(typemap[stmt_index.name], types.BaseTuple): + # Get how the index tuple is constructed. + fbs_res = guard(find_build_sequence, func_ir, stmt_index) + if fbs_res is not None: + ind_seq, _ = fbs_res + # If any indices are derived from an index is used then + # return True to say we can't fuse. + if (all([x.name in indices or + x.name not in derived_from_indices for x in ind_seq])): + # Get position in index tuple where parfor indices used. + new_index_positions = [x.name in indices for x in ind_seq] + # Make sure that we aren't accessing a given array with + # different indices in a different order. + return add_check_position(new_index_positions, + array_accessed, + index_positions, + indexed_arrays, + non_indexed_arrays) + else: + # index derived from a parfor index used so no fusion + return True + else: + # Don't know how the index tuple is built so + # have to assume fusion can't happen. + return True + else: + # Should be for 1D arrays. + if stmt_index.name in indices: + # Array indexed by a parfor index variable. + # Make sure this 1D access is consistent with prior ones. + return add_check_position((True,), + array_accessed, + index_positions, + indexed_arrays, + non_indexed_arrays) + elif stmt_index.name in derived_from_indices: + # If we ever index an array with something calculated + # from an index then no fusion. + return True + else: + # Some kind of index that isn't a parfor index or + # one derived from one, e.g., a constant. 
Make sure + # this is consistent with prior accessed of this array. + return add_check_position((False,), + array_accessed, + index_positions, + indexed_arrays, + non_indexed_arrays) + else: + # We don't know how to handle non-Var indices so no fusion. + return True + + # All branches above should cover all the cases and each should + # return so we should never get here. + raise errors.InternalError("Some code path in the parfor fusion " + "cross-iteration dependency checker " + "check_index didn't return a result.") + + # Iterate through all the statements in the parfor. + for b in parfor.loop_body.values(): + for stmt in b.body: + # Make sure SetItem accesses are fusion safe. + if isinstance(stmt, (ir.SetItem, ir.StaticSetItem)): + if isinstance(typemap[stmt.target.name], types.npytypes.Array): + # Check index safety with prior array accesses. + if check_index(stmt.index, + stmt.target.name, + index_positions, + indexed_arrays, + non_indexed_arrays, + derived_from_indices): + return True, index_positions, indexed_arrays, non_indexed_arrays + # Fusion safe so go to next statement. + continue + elif isinstance(stmt, ir.Assign): + # If stmt of form a = parfor_index then add "a" to set of + # parfor indices. + if isinstance(stmt.value, ir.Var): + if stmt.value.name in indices: + indices.add(stmt.target.name) + continue + elif isinstance(stmt.value, ir.Expr): + op = stmt.value.op + # Make sure getitem accesses are fusion safe. + if op in ['getitem', 'static_getitem']: + if isinstance(typemap[stmt.value.value.name], types.npytypes.Array): + # Check index safety with prior array accesses. + if check_index(stmt.value.index, + stmt.value.value.name, + index_positions, + indexed_arrays, + non_indexed_arrays, + derived_from_indices): + return True, index_positions, indexed_arrays, non_indexed_arrays + # Fusion safe so go to next statement. 
+ continue + elif op == 'call': + # If there is a call in the parfor body that takes some + # array parameter then we have no way to analyze what + # that call is doing so presume it is unsafe for fusion. + if (any([isinstance(typemap[x.name], types.npytypes.Array) + for x in stmt.value.list_vars()])): + return True, index_positions, indexed_arrays, non_indexed_arrays + + # Get the vars used by this non-setitem/getitem statement. + rhs_vars = [x.name for x in stmt.value.list_vars()] + # If a parfor index is used as part of this statement or + # something previous determined to be derived from a parfor + # index then add the target variable to the set of + # variables that are derived from parfors and so should + # prevent fusion if used as an index. + if (not indices.isdisjoint(rhs_vars) or + not derived_from_indices.isdisjoint(rhs_vars)): + derived_from_indices.add(stmt.target.name) + + return False, index_positions, indexed_arrays, non_indexed_arrays + + +def dprint(*s): + if config.DEBUG_ARRAY_OPT >= 1: + print(*s) + +def get_parfor_pattern_vars(parfor): + """ get the variables used in parfor pattern information + """ + out = set() + # currently, only stencil pattern has variables + for pattern in parfor.patterns: + if pattern[0] == 'stencil': + left_lengths = pattern[1][0] + right_lengths = pattern[1][1] + for v in left_lengths+right_lengths: + if isinstance(v, ir.Var): + out.add(v.name) + return out + +def remove_dead_parfor(parfor, lives, lives_n_aliases, arg_aliases, alias_map, func_ir, typemap): + """ remove dead code inside parfor including get/sets + """ + + with dummy_return_in_loop_body(parfor.loop_body): + labels = find_topo_order(parfor.loop_body) + + # get/setitem replacement should ideally use dataflow to propagate setitem + # saved values, but for simplicity we handle the common case of propagating + # setitems in the first block (which is dominant) if the array is not + # potentially changed in any way + first_label = labels[0] + 
first_block_saved_values = {} + _update_parfor_get_setitems( + parfor.loop_body[first_label].body, + parfor.index_var, alias_map, + first_block_saved_values, + lives_n_aliases + ) + + # remove saved first block setitems if array potentially changed later + saved_arrs = set(first_block_saved_values.keys()) + for l in labels: + if l == first_label: + continue + for stmt in parfor.loop_body[l].body: + if (isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr) + and stmt.value.op == 'getitem' + and stmt.value.index.name == parfor.index_var.name): + continue + varnames = set(v.name for v in stmt.list_vars()) + rm_arrs = varnames & saved_arrs + for a in rm_arrs: + first_block_saved_values.pop(a, None) + + + # replace getitems with available value + # e.g. A[i] = v; ... s = A[i] -> s = v + for l in labels: + if l == first_label: + continue + block = parfor.loop_body[l] + saved_values = first_block_saved_values.copy() + _update_parfor_get_setitems(block.body, parfor.index_var, alias_map, + saved_values, lives_n_aliases) + + + # after getitem replacement, remove extra setitems + blocks = parfor.loop_body.copy() # shallow copy is enough + last_label = max(blocks.keys()) + return_label, tuple_var = _add_liveness_return_block(blocks, lives_n_aliases, typemap) + # jump to return label + jump = ir.Jump(return_label, ir.Loc("parfors_dummy", -1)) + blocks[last_label].body.append(jump) + cfg = compute_cfg_from_blocks(blocks) + usedefs = compute_use_defs(blocks) + live_map = compute_live_map(cfg, blocks, usedefs.usemap, usedefs.defmap) + alias_set = set(alias_map.keys()) + + for label, block in blocks.items(): + new_body = [] + in_lives = {v.name for v in block.terminator.list_vars()} + # find live variables at the end of block + for out_blk, _data in cfg.successors(label): + in_lives |= live_map[out_blk] + for stmt in reversed(block.body): + # aliases of lives are also live for setitems + alias_lives = in_lives & alias_set + for v in alias_lives: + in_lives |= alias_map[v] 
+ if (isinstance(stmt, (ir.StaticSetItem, ir.SetItem)) and + get_index_var(stmt).name == parfor.index_var.name and + stmt.target.name not in in_lives and + stmt.target.name not in arg_aliases): + continue + in_lives |= {v.name for v in stmt.list_vars()} + new_body.append(stmt) + new_body.reverse() + block.body = new_body + + typemap.pop(tuple_var.name) # remove dummy tuple type + blocks[last_label].body.pop() # remove jump + + """ + Process parfor body recursively. + Note that this is the only place in this function that uses the + argument lives instead of lives_n_aliases. The former does not + include the aliases of live variables but only the live variable + names themselves. See a comment in this function for how that + is used. + """ + remove_dead_parfor_recursive( + parfor, lives, arg_aliases, alias_map, func_ir, typemap) + + # remove parfor if empty + is_empty = len(parfor.init_block.body) == 0 + for block in parfor.loop_body.values(): + is_empty &= len(block.body) == 0 + if is_empty: + return None + return parfor + +def _update_parfor_get_setitems(block_body, index_var, alias_map, + saved_values, lives): + """ + replace getitems of a previously set array in a block of parfor loop body + """ + for stmt in block_body: + if (isinstance(stmt, (ir.StaticSetItem, ir.SetItem)) and + get_index_var(stmt).name == index_var.name and + stmt.target.name not in lives): + # saved values of aliases of SetItem target array are invalid + for w in alias_map.get(stmt.target.name, []): + saved_values.pop(w, None) + # set saved value after invalidation since alias_map may + # contain the array itself (e.g. 
pi example) + saved_values[stmt.target.name] = stmt.value + continue + if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr): + rhs = stmt.value + if rhs.op == 'getitem' and isinstance(rhs.index, ir.Var): + if rhs.index.name == index_var.name: + # replace getitem if value saved + stmt.value = saved_values.get(rhs.value.name, rhs) + continue + # conservative assumption: array is modified if referenced + # remove all referenced arrays + for v in stmt.list_vars(): + saved_values.pop(v.name, None) + # aliases are potentially modified as well + for w in alias_map.get(v.name, []): + saved_values.pop(w, None) + + return + +ir_utils.remove_dead_extensions[Parfor] = remove_dead_parfor + + +def remove_dead_parfor_recursive(parfor, lives, arg_aliases, alias_map, + func_ir, typemap): + """create a dummy function from parfor and call remove dead recursively + """ + blocks = parfor.loop_body.copy() # shallow copy is enough + first_body_block = min(blocks.keys()) + assert first_body_block > 0 # we are using 0 for init block here + last_label = max(blocks.keys()) + + """ + Previously, this statement used lives_n_aliases. That had the effect of + keeping variables in the init_block alive if they aliased an array that + was later written to. By using just lives to indicate which variables + names are live at exit of the parfor but then using alias_map for the + actual recursive dead code removal, we keep any writes to aliased arrays + alive but also allow aliasing assignments (i.e., a = b) to be eliminated + so long as 'b' is not written to through the variable 'a' later on. + This makes assignment handling of remove_dead_block work properly since + it allows distinguishing between live variables and their aliases. 
+ """ + return_label, tuple_var = _add_liveness_return_block(blocks, lives, typemap) + + # branch back to first body label to simulate loop + scope = blocks[last_label].scope + + branchcond = ir.Var(scope, mk_unique_var("$branchcond"), ir.Loc("parfors_dummy", -1)) + typemap[branchcond.name] = types.boolean + + branch = ir.Branch(branchcond, first_body_block, return_label, ir.Loc("parfors_dummy", -1)) + blocks[last_label].body.append(branch) + + # add dummy jump in init_block for CFG to work + blocks[0] = parfor.init_block + blocks[0].body.append(ir.Jump(first_body_block, ir.Loc("parfors_dummy", -1))) + + # args var including aliases is ok + remove_dead(blocks, arg_aliases, func_ir, typemap, alias_map, arg_aliases) + typemap.pop(tuple_var.name) # remove dummy tuple type + blocks[0].body.pop() # remove dummy jump + blocks[last_label].body.pop() # remove branch + return + +def _add_liveness_return_block(blocks, lives, typemap): + last_label = max(blocks.keys()) + return_label = last_label + 1 + + loc = blocks[last_label].loc + scope = blocks[last_label].scope + blocks[return_label] = ir.Block(scope, loc) + + # add lives in a dummpy return to last block to avoid their removal + tuple_var = ir.Var(scope, mk_unique_var("$tuple_var"), loc) + # dummy type for tuple_var + typemap[tuple_var.name] = types.containers.UniTuple( + types.uintp, 2) + live_vars = [ir.Var(scope, v, loc) for v in lives] + tuple_call = ir.Expr.build_tuple(live_vars, loc) + blocks[return_label].body.append(ir.Assign(tuple_call, tuple_var, loc)) + blocks[return_label].body.append(ir.Return(tuple_var, loc)) + return return_label, tuple_var + + +def find_potential_aliases_parfor(parfor, args, typemap, func_ir, alias_map, arg_aliases): + blocks = wrap_parfor_blocks(parfor) + ir_utils.find_potential_aliases( + blocks, args, typemap, func_ir, alias_map, arg_aliases) + unwrap_parfor_blocks(parfor) + return + +ir_utils.alias_analysis_extensions[Parfor] = find_potential_aliases_parfor + +def 
simplify_parfor_body_CFG(blocks): + """simplify CFG of body loops in parfors""" + n_parfors = 0 + for block in blocks.values(): + for stmt in block.body: + if isinstance(stmt, Parfor): + n_parfors += 1 + parfor = stmt + # add dummy return to enable CFG creation + # can't use dummy_return_in_loop_body since body changes + last_block = parfor.loop_body[max(parfor.loop_body.keys())] + scope = last_block.scope + loc = ir.Loc("parfors_dummy", -1) + const = ir.Var(scope, mk_unique_var("$const"), loc) + last_block.body.append(ir.Assign(ir.Const(0, loc), const, loc)) + last_block.body.append(ir.Return(const, loc)) + parfor.loop_body = simplify_CFG(parfor.loop_body) + last_block = parfor.loop_body[max(parfor.loop_body.keys())] + last_block.body.pop() + # call on body recursively + simplify_parfor_body_CFG(parfor.loop_body) + return n_parfors + + +def wrap_parfor_blocks(parfor, entry_label = None): + """wrap parfor blocks for analysis/optimization like CFG""" + blocks = parfor.loop_body.copy() # shallow copy is enough + if entry_label is None: + entry_label = min(blocks.keys()) + assert entry_label > 0 # we are using 0 for init block here + + # add dummy jump in init_block for CFG to work + blocks[0] = parfor.init_block + blocks[0].body.append(ir.Jump(entry_label, blocks[0].loc)) + for block in blocks.values(): + if len(block.body) == 0 or (not block.body[-1].is_terminator): + block.body.append(ir.Jump(entry_label, block.loc)) + return blocks + + +def unwrap_parfor_blocks(parfor, blocks=None): + """ + unwrap parfor blocks after analysis/optimization. + Allows changes to the parfor loop. 
+ """ + if blocks is not None: + # make sure init block isn't removed + init_block_label = min(blocks.keys()) + # update loop body blocks + blocks.pop(init_block_label) + parfor.loop_body = blocks + + # make sure dummy jump to loop body isn't altered + first_body_label = min(parfor.loop_body.keys()) + assert isinstance(parfor.init_block.body[-1], ir.Jump) + + # remove dummy jump to loop body + parfor.init_block.body.pop() + + # make sure dummy jump back to loop body isn't altered + for block in parfor.loop_body.values(): + if (isinstance(block.body[-1], ir.Jump) and + block.body[-1].target == first_body_label): + # remove dummy jump back to loop + block.body.pop() + return + + +def get_copies_parfor(parfor, typemap): + """find copies generated/killed by parfor""" + blocks = wrap_parfor_blocks(parfor) + in_copies_parfor, out_copies_parfor = copy_propagate(blocks, typemap) + in_gen_copies, in_extra_kill = get_block_copies(blocks, typemap) + unwrap_parfor_blocks(parfor) + + # parfor's extra kill is kills of its init block, + # and all possible gens and kills of it's body loop. 
+ # body doesn't gen and only kills since it may or may not run + # TODO: save copies that are repeated in parfor + kill_set = in_extra_kill[0] + for label in parfor.loop_body.keys(): + kill_set |= {l for l, r in in_gen_copies[label]} + kill_set |= in_extra_kill[label] + + # gen copies is copies generated by init that are not killed by body + last_label = max(parfor.loop_body.keys()) + gens = out_copies_parfor[last_label] & in_gen_copies[0] + + if config.DEBUG_ARRAY_OPT >= 1: + print("copy propagate parfor gens:", gens, "kill_set", kill_set) + return gens, kill_set + + +ir_utils.copy_propagate_extensions[Parfor] = get_copies_parfor + + +def apply_copies_parfor(parfor, var_dict, name_var_table, + typemap, calltypes, save_copies): + """apply copy propagate recursively in parfor""" + # replace variables in pattern metadata like stencil neighborhood + for i, pattern in enumerate(parfor.patterns): + if pattern[0] == 'stencil': + parfor.patterns[i] = ('stencil', + replace_vars_inner(pattern[1], var_dict)) + + # replace loop boundary variables + for l in parfor.loop_nests: + l.start = replace_vars_inner(l.start, var_dict) + l.stop = replace_vars_inner(l.stop, var_dict) + l.step = replace_vars_inner(l.step, var_dict) + + blocks = wrap_parfor_blocks(parfor) + # add dummy assigns for each copy + assign_list = [] + for lhs_name, rhs in var_dict.items(): + assign_list.append(ir.Assign(rhs, name_var_table[lhs_name], + ir.Loc("dummy", -1))) + blocks[0].body = assign_list + blocks[0].body + in_copies_parfor, out_copies_parfor = copy_propagate(blocks, typemap) + apply_copy_propagate(blocks, in_copies_parfor, name_var_table, typemap, + calltypes, save_copies) + unwrap_parfor_blocks(parfor) + # remove dummy assignments + blocks[0].body = blocks[0].body[len(assign_list):] + return + + +ir_utils.apply_copy_propagate_extensions[Parfor] = apply_copies_parfor + + +def push_call_vars(blocks, saved_globals, saved_getattrs, typemap, nested=False): + """push call variables to right before 
their call site. + assuming one global/getattr is created for each call site and control flow + doesn't change it. + """ + for block in blocks.values(): + new_body = [] + # global/attr variables that are defined in this block already, + # no need to reassign them + block_defs = set() + # Some definitions are copied right before the call but then we + # need to rename that symbol in that block so that typing won't + # generate an error trying to lock the save var twice. + # In rename_dict, we collect the symbols that must be renamed in + # this block. We collect them then apply the renaming at the end. + rename_dict = {} + for stmt in block.body: + def process_assign(stmt): + if isinstance(stmt, ir.Assign): + rhs = stmt.value + lhs = stmt.target + if (isinstance(rhs, ir.Global)): + saved_globals[lhs.name] = stmt + block_defs.add(lhs.name) + elif isinstance(rhs, ir.Expr) and rhs.op == 'getattr': + if (rhs.value.name in saved_globals + or rhs.value.name in saved_getattrs): + saved_getattrs[lhs.name] = stmt + block_defs.add(lhs.name) + + if not nested and isinstance(stmt, Parfor): + for s in stmt.init_block.body: + process_assign(s) + pblocks = stmt.loop_body.copy() + push_call_vars(pblocks, saved_globals, saved_getattrs, typemap, nested=True) + new_body.append(stmt) + continue + else: + process_assign(stmt) + for v in stmt.list_vars(): + new_body += _get_saved_call_nodes(v.name, saved_globals, + saved_getattrs, block_defs, rename_dict) + new_body.append(stmt) + block.body = new_body + # If there is anything to rename then apply the renaming here. + if len(rename_dict) > 0: + # Fix-up the typing for the renamed vars. + for k, v in rename_dict.items(): + typemap[v] = typemap[k] + # This is only to call replace_var_names which takes a dict. 
+ temp_blocks = {0: block} + replace_var_names(temp_blocks, rename_dict) + + return + + +def _get_saved_call_nodes(fname, saved_globals, saved_getattrs, block_defs, rename_dict): + """ Implement the copying of globals or getattrs for the purposes noted in + push_call_vars. We make a new var and assign to it a copy of the + global or getattr. We remember this new assignment node and add an + entry in the renaming dictionary so that for this block the original + var name is replaced by the new var name we created. + """ + nodes = [] + while (fname not in block_defs and (fname in saved_globals + or fname in saved_getattrs)): + def rename_global_or_getattr(obj, var_base, nodes, block_defs, rename_dict): + assert(isinstance(obj, ir.Assign)) + renamed_var = ir.Var(obj.target.scope, + mk_unique_var(var_base), + obj.target.loc) + renamed_assign = ir.Assign(copy.deepcopy(obj.value), + renamed_var, + obj.loc) + nodes.append(renamed_assign) + block_defs.add(obj.target.name) + rename_dict[obj.target.name] = renamed_assign.target.name + + if fname in saved_globals: + rename_global_or_getattr(saved_globals[fname], "$push_global_to_block", + nodes, block_defs, rename_dict) + fname = '_PA_DONE' + elif fname in saved_getattrs: + rename_global_or_getattr(saved_getattrs[fname], "$push_getattr_to_block", + nodes, block_defs, rename_dict) + fname = saved_getattrs[fname].value.value.name + nodes.reverse() + return nodes + +def repr_arrayexpr(arrayexpr): + """Extract operators from arrayexpr to represent it abstractly as a string. 
+ """ + if isinstance(arrayexpr, tuple): + opr = arrayexpr[0] + # sometimes opr is not string like '+', but is a ufunc object + if not isinstance(opr, str): + if hasattr(opr, '__name__'): + opr = opr.__name__ + else: + opr = '_' # can return dummy since repr is not critical + args = arrayexpr[1] + if len(args) == 1: + return '({}({}))'.format(opr, repr_arrayexpr(args[0])) + else: + opr = ' ' + opr + ' ' + return '({})'.format(opr.join([ repr_arrayexpr(x) for x in args ])) + elif isinstance(arrayexpr, numba.core.ir.Var): + name = arrayexpr.name + if name.startswith('$'): + return '\'%s\' (temporary variable)' % name + else: + return name + elif isinstance(arrayexpr, numba.core.ir.Const): + return repr(arrayexpr.value) + else: + return '_' + +def fix_generator_types(generator_info, return_type, typemap): + """postproc updates generator_info with live variables after transformations + but generator variables have types in return_type that are updated here. + """ + new_state_types = [] + for v in generator_info.state_vars: + new_state_types.append(typemap[v]) + return_type.state_types = tuple(new_state_types) + return + + +def get_parfor_call_table(parfor, call_table=None, reverse_call_table=None): + if call_table is None: + call_table = {} + if reverse_call_table is None: + reverse_call_table = {} + blocks = wrap_parfor_blocks(parfor) + call_table, reverse_call_table = get_call_table(blocks, call_table, + reverse_call_table) + unwrap_parfor_blocks(parfor) + return call_table, reverse_call_table + + +ir_utils.call_table_extensions[Parfor] = get_parfor_call_table + + +def get_parfor_tuple_table(parfor, tuple_table=None): + if tuple_table is None: + tuple_table = {} + blocks = wrap_parfor_blocks(parfor) + tuple_table = ir_utils.get_tuple_table(blocks, tuple_table) + unwrap_parfor_blocks(parfor) + return tuple_table + + +ir_utils.tuple_table_extensions[Parfor] = get_parfor_tuple_table + + +def get_parfor_array_accesses(parfor, accesses=None): + if accesses is None: + 
def parfor_add_offset_to_labels(parfor, offset):
    # Shift every block label inside the parfor by ``offset``, then move the
    # shifted entry block back to label 0 — presumably the wrapper convention
    # expects the entry at 0 (mirrors wrap/unwrap_parfor_blocks usage).
    blocks = wrap_parfor_blocks(parfor)
    blocks = add_offset_to_labels(blocks, offset)
    # NOTE: assign before popping, matching the original set-then-pop order.
    blocks[0] = blocks[offset]
    del blocks[offset]
    unwrap_parfor_blocks(parfor, blocks)
@contextmanager
def dummy_return_in_loop_body(loop_body):
    """adds dummy return to last block of parfor loop body for CFG computation
    """
    # The highest label is the last block, since it is added manually for
    # prange loops.
    tail_block = loop_body[max(loop_body.keys())]
    retvar = ir.Var(tail_block.scope, mk_unique_var("$const"),
                    ir.Loc("parfors_dummy", -1))
    tail_block.body.append(ir.Return(retvar, ir.Loc("parfors_dummy", -1)))
    yield
    # Strip the dummy return again once the caller is done.
    tail_block.body.pop()
+ """ + if config.IS_32BITS: + msg = ("The 'parallel' target is not currently supported on 32 bit " + "hardware.") + raise errors.UnsupportedParforsError(msg) diff --git a/venv/lib/python3.10/site-packages/numba/parfors/parfor_lowering.py b/venv/lib/python3.10/site-packages/numba/parfors/parfor_lowering.py new file mode 100644 index 0000000000000000000000000000000000000000..38e40a889b24caf5e98aeeab75adfb9e5a4ba2b1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/parfors/parfor_lowering.py @@ -0,0 +1,2068 @@ +import copy +import operator + +import types as pytypes +import operator +import warnings +from dataclasses import make_dataclass + +import llvmlite.ir +import numpy as np + +import numba +from numba.parfors import parfor +from numba.core import types, ir, config, compiler, sigutils, cgutils +from numba.core.ir_utils import ( + add_offset_to_labels, + replace_var_names, + remove_dels, + legalize_names, + rename_labels, + get_name_var_table, + visit_vars_inner, + get_definition, + guard, + get_call_table, + is_pure, + get_np_ufunc_typ, + get_unused_var_name, + is_const_call, + fixup_var_define_in_scope, + transfer_scope, + find_max_label, + get_global_func_typ, + find_topo_order, +) +from numba.core.typing import signature +from numba.core import lowering +from numba.parfors.parfor import ensure_parallel_support +from numba.core.errors import ( + NumbaParallelSafetyWarning, NotDefinedError, CompilerError, InternalError, +) +from numba.parfors.parfor_lowering_utils import ParforLoweringBuilder + + +class ParforLower(lowering.Lower): + """This is a custom lowering class that extends standard lowering so as + to accommodate parfor.Parfor nodes.""" + + # custom instruction lowering to handle parfor nodes + def lower_inst(self, inst): + if isinstance(inst, parfor.Parfor): + _lower_parfor_parallel(self, inst) + else: + super().lower_inst(inst) + + @property + def _disable_sroa_like_opt(self): + """ + Force disable this because Parfor use-defs is 
incompatible---it only + considers use-defs in blocks that must be executing. + See https://github.com/numba/numba/commit/017e2ff9db87fc34149b49dd5367ecbf0bb45268 + """ + return True + + +def _lower_parfor_parallel(lowerer, parfor): + if parfor.lowerer is None: + return _lower_parfor_parallel_std(lowerer, parfor) + else: + return parfor.lowerer(lowerer, parfor) + + +def _lower_parfor_parallel_std(lowerer, parfor): + """Lowerer that handles LLVM code generation for parfor. + This function lowers a parfor IR node to LLVM. + The general approach is as follows: + 1) The code from the parfor's init block is lowered normally + in the context of the current function. + 2) The body of the parfor is transformed into a gufunc function. + 3) Code is inserted into the main function that calls do_scheduling + to divide the iteration space for each thread, allocates + reduction arrays, calls the gufunc function, and then invokes + the reduction function across the reduction arrays to produce + the final reduction values. + """ + from numba.np.ufunc.parallel import get_thread_count + + ensure_parallel_support() + typingctx = lowerer.context.typing_context + targetctx = lowerer.context + builder = lowerer.builder + # We copy the typemap here because for race condition variable we'll + # update their type to array so they can be updated by the gufunc. + orig_typemap = lowerer.fndesc.typemap + # replace original typemap with copy and restore the original at the end. 
+ lowerer.fndesc.typemap = copy.copy(orig_typemap) + if config.DEBUG_ARRAY_OPT: + print("lowerer.fndesc", lowerer.fndesc, type(lowerer.fndesc)) + typemap = lowerer.fndesc.typemap + varmap = lowerer.varmap + + if config.DEBUG_ARRAY_OPT: + print("_lower_parfor_parallel") + parfor.dump() + + loc = parfor.init_block.loc + scope = parfor.init_block.scope + + # produce instructions for init_block + if config.DEBUG_ARRAY_OPT: + print("init_block = ", parfor.init_block, " ", type(parfor.init_block)) + for instr in parfor.init_block.body: + if config.DEBUG_ARRAY_OPT: + print("lower init_block instr = ", instr) + lowerer.lower_inst(instr) + + for racevar in parfor.races: + if racevar not in varmap: + rvtyp = typemap[racevar] + rv = ir.Var(scope, racevar, loc) + lowerer._alloca_var(rv.name, rvtyp) + + alias_map = {} + arg_aliases = {} + numba.parfors.parfor.find_potential_aliases_parfor(parfor, parfor.params, typemap, + lowerer.func_ir, alias_map, arg_aliases) + if config.DEBUG_ARRAY_OPT: + print("alias_map", alias_map) + print("arg_aliases", arg_aliases) + + # run get_parfor_outputs() and get_parfor_reductions() before gufunc creation + # since Jumps are modified so CFG of loop_body dict will become invalid + assert parfor.params is not None + + parfor_output_arrays = numba.parfors.parfor.get_parfor_outputs( + parfor, parfor.params) + parfor_redvars, parfor_reddict = parfor.redvars, parfor.reddict + if config.DEBUG_ARRAY_OPT: + print("parfor_redvars:", parfor_redvars) + print("parfor_reddict:", parfor_reddict) + + # init reduction array allocation here. + nredvars = len(parfor_redvars) + redarrs = {} + to_cleanup = [] + if nredvars > 0: + # reduction arrays outer dimension equal to thread count + scope = parfor.init_block.scope + loc = parfor.init_block.loc + pfbdr = ParforLoweringBuilder(lowerer=lowerer, scope=scope, loc=loc) + + # Get the Numba internal function to call to get the thread count. 
+ get_num_threads = pfbdr.bind_global_function( + fobj=numba.np.ufunc.parallel._iget_num_threads, + ftype=get_global_func_typ(numba.np.ufunc.parallel._iget_num_threads), + args=() + ) + + # Insert the call to assign the thread count to a variable. + num_threads_var = pfbdr.assign( + rhs=pfbdr.call(get_num_threads, args=[]), + typ=types.intp, + name="num_threads_var") + + # For each reduction variable... + for i in range(nredvars): + red_name = parfor_redvars[i] + # Get the type of the reduction variable. + redvar_typ = lowerer.fndesc.typemap[red_name] + # Get the ir.Var for the reduction variable. + redvar = ir.Var(scope, red_name, loc) + # Get the type of the array that holds the per-thread + # reduction variables. + redarrvar_typ = redtyp_to_redarraytype(redvar_typ) + reddtype = redarrvar_typ.dtype + if config.DEBUG_ARRAY_OPT: + print( + "reduction_info", + red_name, + redvar_typ, + redarrvar_typ, + reddtype, + types.DType(reddtype), + num_threads_var, + type(num_threads_var) + ) + + # If this is reduction over an array, + # the reduction array has just one added per-worker dimension. + if isinstance(redvar_typ, types.npytypes.Array): + redarrdim = redvar_typ.ndim + 1 + else: + redarrdim = 1 + + # Reduction array is created and initialized to the initial reduction value. + + # First create a var for the numpy empty ufunc. + glbl_np_empty = pfbdr.bind_global_function( + fobj=np.empty, + ftype=get_np_ufunc_typ(np.empty), + args=( + types.UniTuple(types.intp, redarrdim), + ), + kws={'dtype': types.DType(reddtype)} + ) + + size_var_list = [num_threads_var] + + # If this is a reduction over an array... + if isinstance(redvar_typ, types.npytypes.Array): + # Add code to get the shape of the array being reduced over. + redshape_var = pfbdr.assign( + rhs=ir.Expr.getattr(redvar, "shape", loc), + typ=types.UniTuple(types.intp, redvar_typ.ndim), + name="redarr_shape", + ) + + # Add the dimension sizes of the array being reduced over to the tuple of sizes pass to empty. 
+ for j in range(redvar_typ.ndim): + onedimvar = pfbdr.assign( + rhs=ir.Expr.static_getitem(redshape_var, j, None, loc), + typ=types.intp, + name="redshapeonedim", + ) + size_var_list.append(onedimvar) + + # Empty call takes tuple of sizes. Create here and fill in outer dimension (num threads). + size_var = pfbdr.make_tuple_variable( + size_var_list, name='tuple_size_var', + ) + + # Resolve dtype + cval = pfbdr._typingctx.resolve_value_type(reddtype) + dt = pfbdr.make_const_variable(cval=cval, typ=types.DType(reddtype)) + # Add call to empty passing the size var tuple. + empty_call = pfbdr.call(glbl_np_empty, args=[size_var, dt]) + + redarr_var = pfbdr.assign( + rhs=empty_call, typ=redarrvar_typ, name="redarr", + ) + + # Remember mapping of original reduction array to the newly created per-worker reduction array. + redarrs[redvar.name] = redarr_var + to_cleanup.append(redarr_var) + + init_val = parfor_reddict[red_name].init_val + + if init_val is not None: + if isinstance(redvar_typ, types.npytypes.Array): + # Create an array of identity values for the reduction. + # First, create a variable for np.full. + full_func_node = pfbdr.bind_global_function( + fobj=np.full, + ftype=get_np_ufunc_typ(np.full), + args=( + types.UniTuple(types.intp, redvar_typ.ndim), + reddtype, + ), + kws={'dtype': types.DType(reddtype)}, + ) + + # Then create a var with the identify value. + init_val_var = pfbdr.make_const_variable( + cval=init_val, + typ=reddtype, + name="init_val", + ) + + # Then, call np.full with the shape of the reduction array and the identity value. 
+ full_call = pfbdr.call( + full_func_node, args=[redshape_var, init_val_var, dt], + ) + + redtoset = pfbdr.assign( + rhs=full_call, + typ=redvar_typ, + name="redtoset", + ) + # rettoset is an array from np.full() and must be released + to_cleanup.append(redtoset) + else: + redtoset = pfbdr.make_const_variable( + cval=init_val, + typ=reddtype, + name="redtoset", + ) + else: + redtoset = redvar + + if config.DEBUG_ARRAY_OPT_RUNTIME: + res_print_str = "res_print1 for redvar " + str(redvar) + ":" + strconsttyp = types.StringLiteral(res_print_str) + + lhs = pfbdr.make_const_variable( + cval=res_print_str, + typ=strconsttyp, + name="str_const", + ) + + res_print = ir.Print(args=[lhs, redvar], + vararg=None, loc=loc) + lowerer.fndesc.calltypes[res_print] = signature(types.none, + typemap[lhs.name], + typemap[redvar.name]) + print("res_print_redvar", res_print) + lowerer.lower_inst(res_print) + + + # For each thread, initialize the per-worker reduction array to + # the current reduction array value. + + # Get the Numba type of the variable that holds the thread count. + num_thread_type = typemap[num_threads_var.name] + # Get the LLVM type of the thread count variable. + ntllvm_type = targetctx.get_value_type(num_thread_type) + # Create a LLVM variable to hold the loop index. + alloc_loop_var = cgutils.alloca_once(builder, ntllvm_type) + # Associate this LLVM variable to a Numba IR variable so that + # we can use setitem IR builder. + # Create a Numba IR variable. + numba_ir_loop_index_var = scope.redefine("$loop_index", loc) + # Give that variable the right type. + typemap[numba_ir_loop_index_var.name] = num_thread_type + # Associate this Numba variable to the LLVM variable in the + # lowerer's varmap. + lowerer.varmap[numba_ir_loop_index_var.name] = alloc_loop_var + # Insert a loop into the outputed LLVM that goes from 0 to + # the current thread count. 
+ with cgutils.for_range(builder, lowerer.loadvar(num_threads_var.name), intp=ntllvm_type) as loop: + # Store the loop index into the alloca'd LLVM loop index variable. + builder.store(loop.index, alloc_loop_var) + # Initialize one element of the reduction array using the Numba + # IR variable associated with this loop's index. + pfbdr.setitem(obj=redarr_var, index=numba_ir_loop_index_var, val=redtoset) + + # compile parfor body as a separate function to be used with GUFuncWrapper + flags = parfor.flags.copy() + flags.error_model = "numpy" + # Can't get here unless flags.auto_parallel == ParallelOptions(True) + index_var_typ = typemap[parfor.loop_nests[0].index_variable.name] + # index variables should have the same type, check rest of indices + for l in parfor.loop_nests[1:]: + assert typemap[l.index_variable.name] == index_var_typ + numba.parfors.parfor.sequential_parfor_lowering = True + try: + (func, + func_args, + func_sig, + func_arg_types, + exp_name_to_tuple_var) = _create_gufunc_for_parfor_body( + lowerer, parfor, typemap, typingctx, targetctx, flags, {}, + bool(alias_map), index_var_typ, parfor.races) + finally: + numba.parfors.parfor.sequential_parfor_lowering = False + + # get the shape signature + func_args = ['sched'] + func_args + num_reductions = len(parfor_redvars) + num_inputs = len(func_args) - len(parfor_output_arrays) - num_reductions + if config.DEBUG_ARRAY_OPT: + print("func_args = ", func_args) + print("num_inputs = ", num_inputs) + print("parfor_outputs = ", parfor_output_arrays) + print("parfor_redvars = ", parfor_redvars) + print("num_reductions = ", num_reductions) + gu_signature = _create_shape_signature( + parfor.get_shape_classes, + num_inputs, + num_reductions, + func_args, + func_sig, + parfor.races, + typemap) + if config.DEBUG_ARRAY_OPT: + print("gu_signature = ", gu_signature) + + # call the func in parallel by wrapping it with ParallelGUFuncBuilder + loop_ranges = [(l.start, l.stop, l.step) for l in parfor.loop_nests] + if 
config.DEBUG_ARRAY_OPT: + print("loop_nests = ", parfor.loop_nests) + print("loop_ranges = ", loop_ranges) + call_parallel_gufunc( + lowerer, + func, + gu_signature, + func_sig, + func_args, + func_arg_types, + loop_ranges, + parfor_redvars, + parfor_reddict, + redarrs, + parfor.init_block, + index_var_typ, + parfor.races, + exp_name_to_tuple_var) + + if nredvars > 0: + _parfor_lowering_finalize_reduction( + parfor, redarrs, lowerer, parfor_reddict, num_threads_var, + ) + + # Cleanup reduction variable + for v in to_cleanup: + lowerer.lower_inst(ir.Del(v.name, loc=loc)) + # Restore the original typemap of the function that was replaced temporarily at the + # Beginning of this function. + lowerer.fndesc.typemap = orig_typemap + + if config.DEBUG_ARRAY_OPT: + print("_lower_parfor_parallel done") + + +_ReductionInfo = make_dataclass( + "_ReductionInfo", + [ + "redvar_info", + "redvar_name", + "redvar_typ", + "redarr_var", + "redarr_typ", + "init_val", + ], + frozen=True, +) + + +def _parfor_lowering_finalize_reduction( + parfor, + redarrs, + lowerer, + parfor_reddict, + thread_count_var, + ): + """Emit code to finalize the reduction from the intermediate values of + each thread. 
+ """ + # For each reduction variable + for redvar_name, redarr_var in redarrs.items(): + # Pseudo-code for this loop body: + # tmp = redarr[0] + # for i in range(1, thread_count): + # tmp = reduce_op(redarr[i], tmp) + # reduction_result = tmp + redvar_typ = lowerer.fndesc.typemap[redvar_name] + redarr_typ = lowerer.fndesc.typemap[redarr_var.name] + init_val = lowerer.loadvar(redvar_name) + + reduce_info = _ReductionInfo( + redvar_info = parfor_reddict[redvar_name], + redvar_name=redvar_name, + redvar_typ=redvar_typ, + redarr_var=redarr_var, + redarr_typ=redarr_typ, + init_val=init_val, + ) + # generate code for combining reduction variable with thread output + handler = (_lower_trivial_inplace_binops + if reduce_info.redvar_info.redop is not None + else _lower_non_trivial_reduce) + handler(parfor, lowerer, thread_count_var, reduce_info) + + +class ParforsUnexpectedReduceNodeError(InternalError): + def __init__(self, inst): + super().__init__(f"Unknown reduce instruction node: {inst}") + + +def _lower_trivial_inplace_binops(parfor, lowerer, thread_count_var, reduce_info): + """Lower trivial inplace-binop reduction. + """ + for inst in reduce_info.redvar_info.reduce_nodes: + # Var assigns to Var? + if _lower_var_to_var_assign(lowerer, inst): + pass + # Is inplace-binop for the reduction? + elif _is_right_op_and_rhs_is_init(inst, reduce_info.redvar_name, "inplace_binop"): + fn = inst.value.fn + redvar_result = _emit_binop_reduce_call( + fn, lowerer, thread_count_var, reduce_info, + ) + lowerer.storevar(redvar_result, name=inst.target.name) + # Is binop for the reduction? + elif _is_right_op_and_rhs_is_init(inst, reduce_info.redvar_name, "binop"): + fn = inst.value.fn + redvar_result = _emit_binop_reduce_call( + fn, lowerer, thread_count_var, reduce_info, + ) + lowerer.storevar(redvar_result, name=inst.target.name) + # Otherwise? + else: + raise ParforsUnexpectedReduceNodeError(inst) + + # XXX: This seems like a hack to stop the loop with this condition. 
def _lower_non_trivial_reduce(parfor, lowerer, thread_count_var, reduce_info):
    """Lower non-trivial reduction such as call to `functools.reduce()`.
    """
    init_name = f"{reduce_info.redvar_name}#init"
    # The "<redvar>#init" placeholder is not in the typemap at this point;
    # register it with the reduction variable's type.
    lowerer.fndesc.typemap.setdefault(init_name, reduce_info.redvar_typ)
    # Apply the reduction once per thread, folding each thread's
    # intermediate result into the running value.
    thread_count = lowerer.loadvar(thread_count_var.name)
    with cgutils.for_range(lowerer.builder, thread_count) as thread_loop:
        thread_id = thread_loop.index
        for node in reduce_info.redvar_info.reduce_nodes:
            if _lower_var_to_var_assign(lowerer, node):
                # Plain Var -> Var copy; the helper already lowered it.
                pass
            elif (isinstance(node, ir.Assign)
                    and any(v.name == init_name for v in node.list_vars())):
                # This assignment consumes the "#init" placeholder: load the
                # current thread's intermediate value into it, then lower
                # the reduction statement itself.
                item = _emit_getitem_call(thread_id, lowerer, reduce_info)
                lowerer.storevar(item, init_name)
                lowerer.lower_inst(node)
            else:
                raise ParforsUnexpectedReduceNodeError(node)

            # XXX: This seems like a hack to stop the loop with this condition.
            if _fix_redvar_name_ssa_mismatch(parfor, lowerer, node,
                                             reduce_info.redvar_name):
                break

    if config.DEBUG_ARRAY_OPT_RUNTIME:
        varname = reduce_info.redvar_name
        lowerer.print_variable(
            f"{parfor.loc}: parfor non-trivial reduction {varname} =",
            varname,
        )
+ """ + if isinstance(inst, ir.Assign) and isinstance(inst.value, ir.Var): + loaded = lowerer.loadvar(inst.value.name) + lowerer.storevar(loaded, name=inst.target.name) + return True + return False + +def _emit_getitem_call(idx, lowerer, reduce_info): + """Emit call to ``redarr_var[idx]`` + """ + def reducer_getitem(redarr, index): + return redarr[index] + + builder = lowerer.builder + ctx = lowerer.context + redarr_typ = reduce_info.redarr_typ + arg_arr = lowerer.loadvar(reduce_info.redarr_var.name) + args = (arg_arr, idx) + sig = signature(reduce_info.redvar_typ, redarr_typ, types.intp) + elem = ctx.compile_internal(builder, reducer_getitem, sig, args) + return elem + + +def _emit_binop_reduce_call(binop, lowerer, thread_count_var, reduce_info): + """Emit call to the ``binop`` for the reduction variable. + """ + + def reduction_add(thread_count, redarr, init): + c = init + for i in range(thread_count): + c += redarr[i] + return c + + def reduction_mul(thread_count, redarr, init): + c = init + for i in range(thread_count): + c *= redarr[i] + return c + + kernel = { + operator.iadd: reduction_add, + operator.isub: reduction_add, + operator.add: reduction_add, + operator.sub: reduction_add, + operator.imul: reduction_mul, + operator.ifloordiv: reduction_mul, + operator.itruediv: reduction_mul, + operator.mul: reduction_mul, + operator.floordiv: reduction_mul, + operator.truediv: reduction_mul, + }[binop] + + ctx = lowerer.context + builder = lowerer.builder + redarr_typ = reduce_info.redarr_typ + arg_arr = lowerer.loadvar(reduce_info.redarr_var.name) + + if config.DEBUG_ARRAY_OPT_RUNTIME: + init_var = reduce_info.redarr_var.scope.get(reduce_info.redvar_name) + res_print = ir.Print( + args=[reduce_info.redarr_var, init_var], vararg=None, + loc=lowerer.loc, + ) + typemap = lowerer.fndesc.typemap + lowerer.fndesc.calltypes[res_print] = signature( + types.none, typemap[reduce_info.redarr_var.name], + typemap[init_var.name], + ) + lowerer.lower_inst(res_print) + + 
def _is_right_op_and_rhs_is_init(inst, redvar_name, op):
    """Return True iff ``inst`` assigns an ``ir.Expr`` whose op is ``op``
    and whose RHS operand is the reduction-init placeholder
    ``<redvar_name>#init``.
    """
    if not isinstance(inst, ir.Assign):
        return False
    expr = inst.value
    # Short-circuit ordering matches the original: check the expr type and
    # op before touching ``expr.rhs``.
    return (isinstance(expr, ir.Expr)
            and expr.op == op
            and expr.rhs.name == f"{redvar_name}#init")
+ if redvar_name != inst.target.name: + val = lowerer.loadvar(inst.target.name) + lowerer.storevar(val, name=redvar_name) + return True + + return False + +def _create_shape_signature( + get_shape_classes, + num_inputs, + num_reductions, + args, + func_sig, + races, + typemap): + '''Create shape signature for GUFunc + ''' + if config.DEBUG_ARRAY_OPT: + print("_create_shape_signature", num_inputs, num_reductions, args, races) + for i in args[1:]: + print("argument", i, type(i), get_shape_classes(i, typemap=typemap)) + + num_inouts = len(args) - num_reductions + # maximum class number for array shapes + classes = [get_shape_classes(var, typemap=typemap) if var not in races else (-1,) for var in args[1:]] + class_set = set() + for _class in classes: + if _class: + for i in _class: + class_set.add(i) + max_class = max(class_set) + 1 if class_set else 0 + classes.insert(0, (max_class,)) # force set the class of 'sched' argument + class_set.add(max_class) + thread_num_class = max_class + 1 + class_set.add(thread_num_class) + class_map = {} + # TODO: use prefix + class number instead of single char + alphabet = ord('a') + for n in class_set: + if n >= 0: + class_map[n] = chr(alphabet) + alphabet += 1 + threadcount_ordinal = chr(alphabet) + + alpha_dict = {'latest_alpha' : alphabet} + + def bump_alpha(c, class_map): + if c >= 0: + return class_map[c] + else: + alpha_dict['latest_alpha'] += 1 + return chr(alpha_dict['latest_alpha']) + + gu_sin = [] + gu_sout = [] + count = 0 + syms_sin = () + if config.DEBUG_ARRAY_OPT: + print("args", args) + print("classes", classes) + print("threadcount_ordinal", threadcount_ordinal) + for cls, arg in zip(classes, args): + count = count + 1 + if cls: + dim_syms = tuple(bump_alpha(c, class_map) for c in cls) + else: + dim_syms = () + if (count > num_inouts): + # Add the threadcount_ordinal to represent the thread count + # to the start of the reduction array. 
def add_to_def_once_sets(a_def, def_once, def_more):
    '''Track how many times a variable has been defined.  A name seen for
    the first time goes into ``def_once``; on its second definition it
    migrates to ``def_more``, where it stays for all later definitions.
    '''
    if a_def in def_more:
        # Already known to be multiply defined; nothing changes.
        return
    if a_def in def_once:
        # Second definition: promote from the once-set to the more-set.
        def_once.discard(a_def)
        def_more.add(a_def)
    else:
        # First sighting: defined exactly once so far.
        def_once.add(a_def)
+ assignments = block.find_insts(ir.Assign) + # For each assignment... + for one_assign in assignments: + # Get the LHS/target of the assignment. + a_def = one_assign.target.name + # Add variable to def sets. + add_to_def_once_sets(a_def, def_once, def_more) + + rhs = one_assign.value + if isinstance(rhs, ir.Global): + # Remember assignments of the form "a = Global(...)" + # Is this a module? + if isinstance(rhs.value, pytypes.ModuleType): + module_assigns[a_def] = rhs.value.__name__ + if isinstance(rhs, ir.Expr) and rhs.op == 'getattr' and rhs.value.name in def_once: + # Remember assignments of the form "a = b.c" + getattr_taken[a_def] = (rhs.value.name, rhs.attr) + if isinstance(rhs, ir.Expr) and rhs.op == 'call' and rhs.func.name in getattr_taken: + # If "a" is being called then lookup the getattr definition of "a" + # as above, getting the module variable "b" (base_obj) + # and the attribute "c" (base_attr). + base_obj, base_attr = getattr_taken[rhs.func.name] + if base_obj in module_assigns: + # If we know the definition of the module variable then get the module + # name from module_assigns. + base_mod_name = module_assigns[base_obj] + if not is_const_call(base_mod_name, base_attr): + # Calling a method on an object could modify the object and is thus + # like a def of that object. We call is_const_call to see if this module/attribute + # combination is known to not modify the module state. If we don't know that + # the combination is safe then we have to assume there could be a modification to + # the module and thus add the module variable as defined more than once. + add_to_def_once_sets(base_obj, def_once, def_more) + else: + # Assume the worst and say that base_obj could be modified by the call. + add_to_def_once_sets(base_obj, def_once, def_more) + if isinstance(rhs, ir.Expr) and rhs.op == 'call': + # If a mutable object is passed to a function, then it may be changed and + # therefore can't be hoisted. + # For each argument to the function... 
def compute_def_once_internal(loop_body, def_once, def_more, getattr_taken, typemap, module_assigns):
    '''Compute the set of variables defined exactly once in the given set of blocks
    and use the given sets for storing which variables are defined once, more than
    once and which have had a getattr call on them.
    '''
    # Walk blocks in topological order so definitions are seen in a
    # consistent order.
    for label in wrap_find_topo(loop_body):
        block = loop_body[label]
        # Fold this block's assignments into def_once / def_more /
        # getattr_taken / module_assigns.
        compute_def_once_block(block, def_once, def_more, getattr_taken,
                               typemap, module_assigns)
        # Parfor nodes nest their own blocks; recurse into them manually.
        for stmt in block.body:
            if isinstance(stmt, parfor.Parfor):
                # The parfor's init block first...
                compute_def_once_block(stmt.init_block, def_once, def_more,
                                       getattr_taken, typemap,
                                       module_assigns)
                # ...then its loop body, recursively.
                compute_def_once_internal(stmt.loop_body, def_once,
                                          def_more, getattr_taken, typemap,
                                          module_assigns)
+ ''' + def_once = set() # set to hold variables defined exactly once + def_more = set() # set to hold variables defined more than once + getattr_taken = {} + module_assigns = {} + compute_def_once_internal(loop_body, def_once, def_more, getattr_taken, typemap, module_assigns) + return def_once, def_more + +def find_vars(var, varset): + assert isinstance(var, ir.Var) + varset.add(var.name) + return var + +def _hoist_internal(inst, dep_on_param, call_table, hoisted, not_hoisted, + typemap, stored_arrays): + if inst.target.name in stored_arrays: + not_hoisted.append((inst, "stored array")) + if config.DEBUG_ARRAY_OPT >= 1: + print("Instruction", inst, "could not be hoisted because the created array is stored.") + return False + + target_type = typemap[inst.target.name] + + uses = set() + # Get vars used by this statement. + visit_vars_inner(inst.value, find_vars, uses) + # Filter out input parameters from the set of variable usages. + unhoistable = {assgn.target.name for assgn, _ in not_hoisted} + use_unhoist = uses & unhoistable + diff = uses.difference(dep_on_param) + diff |= use_unhoist + if config.DEBUG_ARRAY_OPT >= 1: + print("_hoist_internal:", inst, "uses:", uses, "diff:", diff) + if len(diff) == 0 and is_pure(inst.value, None, call_table): + if config.DEBUG_ARRAY_OPT >= 1: + print("Will hoist instruction", inst, target_type) + hoisted.append(inst) + if not isinstance(target_type, types.npytypes.Array): + dep_on_param += [inst.target.name] + return True + else: + if len(diff) > 0: + not_hoisted.append((inst, "dependency")) + if config.DEBUG_ARRAY_OPT >= 1: + print("Instruction", inst, "could not be hoisted because of a dependency.") + else: + not_hoisted.append((inst, "not pure")) + if config.DEBUG_ARRAY_OPT >= 1: + print("Instruction", inst, "could not be hoisted because it isn't pure.") + return False + +def find_setitems_block(setitems, itemsset, block, typemap): + for inst in block.body: + if isinstance(inst, (ir.StaticSetItem, ir.SetItem)): + 
setitems.add(inst.target.name) + # If we store a non-mutable object into an array then that is safe to hoist. + # If the stored object is mutable and you hoist then multiple entries in the + # outer array could reference the same object and changing one index would then + # change other indices. + if getattr(typemap[inst.value.name], "mutable", False): + itemsset.add(inst.value.name) + elif isinstance(inst, parfor.Parfor): + find_setitems_block(setitems, itemsset, inst.init_block, typemap) + find_setitems_body(setitems, itemsset, inst.loop_body, typemap) + elif isinstance(inst, ir.Assign): + # If something of mutable type is given to a build_tuple or + # used in a call then consider it unanalyzable and so + # unavailable for hoisting. + rhs = inst.value + def add_to_itemset(item): + assert isinstance(item, ir.Var), rhs + if getattr(typemap[item.name], "mutable", False): + itemsset.add(item.name) + + if isinstance(rhs, ir.Expr): + if rhs.op in ["build_tuple", "build_list", "build_set"]: + for item in rhs.items: + add_to_itemset(item) + elif rhs.op == "build_map": + for pair in rhs.items: + for item in pair: + add_to_itemset(item) + elif rhs.op == "call": + for item in list(rhs.args) + [x[1] for x in rhs.kws]: + add_to_itemset(item) + +def find_setitems_body(setitems, itemsset, loop_body, typemap): + """ + Find the arrays that are written into (goes into setitems) and the + mutable objects (mostly arrays) that are written into other arrays + (goes into itemsset). 
+ """ + for label, block in loop_body.items(): + find_setitems_block(setitems, itemsset, block, typemap) + +def empty_container_allocator_hoist(inst, dep_on_param, call_table, hoisted, + not_hoisted, typemap, stored_arrays): + if (isinstance(inst, ir.Assign) and + isinstance(inst.value, ir.Expr) and + inst.value.op == 'call' and + inst.value.func.name in call_table): + call_list = call_table[inst.value.func.name] + if call_list == ['empty', np]: + return _hoist_internal(inst, dep_on_param, call_table, hoisted, + not_hoisted, typemap, stored_arrays) + return False + +def hoist(parfor_params, loop_body, typemap, wrapped_blocks): + dep_on_param = copy.copy(parfor_params) + hoisted = [] + not_hoisted = [] + + # Compute the set of variable defined exactly once in the loop body. + def_once, def_more = compute_def_once(loop_body, typemap) + (call_table, reverse_call_table) = get_call_table(wrapped_blocks) + + setitems = set() + itemsset = set() + find_setitems_body(setitems, itemsset, loop_body, typemap) + dep_on_param = list(set(dep_on_param).difference(setitems)) + if config.DEBUG_ARRAY_OPT >= 1: + print("hoist - def_once:", def_once, "setitems:", setitems, "itemsset:", itemsset, "dep_on_param:", dep_on_param, "parfor_params:", parfor_params) + for si in setitems: + add_to_def_once_sets(si, def_once, def_more) + + for label, block in loop_body.items(): + new_block = [] + for inst in block.body: + if empty_container_allocator_hoist(inst, dep_on_param, call_table, + hoisted, not_hoisted, typemap, itemsset): + continue + elif isinstance(inst, ir.Assign) and inst.target.name in def_once: + if _hoist_internal(inst, dep_on_param, call_table, + hoisted, not_hoisted, typemap, itemsset): + # don't add this instruction to the block since it is + # hoisted + continue + elif isinstance(inst, parfor.Parfor): + new_init_block = [] + if config.DEBUG_ARRAY_OPT >= 1: + print("parfor") + inst.dump() + for ib_inst in inst.init_block.body: + if empty_container_allocator_hoist(ib_inst, 
dep_on_param, + call_table, hoisted, not_hoisted, typemap, itemsset): + continue + elif (isinstance(ib_inst, ir.Assign) and + ib_inst.target.name in def_once): + if _hoist_internal(ib_inst, dep_on_param, call_table, + hoisted, not_hoisted, typemap, + itemsset): + # don't add this instruction to the block since it is hoisted + continue + new_init_block.append(ib_inst) + inst.init_block.body = new_init_block + + new_block.append(inst) + block.body = new_block + return hoisted, not_hoisted + +def redtyp_is_scalar(redtype): + return not isinstance(redtype, types.npytypes.Array) + +def redtyp_to_redarraytype(redtyp): + """Go from a reducation variable type to a reduction array type used to hold + per-worker results. + """ + redarrdim = 1 + # If the reduction type is an array then allocate reduction array with ndim+1 dimensions. + if isinstance(redtyp, types.npytypes.Array): + redarrdim += redtyp.ndim + # We don't create array of array but multi-dimensional reduction array with same dtype. + redtyp = redtyp.dtype + return types.npytypes.Array(redtyp, redarrdim, "C") + +def redarraytype_to_sig(redarraytyp): + """Given a reduction array type, find the type of the reduction argument to the gufunc. + """ + assert isinstance(redarraytyp, types.npytypes.Array) + return types.npytypes.Array(redarraytyp.dtype, redarraytyp.ndim, redarraytyp.layout) + +def legalize_names_with_typemap(names, typemap): + """ We use ir_utils.legalize_names to replace internal IR variable names + containing illegal characters (e.g. period) with a legal character + (underscore) so as to create legal variable names. + The original variable names are in the typemap so we also + need to add the legalized name to the typemap as well. + """ + outdict = legalize_names(names) + # For each pair in the dict of legalized names... + for x, y in outdict.items(): + # If the name had some legalization change to it... + if x != y: + # Set the type of the new name the same as the type of the old name. 
+ typemap[y] = typemap[x] + return outdict + +def to_scalar_from_0d(x): + if isinstance(x, types.ArrayCompatible): + if x.ndim == 0: + return x.dtype + return x + +def _create_gufunc_for_parfor_body( + lowerer, + parfor, + typemap, + typingctx, + targetctx, + flags, + locals, + has_aliases, + index_var_typ, + races): + ''' + Takes a parfor and creates a gufunc function for its body. + There are two parts to this function. + 1) Code to iterate across the iteration space as defined by the schedule. + 2) The parfor body that does the work for a single point in the iteration space. + Part 1 is created as Python text for simplicity with a sentinel assignment to mark the point + in the IR where the parfor body should be added. + This Python text is 'exec'ed into existence and its IR retrieved with run_frontend. + The IR is scanned for the sentinel assignment where that basic block is split and the IR + for the parfor body inserted. + ''' + if config.DEBUG_ARRAY_OPT >= 1: + print("starting _create_gufunc_for_parfor_body") + + loc = parfor.init_block.loc + + # The parfor body and the main function body share ir.Var nodes. + # We have to do some replacements of Var names in the parfor body to make them + # legal parameter names. If we don't copy then the Vars in the main function also + # would incorrectly change their name. + loop_body = copy.copy(parfor.loop_body) + remove_dels(loop_body) + + parfor_dim = len(parfor.loop_nests) + loop_indices = [l.index_variable.name for l in parfor.loop_nests] + + # Get all the parfor params. + parfor_params = parfor.params + # Get just the outputs of the parfor. + parfor_outputs = numba.parfors.parfor.get_parfor_outputs(parfor, parfor_params) + # Get all parfor reduction vars, and operators. + typemap = lowerer.fndesc.typemap + parfor_redvars, parfor_reddict = numba.parfors.parfor.get_parfor_reductions( + lowerer.func_ir, parfor, parfor_params, lowerer.fndesc.calltypes) + # Compute just the parfor inputs as a set difference. 
+ parfor_inputs = sorted( + list( + set(parfor_params) - + set(parfor_outputs) - + set(parfor_redvars))) + + if config.DEBUG_ARRAY_OPT >= 1: + print("parfor_params = ", parfor_params, " ", type(parfor_params)) + print("parfor_outputs = ", parfor_outputs, " ", type(parfor_outputs)) + print("parfor_inputs = ", parfor_inputs, " ", type(parfor_inputs)) + print("parfor_redvars = ", parfor_redvars, " ", type(parfor_redvars)) + + # ------------------------------------------------------------------------- + # Convert tuples to individual parameters. + tuple_expanded_parfor_inputs = [] + tuple_var_to_expanded_names = {} + expanded_name_to_tuple_var = {} + next_expanded_tuple_var = 0 + parfor_tuple_params = [] + # For each input to the parfor. + for pi in parfor_inputs: + # Get the type of the input. + pi_type = typemap[pi] + # If it is a UniTuple or Tuple we will do the conversion. + if isinstance(pi_type, types.UniTuple) or isinstance(pi_type, types.NamedUniTuple): + # Get the size and dtype of the tuple. + tuple_count = pi_type.count + tuple_dtype = pi_type.dtype + # Only do tuples up to config.PARFOR_MAX_TUPLE_SIZE length. + assert(tuple_count <= config.PARFOR_MAX_TUPLE_SIZE) + this_var_expansion = [] + for i in range(tuple_count): + # Generate a new name for the individual part of the tuple var. + expanded_name = "expanded_tuple_var_" + str(next_expanded_tuple_var) + # Add that name to the new list of inputs to the gufunc. + tuple_expanded_parfor_inputs.append(expanded_name) + this_var_expansion.append(expanded_name) + # Remember a mapping from new param name to original tuple + # var and the index within the tuple. + expanded_name_to_tuple_var[expanded_name] = (pi, i) + next_expanded_tuple_var += 1 + # Set the type of the new parameter. + typemap[expanded_name] = tuple_dtype + # Remember a mapping from the original tuple var to the + # individual parts. 
+ tuple_var_to_expanded_names[pi] = this_var_expansion + parfor_tuple_params.append(pi) + elif isinstance(pi_type, types.Tuple) or isinstance(pi_type, types.NamedTuple): + # This is the same as above for UniTuple except that each part of + # the tuple can have a different type and we fetch that type with + # pi_type.types[offset]. + tuple_count = pi_type.count + tuple_types = pi_type.types + # Only do tuples up to config.PARFOR_MAX_TUPLE_SIZE length. + assert(tuple_count <= config.PARFOR_MAX_TUPLE_SIZE) + this_var_expansion = [] + for i in range(tuple_count): + expanded_name = "expanded_tuple_var_" + str(next_expanded_tuple_var) + tuple_expanded_parfor_inputs.append(expanded_name) + this_var_expansion.append(expanded_name) + expanded_name_to_tuple_var[expanded_name] = (pi, i) + next_expanded_tuple_var += 1 + typemap[expanded_name] = tuple_types[i] + tuple_var_to_expanded_names[pi] = this_var_expansion + parfor_tuple_params.append(pi) + else: + tuple_expanded_parfor_inputs.append(pi) + parfor_inputs = tuple_expanded_parfor_inputs + if config.DEBUG_ARRAY_OPT >= 1: + print("parfor_inputs post tuple handling = ", parfor_inputs, " ", type(parfor_inputs)) + # ------------------------------------------------------------------------- + + races = races.difference(set(parfor_redvars)) + for race in races: + msg = ("Variable %s used in parallel loop may be written " + "to simultaneously by multiple workers and may result " + "in non-deterministic or unintended results." % race) + warnings.warn(NumbaParallelSafetyWarning(msg, loc)) + replace_var_with_array(races, loop_body, typemap, lowerer.fndesc.calltypes) + + # Reduction variables are represented as arrays, so they go under + # different names. 
+ parfor_redarrs = [] + parfor_red_arg_types = [] + for var in parfor_redvars: + arr = var + "_arr" + parfor_redarrs.append(arr) + redarraytype = redtyp_to_redarraytype(typemap[var]) + parfor_red_arg_types.append(redarraytype) + redarrsig = redarraytype_to_sig(redarraytype) + if arr in typemap: + assert(typemap[arr] == redarrsig) + else: + typemap[arr] = redarrsig + + # Reorder all the params so that inputs go first then outputs. + parfor_params = parfor_inputs + parfor_outputs + parfor_redarrs + + if config.DEBUG_ARRAY_OPT >= 1: + print("parfor_params = ", parfor_params, " ", type(parfor_params)) + print("loop_indices = ", loop_indices, " ", type(loop_indices)) + print("loop_body = ", loop_body, " ", type(loop_body)) + _print_body(loop_body) + + # Some Var are not legal parameter names so create a dict of potentially illegal + # param name to guaranteed legal name. + param_dict = legalize_names_with_typemap(parfor_params + parfor_redvars + parfor_tuple_params, typemap) + if config.DEBUG_ARRAY_OPT >= 1: + print( + "param_dict = ", + sorted( + param_dict.items()), + " ", + type(param_dict)) + + # Some loop_indices are not legal parameter names so create a dict of potentially illegal + # loop index to guaranteed legal name. + ind_dict = legalize_names_with_typemap(loop_indices, typemap) + # Compute a new list of legal loop index names. + legal_loop_indices = [ind_dict[v] for v in loop_indices] + if config.DEBUG_ARRAY_OPT >= 1: + print("ind_dict = ", sorted(ind_dict.items()), " ", type(ind_dict)) + print( + "legal_loop_indices = ", + legal_loop_indices, + " ", + type(legal_loop_indices)) + for pd in parfor_params: + print("pd = ", pd) + print("pd type = ", typemap[pd], " ", type(typemap[pd])) + + # Get the types of each parameter. + param_types = [to_scalar_from_0d(typemap[v]) for v in parfor_params] + # Calculate types of args passed to gufunc. 
+ func_arg_types = [typemap[v] for v in (parfor_inputs + parfor_outputs)] + parfor_red_arg_types + if config.DEBUG_ARRAY_OPT >= 1: + print("new param_types:", param_types) + print("new func_arg_types:", func_arg_types) + + # Replace illegal parameter names in the loop body with legal ones. + replace_var_names(loop_body, param_dict) + # remember the name before legalizing as the actual arguments + parfor_args = parfor_params + # Change parfor_params to be legal names. + parfor_params = [param_dict[v] for v in parfor_params] + parfor_params_orig = parfor_params + + parfor_params = [] + ascontig = False + for pindex in range(len(parfor_params_orig)): + if (ascontig and + pindex < len(parfor_inputs) and + isinstance(param_types[pindex], types.npytypes.Array)): + parfor_params.append(parfor_params_orig[pindex]+"param") + else: + parfor_params.append(parfor_params_orig[pindex]) + + # Change parfor body to replace illegal loop index vars with legal ones. + replace_var_names(loop_body, ind_dict) + loop_body_var_table = get_name_var_table(loop_body) + sentinel_name = get_unused_var_name("__sentinel__", loop_body_var_table) + + if config.DEBUG_ARRAY_OPT >= 1: + print( + "legal parfor_params = ", + parfor_params, + " ", + type(parfor_params)) + + # Determine the unique names of the scheduling and gufunc functions. + # sched_func_name = "__numba_parfor_sched_%s" % (hex(hash(parfor)).replace("-", "_")) + gufunc_name = "__numba_parfor_gufunc_%s" % ( + hex(hash(parfor)).replace("-", "_")) + if config.DEBUG_ARRAY_OPT: + # print("sched_func_name ", type(sched_func_name), " ", sched_func_name) + print("gufunc_name ", type(gufunc_name), " ", gufunc_name) + + gufunc_txt = "" + + # Create the gufunc function. + gufunc_txt += "def " + gufunc_name + \ + "(sched, " + (", ".join(parfor_params)) + "):\n" + + globls = {"np": np, "numba": numba} + + # First thing in the gufunc, we reconstruct tuples from their + # individual parts, e.g., orig_tup_name = (part1, part2,). 
+ # The rest of the code of the function will use the original tuple name. + for tup_var, exp_names in tuple_var_to_expanded_names.items(): + tup_type = typemap[tup_var] + gufunc_txt += " " + param_dict[tup_var] + # Determine if the tuple is a named tuple. + if (isinstance(tup_type, types.NamedTuple) or + isinstance(tup_type, types.NamedUniTuple)): + named_tup = True + else: + named_tup = False + + if named_tup: + # It is a named tuple so try to find the global that defines the + # named tuple. + func_def = guard(get_definition, lowerer.func_ir, tup_var) + named_tuple_def = None + if config.DEBUG_ARRAY_OPT: + print("func_def:", func_def, type(func_def)) + if func_def is not None: + if (isinstance(func_def, ir.Expr) and + func_def.op == 'call'): + named_tuple_def = guard(get_definition, lowerer.func_ir, func_def.func) + if config.DEBUG_ARRAY_OPT: + print("named_tuple_def:", named_tuple_def, type(named_tuple_def)) + elif isinstance(func_def, ir.Arg): + named_tuple_def = typemap[func_def.name] + if config.DEBUG_ARRAY_OPT: + print("named_tuple_def:", named_tuple_def, + type(named_tuple_def), named_tuple_def.name) + if named_tuple_def is not None: + if (isinstance(named_tuple_def, ir.Global) or + isinstance(named_tuple_def, ir.FreeVar)): + gval = named_tuple_def.value + if config.DEBUG_ARRAY_OPT: + print("gval:", gval, type(gval)) + globls[named_tuple_def.name] = gval + elif isinstance(named_tuple_def, types.containers.BaseNamedTuple): + named_tuple_name = named_tuple_def.name.split('(')[0] + if config.DEBUG_ARRAY_OPT: + print("name:", named_tuple_name, + named_tuple_def.instance_class, + type(named_tuple_def.instance_class)) + globls[named_tuple_name] = named_tuple_def.instance_class + else: + if config.DEBUG_ARRAY_OPT: + print("Didn't find definition of namedtuple for globls.") + raise CompilerError("Could not find definition of " + str(tup_var), + tup_var.loc) + gufunc_txt += " = " + tup_type.instance_class.__name__ + "(" + for name, field_name in zip(exp_names, 
tup_type.fields): + gufunc_txt += field_name + "=" + param_dict[name] + "," + else: + # Just a regular tuple so use (part0, part1, ...) + gufunc_txt += " = (" + ", ".join([param_dict[x] for x in exp_names]) + if len(exp_names) == 1: + # Add comma for tuples with singular values. We can't unilaterally + # add a comma always because (,) isn't valid. + gufunc_txt += "," + + gufunc_txt += ")\n" + + for pindex in range(len(parfor_inputs)): + if ascontig and isinstance(param_types[pindex], types.npytypes.Array): + gufunc_txt += (" " + parfor_params_orig[pindex] + + " = np.ascontiguousarray(" + parfor_params[pindex] + ")\n") + + gufunc_thread_id_var = "ParallelAcceleratorGufuncThreadId" + + if len(parfor_redarrs) > 0: + gufunc_txt += " " + gufunc_thread_id_var + " = " + gufunc_txt += "numba.np.ufunc.parallel._iget_thread_id()\n" + + # Add initialization of reduction variables + for arr, var in zip(parfor_redarrs, parfor_redvars): + gufunc_txt += " " + param_dict[var] + \ + "=" + param_dict[arr] + "[" + gufunc_thread_id_var + "]\n" + if config.DEBUG_ARRAY_OPT_RUNTIME: + gufunc_txt += " print(\"thread id =\", ParallelAcceleratorGufuncThreadId)\n" + gufunc_txt += " print(\"initial reduction value\",ParallelAcceleratorGufuncThreadId," + param_dict[var] + "," + param_dict[var] + ".shape)\n" + gufunc_txt += " print(\"reduction array\",ParallelAcceleratorGufuncThreadId," + param_dict[arr] + "," + param_dict[arr] + ".shape)\n" + + # For each dimension of the parfor, create a for loop in the generated gufunc function. + # Iterate across the proper values extracted from the schedule. 
+ # The form of the schedule is start_dim0, start_dim1, ..., start_dimN, end_dim0, + # end_dim1, ..., end_dimN + for eachdim in range(parfor_dim): + for indent in range(eachdim + 1): + gufunc_txt += " " + sched_dim = eachdim + gufunc_txt += ("for " + + legal_loop_indices[eachdim] + + " in range(sched[" + + str(sched_dim) + + "], sched[" + + str(sched_dim + + parfor_dim) + + "] + np.uint8(1)):\n") + + if config.DEBUG_ARRAY_OPT_RUNTIME: + for indent in range(parfor_dim + 1): + gufunc_txt += " " + gufunc_txt += "print(" + for eachdim in range(parfor_dim): + gufunc_txt += "\"" + legal_loop_indices[eachdim] + "\"," + legal_loop_indices[eachdim] + "," + gufunc_txt += ")\n" + + # Add the sentinel assignment so that we can find the loop body position + # in the IR. + for indent in range(parfor_dim + 1): + gufunc_txt += " " + gufunc_txt += sentinel_name + " = 0\n" + # Add assignments of reduction variables (for returning the value) + for arr, var in zip(parfor_redarrs, parfor_redvars): + if config.DEBUG_ARRAY_OPT_RUNTIME: + gufunc_txt += " print(\"final reduction value\",ParallelAcceleratorGufuncThreadId," + param_dict[var] + ")\n" + gufunc_txt += " print(\"final reduction array\",ParallelAcceleratorGufuncThreadId," + param_dict[arr] + ")\n" + # After the gufunc loops, copy the accumulated temp value back to reduction array. + gufunc_txt += " " + param_dict[arr] + \ + "[" + gufunc_thread_id_var + "] = " + param_dict[var] + "\n" + gufunc_txt += " return None\n" + + if config.DEBUG_ARRAY_OPT: + print("gufunc_txt = ", type(gufunc_txt), "\n", gufunc_txt) + print("globls:", globls, type(globls)) + # Force gufunc outline into existence. + locls = {} + exec(gufunc_txt, globls, locls) + gufunc_func = locls[gufunc_name] + + if config.DEBUG_ARRAY_OPT: + print("gufunc_func = ", type(gufunc_func), "\n", gufunc_func) + # Get the IR for the gufunc outline. 
+ gufunc_ir = compiler.run_frontend(gufunc_func) + if config.DEBUG_ARRAY_OPT: + print("gufunc_ir dump ", type(gufunc_ir)) + gufunc_ir.dump() + print("loop_body dump ", type(loop_body)) + _print_body(loop_body) + + # rename all variables in gufunc_ir afresh + var_table = get_name_var_table(gufunc_ir.blocks) + new_var_dict = {} + reserved_names = [sentinel_name] + \ + list(param_dict.values()) + legal_loop_indices + for name, var in var_table.items(): + if not (name in reserved_names): + new_var_dict[name] = parfor.init_block.scope.redefine(name, loc).name + replace_var_names(gufunc_ir.blocks, new_var_dict) + if config.DEBUG_ARRAY_OPT: + print("gufunc_ir dump after renaming ") + gufunc_ir.dump() + gufunc_param_types = [types.npytypes.Array( + index_var_typ, 1, "C")] + param_types + if config.DEBUG_ARRAY_OPT: + print( + "gufunc_param_types = ", + type(gufunc_param_types), + "\n", + gufunc_param_types) + + gufunc_stub_last_label = find_max_label(gufunc_ir.blocks) + 1 + + # Add gufunc stub last label to each parfor.loop_body label to prevent + # label conflicts. + loop_body = add_offset_to_labels(loop_body, gufunc_stub_last_label) + # new label for splitting sentinel block + new_label = find_max_label(loop_body) + 1 + + # If enabled, add a print statement after every assignment. 
+ if config.DEBUG_ARRAY_OPT_RUNTIME: + for label, block in loop_body.items(): + new_block = block.copy() + new_block.clear() + loc = block.loc + scope = block.scope + for inst in block.body: + new_block.append(inst) + # Append print after assignment + if isinstance(inst, ir.Assign): + # Only apply to numbers + if typemap[inst.target.name] not in types.number_domain: + continue + + # Make constant string + strval = "{} =".format(inst.target.name) + strconsttyp = types.StringLiteral(strval) + + lhs = scope.redefine("str_const", loc) + # lhs = ir.Var(scope, mk_unique_var("str_const"), loc) + assign_lhs = ir.Assign(value=ir.Const(value=strval, loc=loc), + target=lhs, loc=loc) + typemap[lhs.name] = strconsttyp + new_block.append(assign_lhs) + + # Make print node + print_node = ir.Print(args=[lhs, inst.target], vararg=None, loc=loc) + new_block.append(print_node) + sig = numba.core.typing.signature(types.none, + typemap[lhs.name], + typemap[inst.target.name]) + lowerer.fndesc.calltypes[print_node] = sig + loop_body[label] = new_block + + if config.DEBUG_ARRAY_OPT: + print("parfor loop body") + _print_body(loop_body) + + wrapped_blocks = wrap_loop_body(loop_body) + hoisted, not_hoisted = hoist(parfor_params, loop_body, typemap, wrapped_blocks) + start_block = gufunc_ir.blocks[min(gufunc_ir.blocks.keys())] + start_block.body = start_block.body[:-1] + hoisted + [start_block.body[-1]] + unwrap_loop_body(loop_body) + + # store hoisted into diagnostics + diagnostics = lowerer.metadata['parfor_diagnostics'] + diagnostics.hoist_info[parfor.id] = {'hoisted': hoisted, + 'not_hoisted': not_hoisted} + + if config.DEBUG_ARRAY_OPT: + print("After hoisting") + _print_body(loop_body) + + # Search all the block in the gufunc outline for the sentinel assignment. + for label, block in gufunc_ir.blocks.items(): + for i, inst in enumerate(block.body): + if isinstance( + inst, + ir.Assign) and inst.target.name == sentinel_name: + # We found the sentinel assignment. 
+ loc = inst.loc + scope = block.scope + # split block across __sentinel__ + # A new block is allocated for the statements prior to the sentinel + # but the new block maintains the current block label. + prev_block = ir.Block(scope, loc) + prev_block.body = block.body[:i] + # The current block is used for statements after the sentinel. + block.body = block.body[i + 1:] + # But the current block gets a new label. + body_first_label = min(loop_body.keys()) + + # The previous block jumps to the minimum labelled block of the + # parfor body. + prev_block.append(ir.Jump(body_first_label, loc)) + # Add all the parfor loop body blocks to the gufunc function's + # IR. + for (l, b) in loop_body.items(): + gufunc_ir.blocks[l] = transfer_scope(b, scope) + body_last_label = max(loop_body.keys()) + gufunc_ir.blocks[new_label] = block + gufunc_ir.blocks[label] = prev_block + # Add a jump from the last parfor body block to the block containing + # statements after the sentinel. + gufunc_ir.blocks[body_last_label].append( + ir.Jump(new_label, loc)) + break + else: + continue + break + + if config.DEBUG_ARRAY_OPT: + print("gufunc_ir last dump before renaming") + gufunc_ir.dump() + + gufunc_ir.blocks = rename_labels(gufunc_ir.blocks) + remove_dels(gufunc_ir.blocks) + + if config.DEBUG_ARRAY_OPT: + print("gufunc_ir last dump") + gufunc_ir.dump() + print("flags", flags) + print("typemap", typemap) + + old_alias = flags.noalias + if not has_aliases: + if config.DEBUG_ARRAY_OPT: + print("No aliases found so adding noalias flag.") + flags.noalias = True + + fixup_var_define_in_scope(gufunc_ir.blocks) + + class ParforGufuncCompiler(compiler.CompilerBase): + def define_pipelines(self): + from numba.core.compiler_machinery import PassManager + dpb = compiler.DefaultPassBuilder + pm = PassManager("full_parfor_gufunc") + parfor_gufunc_passes = dpb.define_parfor_gufunc_pipeline(self.state) + pm.passes.extend(parfor_gufunc_passes.passes) + lowering_passes = 
dpb.define_parfor_gufunc_nopython_lowering_pipeline(self.state) + pm.passes.extend(lowering_passes.passes) + + pm.finalize() + return [pm] + + kernel_func = compiler.compile_ir( + typingctx, + targetctx, + gufunc_ir, + gufunc_param_types, + types.none, + flags, + locals, + pipeline_class=ParforGufuncCompiler) + + flags.noalias = old_alias + + kernel_sig = signature(types.none, *gufunc_param_types) + if config.DEBUG_ARRAY_OPT: + print("finished create_gufunc_for_parfor_body. kernel_sig = ", kernel_sig) + + return kernel_func, parfor_args, kernel_sig, func_arg_types, expanded_name_to_tuple_var + +def replace_var_with_array_in_block(vars, block, typemap, calltypes): + new_block = [] + for inst in block.body: + if isinstance(inst, ir.Assign) and inst.target.name in vars: + loc = inst.loc + scope = inst.target.scope + + const_node = ir.Const(0, loc) + const_var = scope.redefine("$const_ind_0", loc) + typemap[const_var.name] = types.uintp + const_assign = ir.Assign(const_node, const_var, loc) + new_block.append(const_assign) + + val_var = scope.redefine("$val", loc) + typemap[val_var.name] = typemap[inst.target.name] + new_block.append(ir.Assign(inst.value, val_var, loc)) + setitem_node = ir.SetItem(inst.target, const_var, val_var, loc) + calltypes[setitem_node] = signature( + types.none, types.npytypes.Array(typemap[inst.target.name], 1, "C"), types.intp, typemap[inst.target.name]) + new_block.append(setitem_node) + continue + elif isinstance(inst, parfor.Parfor): + replace_var_with_array_internal(vars, {0: inst.init_block}, typemap, calltypes) + replace_var_with_array_internal(vars, inst.loop_body, typemap, calltypes) + + new_block.append(inst) + return new_block + +def replace_var_with_array_internal(vars, loop_body, typemap, calltypes): + for label, block in loop_body.items(): + block.body = replace_var_with_array_in_block(vars, block, typemap, calltypes) + +def replace_var_with_array(vars, loop_body, typemap, calltypes): + replace_var_with_array_internal(vars, 
loop_body, typemap, calltypes) + for v in vars: + el_typ = typemap[v] + typemap.pop(v, None) + typemap[v] = types.npytypes.Array(el_typ, 1, "C") + +def call_parallel_gufunc(lowerer, cres, gu_signature, outer_sig, expr_args, expr_arg_types, + loop_ranges, redvars, reddict, redarrdict, init_block, index_var_typ, races, + exp_name_to_tuple_var): + ''' + Adds the call to the gufunc function from the main function. + ''' + context = lowerer.context + builder = lowerer.builder + + from numba.np.ufunc.parallel import (build_gufunc_wrapper, + _launch_threads) + + if config.DEBUG_ARRAY_OPT: + print("make_parallel_loop") + print("outer_sig = ", outer_sig.args, outer_sig.return_type, + outer_sig.recvr, outer_sig.pysig) + print("loop_ranges = ", loop_ranges) + print("expr_args", expr_args) + print("expr_arg_types", expr_arg_types) + print("gu_signature", gu_signature) + + # Build the wrapper for GUFunc + args, return_type = sigutils.normalize_signature(outer_sig) + llvm_func = cres.library.get_function(cres.fndesc.llvm_func_name) + sin, sout = gu_signature + + # These are necessary for build_gufunc_wrapper to find external symbols + _launch_threads() + + info = build_gufunc_wrapper(llvm_func, cres, sin, sout, + cache=False, is_parfors=True) + wrapper_name = info.name + cres.library._ensure_finalized() + + if config.DEBUG_ARRAY_OPT: + print("parallel function = ", wrapper_name, cres) + + # loadvars for loop_ranges + def load_range(v): + if isinstance(v, ir.Var): + return lowerer.loadvar(v.name) + else: + return context.get_constant(types.uintp, v) + + num_dim = len(loop_ranges) + for i in range(num_dim): + start, stop, step = loop_ranges[i] + start = load_range(start) + stop = load_range(stop) + assert(step == 1) # We do not support loop steps other than 1 + step = load_range(step) + loop_ranges[i] = (start, stop, step) + + if config.DEBUG_ARRAY_OPT: + print("call_parallel_gufunc loop_ranges[{}] = ".format(i), start, + stop, step) + cgutils.printf(builder, "loop range[{}]: %d 
%d (%d)\n".format(i), + start, stop, step) + + # Commonly used LLVM types and constants + byte_t = llvmlite.ir.IntType(8) + byte_ptr_t = llvmlite.ir.PointerType(byte_t) + byte_ptr_ptr_t = llvmlite.ir.PointerType(byte_ptr_t) + intp_t = context.get_value_type(types.intp) + uintp_t = context.get_value_type(types.uintp) + intp_ptr_t = llvmlite.ir.PointerType(intp_t) + intp_ptr_ptr_t = llvmlite.ir.PointerType(intp_ptr_t) + uintp_ptr_t = llvmlite.ir.PointerType(uintp_t) + uintp_ptr_ptr_t = llvmlite.ir.PointerType(uintp_ptr_t) + zero = context.get_constant(types.uintp, 0) + one = context.get_constant(types.uintp, 1) + one_type = one.type + sizeof_intp = context.get_abi_sizeof(intp_t) + + # Prepare sched, first pop it out of expr_args, outer_sig, and gu_signature + expr_args.pop(0) + sched_sig = sin.pop(0) + + if config.DEBUG_ARRAY_OPT: + print("Parfor has potentially negative start", index_var_typ.signed) + + if index_var_typ.signed: + sched_type = intp_t + sched_ptr_type = intp_ptr_t + sched_ptr_ptr_type = intp_ptr_ptr_t + else: + sched_type = uintp_t + sched_ptr_type = uintp_ptr_t + sched_ptr_ptr_type = uintp_ptr_ptr_t + + # Call do_scheduling with appropriate arguments + dim_starts = cgutils.alloca_once( + builder, sched_type, size=context.get_constant( + types.uintp, num_dim), name="dim_starts") + dim_stops = cgutils.alloca_once( + builder, sched_type, size=context.get_constant( + types.uintp, num_dim), name="dim_stops") + for i in range(num_dim): + start, stop, step = loop_ranges[i] + if start.type != one_type: + start = builder.sext(start, one_type) + if stop.type != one_type: + stop = builder.sext(stop, one_type) + if step.type != one_type: + step = builder.sext(step, one_type) + # substract 1 because do-scheduling takes inclusive ranges + stop = builder.sub(stop, one) + builder.store( + start, builder.gep( + dim_starts, [ + context.get_constant( + types.uintp, i)])) + builder.store(stop, builder.gep(dim_stops, + [context.get_constant(types.uintp, i)])) + + # 
Prepare to call get/set parallel_chunksize and get the number of threads. + get_chunksize = cgutils.get_or_insert_function( + builder.module, + llvmlite.ir.FunctionType(uintp_t, []), + name="get_parallel_chunksize") + + set_chunksize = cgutils.get_or_insert_function( + builder.module, + llvmlite.ir.FunctionType(llvmlite.ir.VoidType(), [uintp_t]), + name="set_parallel_chunksize") + + get_num_threads = cgutils.get_or_insert_function( + builder.module, + llvmlite.ir.FunctionType(llvmlite.ir.IntType(types.intp.bitwidth), []), + "get_num_threads") + + # Get the current number of threads. + num_threads = builder.call(get_num_threads, []) + # Get the current chunksize so we can use it and restore the value later. + current_chunksize = builder.call(get_chunksize, []) + + with cgutils.if_unlikely(builder, builder.icmp_signed('<=', num_threads, + num_threads.type(0))): + cgutils.printf(builder, "num_threads: %d\n", num_threads) + context.call_conv.return_user_exc(builder, RuntimeError, + ("Invalid number of threads. " + "This likely indicates a bug in Numba.",)) + + # Call get_sched_size from gufunc_scheduler.cpp that incorporates the size of the work, + # the number of threads and the selected chunk size. This will tell us how many entries + # in the schedule we will need. + get_sched_size_fnty = llvmlite.ir.FunctionType(uintp_t, [uintp_t, uintp_t, intp_ptr_t, intp_ptr_t]) + get_sched_size = cgutils.get_or_insert_function( + builder.module, + get_sched_size_fnty, + name="get_sched_size") + num_divisions = builder.call(get_sched_size, [num_threads, + context.get_constant(types.uintp, num_dim), + dim_starts, + dim_stops]) + # Set the chunksize to zero so that any nested calls get the default chunk size behavior. + builder.call(set_chunksize, [zero]) + + # Each entry in the schedule is 2 times the number of dimensions long. + multiplier = context.get_constant(types.uintp, num_dim * 2) + # Compute the total number of entries in the schedule. 
+ sched_size = builder.mul(num_divisions, multiplier) + + # Prepare to dynamically allocate memory to hold the schedule. + alloc_sched_fnty = llvmlite.ir.FunctionType(sched_ptr_type, [uintp_t]) + alloc_sched_func = cgutils.get_or_insert_function( + builder.module, + alloc_sched_fnty, + name="allocate_sched") + # Call gufunc_scheduler.cpp to allocate the schedule. + # This may or may not do pooling. + alloc_space = builder.call(alloc_sched_func, [sched_size]) + # Allocate a slot in the entry block to store the schedule pointer. + sched = cgutils.alloca_once(builder, sched_ptr_type) + # Store the schedule pointer into that slot. + builder.store(alloc_space, sched) + + debug_flag = 1 if config.DEBUG_ARRAY_OPT else 0 + scheduling_fnty = llvmlite.ir.FunctionType( + intp_ptr_t, [uintp_t, intp_ptr_t, intp_ptr_t, uintp_t, sched_ptr_type, intp_t]) + if index_var_typ.signed: + do_scheduling = cgutils.get_or_insert_function(builder.module, + scheduling_fnty, + name="do_scheduling_signed") + else: + do_scheduling = cgutils.get_or_insert_function(builder.module, + scheduling_fnty, + name="do_scheduling_unsigned") + + # Call the scheduling routine that decides how to break up the work. + builder.call( + do_scheduling, [ + context.get_constant( + types.uintp, num_dim), dim_starts, dim_stops, num_divisions, + builder.load(sched), context.get_constant( + types.intp, debug_flag)]) + + # Get the LLVM vars for the Numba IR reduction array vars. + redarrs = [lowerer.loadvar(redarrdict[x].name) for x in redvars] + + nredvars = len(redvars) + ninouts = len(expr_args) - nredvars + + def load_potential_tuple_var(x): + """Given a variable name, if that variable is not a new name + introduced as the extracted part of a tuple then just return + the variable loaded from its name. 
However, if the variable + does represent part of a tuple, as recognized by the name of + the variable being present in the exp_name_to_tuple_var dict, + then we load the original tuple var instead that we get from + the dict and then extract the corresponding element of the + tuple, also stored and returned to use in the dict (i.e., offset). + """ + if x in exp_name_to_tuple_var: + orig_tup, offset = exp_name_to_tuple_var[x] + tup_var = lowerer.loadvar(orig_tup) + res = builder.extract_value(tup_var, offset) + return res + else: + return lowerer.loadvar(x) + + # ---------------------------------------------------------------------------- + # Prepare arguments: args, shapes, steps, data + all_args = [load_potential_tuple_var(x) for x in expr_args[:ninouts]] + redarrs + num_args = len(all_args) + num_inps = len(sin) + 1 + args = cgutils.alloca_once( + builder, + byte_ptr_t, + size=context.get_constant( + types.intp, + 1 + num_args), + name="pargs") + array_strides = [] + # sched goes first + builder.store(builder.bitcast(builder.load(sched), byte_ptr_t), args) + array_strides.append(context.get_constant(types.intp, sizeof_intp)) + rv_to_arg_dict = {} + # followed by other arguments + for i in range(num_args): + arg = all_args[i] + var = expr_args[i] + aty = expr_arg_types[i] + dst = builder.gep(args, [context.get_constant(types.intp, i + 1)]) + if i >= ninouts: # reduction variables + ary = context.make_array(aty)(context, builder, arg) + strides = cgutils.unpack_tuple(builder, ary.strides, aty.ndim) + # Start from 1 because we skip the first dimension of length num_threads just like sched. 
+ for j in range(len(strides)): + array_strides.append(strides[j]) + builder.store(builder.bitcast(ary.data, byte_ptr_t), dst) + elif isinstance(aty, types.ArrayCompatible): + if var in races: + typ = (context.get_data_type(aty.dtype) + if aty.dtype != types.boolean + else llvmlite.ir.IntType(1)) + + rv_arg = cgutils.alloca_once(builder, typ) + builder.store(arg, rv_arg) + builder.store(builder.bitcast(rv_arg, byte_ptr_t), dst) + rv_to_arg_dict[var] = (arg, rv_arg) + + array_strides.append(context.get_constant(types.intp, context.get_abi_sizeof(typ))) + else: + ary = context.make_array(aty)(context, builder, arg) + strides = cgutils.unpack_tuple(builder, ary.strides, aty.ndim) + for j in range(len(strides)): + array_strides.append(strides[j]) + builder.store(builder.bitcast(ary.data, byte_ptr_t), dst) + else: + if i < num_inps: + # Scalar input, need to store the value in an array of size 1 + if isinstance(aty, types.Optional): + # Unpack optional type + unpacked_aty = aty.type + arg = context.cast(builder, arg, aty, unpacked_aty) + else: + unpacked_aty = aty + typ = (context.get_data_type(unpacked_aty) + if not isinstance(unpacked_aty, types.Boolean) + else llvmlite.ir.IntType(1)) + ptr = cgutils.alloca_once(builder, typ) + builder.store(arg, ptr) + else: + # Scalar output, must allocate + typ = (context.get_data_type(aty) + if not isinstance(aty, types.Boolean) + else llvmlite.ir.IntType(1)) + ptr = cgutils.alloca_once(builder, typ) + builder.store(builder.bitcast(ptr, byte_ptr_t), dst) + + # ---------------------------------------------------------------------------- + # Next, we prepare the individual dimension info recorded in gu_signature + sig_dim_dict = {} + occurrences = [] + occurrences = [sched_sig[0]] + sig_dim_dict[sched_sig[0]] = context.get_constant(types.intp, 2 * num_dim) + assert len(expr_args) == len(all_args) + assert len(expr_args) == len(expr_arg_types) + assert len(expr_args) == len(sin + sout) + assert len(expr_args) == 
len(outer_sig.args[1:]) + for var, arg, aty, gu_sig in zip(expr_args, all_args, + expr_arg_types, sin + sout): + if isinstance(aty, types.npytypes.Array): + i = aty.ndim - len(gu_sig) + else: + i = 0 + if config.DEBUG_ARRAY_OPT: + print("var =", var, "gu_sig =", gu_sig, "type =", aty, "i =", i) + + for dim_sym in gu_sig: + if config.DEBUG_ARRAY_OPT: + print("var = ", var, " type = ", aty) + if var in races: + sig_dim_dict[dim_sym] = context.get_constant(types.intp, 1) + else: + ary = context.make_array(aty)(context, builder, arg) + shapes = cgutils.unpack_tuple(builder, ary.shape, aty.ndim) + sig_dim_dict[dim_sym] = shapes[i] + + if not (dim_sym in occurrences): + if config.DEBUG_ARRAY_OPT: + print("dim_sym = ", dim_sym, ", i = ", i) + cgutils.printf(builder, dim_sym + " = %d\n", sig_dim_dict[dim_sym]) + occurrences.append(dim_sym) + i = i + 1 + + # ---------------------------------------------------------------------------- + # Prepare shapes, which is a single number (outer loop size), followed by + # the size of individual shape variables. + nshapes = len(sig_dim_dict) + 1 + shapes = cgutils.alloca_once(builder, intp_t, size=nshapes, name="pshape") + # For now, outer loop size is the same as number of threads + builder.store(num_divisions, shapes) + # Individual shape variables go next + i = 1 + for dim_sym in occurrences: + if config.DEBUG_ARRAY_OPT: + cgutils.printf(builder, dim_sym + " = %d\n", sig_dim_dict[dim_sym]) + builder.store( + sig_dim_dict[dim_sym], builder.gep( + shapes, [ + context.get_constant( + types.intp, i)])) + i = i + 1 + + # ---------------------------------------------------------------------------- + # Prepare steps for each argument. Note that all steps are counted in + # bytes. 
+ num_steps = num_args + 1 + len(array_strides) + steps = cgutils.alloca_once( + builder, intp_t, size=context.get_constant( + types.intp, num_steps), name="psteps") + # First goes the step size for sched, which is 2 * num_dim + builder.store(context.get_constant(types.intp, 2 * num_dim * sizeof_intp), + steps) + # The steps for all others are 0, except for reduction results. + for i in range(num_args): + # steps are strides from one thread to the next + stepsize = zero + + dst = builder.gep(steps, [context.get_constant(types.intp, 1 + i)]) + builder.store(stepsize, dst) + for j in range(len(array_strides)): + dst = builder.gep( + steps, [ + context.get_constant( + types.intp, 1 + num_args + j)]) + builder.store(array_strides[j], dst) + + # ---------------------------------------------------------------------------- + # prepare data + data = cgutils.get_null_value(byte_ptr_t) + + fnty = llvmlite.ir.FunctionType(llvmlite.ir.VoidType(), + [byte_ptr_ptr_t, intp_ptr_t, + intp_ptr_t, byte_ptr_t]) + + fn = cgutils.get_or_insert_function(builder.module, fnty, wrapper_name) + context.active_code_library.add_linking_library(info.library) + + if config.DEBUG_ARRAY_OPT: + cgutils.printf(builder, "before calling kernel %p\n", fn) + builder.call(fn, [args, shapes, steps, data]) + if config.DEBUG_ARRAY_OPT: + cgutils.printf(builder, "after calling kernel %p\n", fn) + + builder.call(set_chunksize, [current_chunksize]) + + # Deallocate the schedule's memory. 
+ dealloc_sched_fnty = llvmlite.ir.FunctionType(llvmlite.ir.VoidType(), [sched_ptr_type]) + dealloc_sched_func = cgutils.get_or_insert_function( + builder.module, + dealloc_sched_fnty, + name="deallocate_sched") + builder.call(dealloc_sched_func, [builder.load(sched)]) + + for k, v in rv_to_arg_dict.items(): + arg, rv_arg = v + only_elem_ptr = builder.gep(rv_arg, [context.get_constant(types.intp, 0)]) + builder.store(builder.load(only_elem_ptr), lowerer.getvar(k)) + + context.active_code_library.add_linking_library(cres.library) diff --git a/venv/lib/python3.10/site-packages/numba/parfors/parfor_lowering_utils.py b/venv/lib/python3.10/site-packages/numba/parfors/parfor_lowering_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f7b30b81b64be9e2eee86e1ad9ffd06ac7f00dd4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/parfors/parfor_lowering_utils.py @@ -0,0 +1,212 @@ +from collections import namedtuple + +from numba.core import types, ir +from numba.core.typing import signature + + +_CallableNode = namedtuple("BoundFunc", ["func", "sig"]) + + +class ParforLoweringBuilder: + """Helper class for building Numba-IR and lowering for Parfor. + """ + def __init__(self, lowerer, scope, loc): + self._lowerer = lowerer + self._scope = scope + self._loc = loc + + @property + def _context(self): + return self._lowerer.context + + @property + def _typingctx(self): + return self._context.typing_context + + @property + def _typemap(self): + return self._lowerer.fndesc.typemap + + @property + def _calltypes(self): + return self._lowerer.fndesc.calltypes + + def bind_global_function(self, fobj, ftype, args, kws={}): + """Binds a global function to a variable. + + Parameters + ---------- + fobj : object + The function to be bound. 
+ ftype : types.Type + args : Sequence[types.Type] + kws : Mapping[str, types.Type] + + Returns + ------- + callable: _CallableNode + """ + loc = self._loc + varname = f"{fobj.__name__}_func" + gvname = f"{fobj.__name__}" + + func_sig = self._typingctx.resolve_function_type(ftype, args, kws) + func_var = self.assign( + rhs=ir.Global(gvname, fobj, loc=loc), typ=ftype, name=varname + ) + return _CallableNode(func=func_var, sig=func_sig) + + def make_const_variable(self, cval, typ, name="pf_const") -> ir.Var: + """Makes a constant variable + + Parameters + ---------- + cval : object + The constant value + typ : types.Type + type of the value + name : str + variable name to store to + + Returns + ------- + res : ir.Var + """ + return self.assign( + rhs=ir.Const(cval, loc=self._loc), typ=typ, name=name + ) + + def make_tuple_variable(self, varlist, name="pf_tuple") -> ir.Var: + """Makes a tuple variable + + Parameters + ---------- + varlist : Sequence[ir.Var] + Variables containing the values to be stored. 
+ name : str + variable name to store to + + Returns + ------- + res : ir.Var + """ + loc = self._loc + vartys = [self._typemap[x.name] for x in varlist] + tupty = types.Tuple.from_types(vartys) + return self.assign( + rhs=ir.Expr.build_tuple(varlist, loc), typ=tupty, name=name + ) + + def assign(self, rhs, typ, name="pf_assign") -> ir.Var: + """Assign a value to a new variable + + Parameters + ---------- + rhs : object + The value + typ : types.Type + type of the value + name : str + variable name to store to + + Returns + ------- + res : ir.Var + """ + loc = self._loc + var = self._scope.redefine(name, loc) + self._typemap[var.name] = typ + assign = ir.Assign(rhs, var, loc) + self._lowerer.lower_inst(assign) + return var + + def assign_inplace(self, rhs, typ, name) -> ir.Var: + """Assign a value to a new variable or inplace if it already exist + + Parameters + ---------- + rhs : object + The value + typ : types.Type + type of the value + name : str + variable name to store to + + Returns + ------- + res : ir.Var + """ + loc = self._loc + var = ir.Var(self._scope, name, loc) + assign = ir.Assign(rhs, var, loc) + self._typemap.setdefault(var.name, typ) + self._lowerer.lower_inst(assign) + return var + + def call(self, callable_node, args, kws={}) -> ir.Expr: + """Call a bound callable + + Parameters + ---------- + callable_node : _CallableNode + The callee + args : Sequence[ir.Var] + kws : Mapping[str, ir.Var] + + Returns + ------- + res : ir.Expr + The expression node for the return value of the call + """ + call = ir.Expr.call(callable_node.func, args, kws, loc=self._loc) + self._calltypes[call] = callable_node.sig + return call + + def setitem(self, obj, index, val) -> ir.SetItem: + """Makes a setitem call + + Parameters + ---------- + obj : ir.Var + the object being indexed + index : ir.Var + the index + val : ir.Var + the value to be stored + + Returns + ------- + res : ir.SetItem + """ + loc = self._loc + tm = self._typemap + setitem = ir.SetItem(obj, index, 
val, loc=loc) + self._lowerer.fndesc.calltypes[setitem] = signature( + types.none, tm[obj.name], tm[index.name], tm[val.name] + ) + self._lowerer.lower_inst(setitem) + return setitem + + def getitem(self, obj, index, typ) -> ir.Expr: + """Makes a getitem call + + Parameters + ---------- + obj : ir.Var + the object being indexed + index : ir.Var + the index + val : ir.Var + the ty + + Returns + ------- + res : ir.Expr + the retrieved value + """ + tm = self._typemap + getitem = ir.Expr.getitem(obj, index, loc=self._loc) + self._lowerer.fndesc.calltypes[getitem] = signature( + typ, tm[obj.name], tm[index.name], + ) + return getitem diff --git a/venv/lib/python3.10/site-packages/numba/pycc/__init__.py b/venv/lib/python3.10/site-packages/numba/pycc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c797fecd1c3863e7e26375aa69835dc82f297e25 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/pycc/__init__.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- + +import warnings +from numba.core.errors import NumbaPendingDeprecationWarning +# The pycc module requires setuptools. +try: + import setuptools +except ImportError: + msg = "The 'setuptools' package is required at runtime for pycc support." + raise ImportError(msg) + +# Public API +from .cc import CC +from .decorators import export, exportmany + +# If use of anything is attempted through the `pycc` import path this warning +# will be shown. +__pycc_deprecation_doc_url = ("https://numba.readthedocs.io/en/stable/" + "reference/deprecation.html" + "#deprecation-of-the-numba-pycc-module") +__pycc_pending_deprecation_message = ("The 'pycc' module is pending " + "deprecation. Replacement technology is " + "being developed.\n\n" + "Pending Deprecation in Numba 0.57.0. 
" + "For more information please see: " + f"{__pycc_deprecation_doc_url}") + +_pend_dep = NumbaPendingDeprecationWarning(__pycc_pending_deprecation_message) +warnings.warn(_pend_dep, stacklevel=2) diff --git a/venv/lib/python3.10/site-packages/numba/pycc/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/pycc/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5625c39c4e763b9548f11f4529c9f7171e4173e Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/pycc/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/pycc/__pycache__/cc.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/pycc/__pycache__/cc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1821a12dcf3f08342dbd409c47b8cf3cceffebb Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/pycc/__pycache__/cc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/pycc/__pycache__/compiler.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/pycc/__pycache__/compiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a657263ba765aa90c96a1c180dad7cdee482d41 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/pycc/__pycache__/compiler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/pycc/__pycache__/decorators.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/pycc/__pycache__/decorators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9acf74e212f31a709adceee728993bb62098c562 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/pycc/__pycache__/decorators.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/pycc/__pycache__/llvm_types.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/pycc/__pycache__/llvm_types.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..f5cc6cdd2ac471e424cfd8561b172a5a6fef66a7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/pycc/__pycache__/llvm_types.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/pycc/__pycache__/platform.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/pycc/__pycache__/platform.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a9f222abf250dd7723b03955b2a60c8a5a47509 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/pycc/__pycache__/platform.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/pycc/cc.py b/venv/lib/python3.10/site-packages/numba/pycc/cc.py new file mode 100644 index 0000000000000000000000000000000000000000..15a27c1bb156c415ee1b176fbe416c676bf3a243 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/pycc/cc.py @@ -0,0 +1,309 @@ +from setuptools import distutils as dutils +from setuptools.command import build_ext +from setuptools.extension import Extension + +import os +import shutil +import sys +import tempfile + +from numba.core import typing, sigutils +from numba.core.compiler_lock import global_compiler_lock +from numba.pycc.compiler import ModuleCompiler, ExportEntry +from numba.pycc.platform import Toolchain +from numba import cext + + +dir_util = dutils.dir_util +log = dutils.log +extension_libs = cext.get_extension_libs() + + +class CC(object): + """ + An ahead-of-time compiler to create extension modules that don't + depend on Numba. + """ + + # NOTE: using ccache can speed up repetitive builds + # (especially for the mixin modules) + + _mixin_sources = ['modulemixin.c',] + extension_libs + + # -flto strips all unused helper functions, which 1) makes the + # produced output much smaller and 2) can make the linking step faster. 
+ # (the Windows linker seems to do this by default, judging by the results) + + _extra_cflags = { + # Comment out due to odd behavior with GCC 4.9+ with LTO + # 'posix': ['-flto'], + } + + _extra_ldflags = { + # Comment out due to odd behavior with GCC 4.9+ with LTO + # 'posix': ['-flto'], + } + + def __init__(self, extension_name, source_module=None): + if '.' in extension_name: + raise ValueError("basename should be a simple module name, not " + "qualified name") + + self._basename = extension_name + self._init_function = 'pycc_init_' + extension_name + self._exported_functions = {} + # Resolve source module name and directory + f = sys._getframe(1) + if source_module is None: + dct = f.f_globals + source_module = dct['__name__'] + elif hasattr(source_module, '__name__'): + dct = source_module.__dict__ + source_module = source_module.__name__ + else: + dct = sys.modules[source_module].__dict__ + + self._source_path = dct.get('__file__', '') + self._source_module = source_module + self._toolchain = Toolchain() + self._verbose = False + # By default, output in directory of caller module + self._output_dir = os.path.dirname(self._source_path) + self._output_file = self._toolchain.get_ext_filename(extension_name) + self._use_nrt = True + self._target_cpu = '' + + @property + def name(self): + """ + The name of the extension module to create. + """ + return self._basename + + @property + def output_file(self): + """ + The specific output file (a DLL) that will be generated. + """ + return self._output_file + + @output_file.setter + def output_file(self, value): + self._output_file = value + + @property + def output_dir(self): + """ + The directory the output file will be put in. 
+ """ + return self._output_dir + + @output_dir.setter + def output_dir(self, value): + self._output_dir = value + + @property + def use_nrt(self): + return self._use_nrt + + @use_nrt.setter + def use_nrt(self, value): + self._use_nrt = value + + @property + def target_cpu(self): + """ + The target CPU model for code generation. + """ + return self._target_cpu + + @target_cpu.setter + def target_cpu(self, value): + self._target_cpu = value + + @property + def verbose(self): + """ + Whether to display detailed information when compiling. + """ + return self._verbose + + @verbose.setter + def verbose(self, value): + self._verbose = value + + def export(self, exported_name, sig): + """ + Mark a function for exporting in the extension module. + """ + fn_args, fn_retty = sigutils.normalize_signature(sig) + sig = typing.signature(fn_retty, *fn_args) + if exported_name in self._exported_functions: + raise KeyError("duplicated export symbol %s" % (exported_name)) + + def decorator(func): + entry = ExportEntry(exported_name, sig, func) + self._exported_functions[exported_name] = entry + return func + + return decorator + + @property + def _export_entries(self): + return sorted(self._exported_functions.values(), + key=lambda entry: entry.symbol) + + def _get_mixin_sources(self): + here = os.path.dirname(__file__) + mixin_sources = self._mixin_sources[:] + if self._use_nrt: + mixin_sources.append('../core/runtime/nrt.cpp') + return [os.path.join(here, f) for f in mixin_sources] + + def _get_mixin_defines(self): + # Macro definitions required by modulemixin.c + return [ + ('PYCC_MODULE_NAME', self._basename), + ('PYCC_USE_NRT', int(self._use_nrt)), + ] + + def _get_extra_cflags(self): + extra_cflags = self._extra_cflags.get(sys.platform, []) + if not extra_cflags: + extra_cflags = self._extra_cflags.get(os.name, []) + return extra_cflags + + def _get_extra_ldflags(self): + extra_ldflags = self._extra_ldflags.get(sys.platform, []) + if not extra_ldflags: + extra_ldflags = 
self._extra_ldflags.get(os.name, []) + # helperlib uses pthread on linux. make sure we are linking to it. + if sys.platform.startswith("linux"): + if "-pthread" not in extra_ldflags: + extra_ldflags.append('-pthread') + return extra_ldflags + + def _compile_mixins(self, build_dir): + sources = self._get_mixin_sources() + macros = self._get_mixin_defines() + include_dirs = self._toolchain.get_python_include_dirs() + + extra_cflags = self._get_extra_cflags() + # XXX distutils creates a whole subtree inside build_dir, + # e.g. /tmp/test_pycc/home/antoine/numba/numba/pycc/modulemixin.o + objects = self._toolchain.compile_objects(sources, build_dir, + include_dirs=include_dirs, + macros=macros, + extra_cflags=extra_cflags) + return objects + + @global_compiler_lock + def _compile_object_files(self, build_dir): + compiler = ModuleCompiler(self._export_entries, self._basename, + self._use_nrt, cpu_name=self._target_cpu) + compiler.external_init_function = self._init_function + temp_obj = os.path.join(build_dir, + os.path.splitext(self._output_file)[0] + '.o') + log.info("generating LLVM code for '%s' into %s", + self._basename, temp_obj) + compiler.write_native_object(temp_obj, wrap=True) + return [temp_obj], compiler.dll_exports + + @global_compiler_lock + def compile(self): + """ + Compile the extension module. 
+ """ + self._toolchain.verbose = self.verbose + build_dir = tempfile.mkdtemp(prefix='pycc-build-%s-' % self._basename) + + # Compile object file + objects, dll_exports = self._compile_object_files(build_dir) + + # Compile mixins + objects += self._compile_mixins(build_dir) + + # Then create shared library + extra_ldflags = self._get_extra_ldflags() + output_dll = os.path.join(self._output_dir, self._output_file) + libraries = self._toolchain.get_python_libraries() + library_dirs = self._toolchain.get_python_library_dirs() + self._toolchain.link_shared(output_dll, objects, + libraries, library_dirs, + export_symbols=dll_exports, + extra_ldflags=extra_ldflags) + + shutil.rmtree(build_dir) + + def distutils_extension(self, **kwargs): + """ + Create a distutils extension object that can be used in your + setup.py. + """ + macros = kwargs.pop('macros', []) + self._get_mixin_defines() + depends = kwargs.pop('depends', []) + [self._source_path] + extra_compile_args = (kwargs.pop('extra_compile_args', []) + + self._get_extra_cflags()) + extra_link_args = (kwargs.pop('extra_link_args', []) + + self._get_extra_ldflags()) + include_dirs = (kwargs.pop('include_dirs', []) + + self._toolchain.get_python_include_dirs()) + libraries = (kwargs.pop('libraries', []) + + self._toolchain.get_python_libraries()) + library_dirs = (kwargs.pop('library_dirs', []) + + self._toolchain.get_python_library_dirs()) + python_package_path = self._source_module[:self._source_module.rfind('.')+1] + + ext = _CCExtension(name=python_package_path + self._basename, + sources=self._get_mixin_sources(), + depends=depends, + define_macros=macros, + include_dirs=include_dirs, + libraries=libraries, + library_dirs=library_dirs, + extra_compile_args=extra_compile_args, + extra_link_args=extra_link_args, + **kwargs) + ext.monkey_patch_distutils() + ext._cc = self + return ext + + +class _CCExtension(Extension): + """ + A Numba-specific Extension subclass to LLVM-compile pure Python code + to an extension 
module. + """ + + _cc = None + _distutils_monkey_patched = False + + def _prepare_object_files(self, build_ext): + cc = self._cc + dir_util.mkpath(os.path.join(build_ext.build_temp, *self.name.split('.')[:-1])) + objects, _ = cc._compile_object_files(build_ext.build_temp) + # Add generated object files for linking + self.extra_objects = objects + + @classmethod + def monkey_patch_distutils(cls): + """ + Monkey-patch distutils with our own build_ext class knowing + about pycc-compiled extensions modules. + """ + if cls._distutils_monkey_patched: + return + + _orig_build_ext = build_ext.build_ext + + class _CC_build_ext(_orig_build_ext): + + def build_extension(self, ext): + if isinstance(ext, _CCExtension): + ext._prepare_object_files(self) + + _orig_build_ext.build_extension(self, ext) + + build_ext.build_ext = _CC_build_ext + + cls._distutils_monkey_patched = True diff --git a/venv/lib/python3.10/site-packages/numba/pycc/compiler.py b/venv/lib/python3.10/site-packages/numba/pycc/compiler.py new file mode 100644 index 0000000000000000000000000000000000000000..a9ad60943fcb3807ae3339db41ee78de1ec905c9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/pycc/compiler.py @@ -0,0 +1,477 @@ +# -*- coding: utf-8 -*- + +import logging +import os +import sys + +from llvmlite import ir +from llvmlite.binding import Linkage + +from numba.pycc import llvm_types as lt +from numba.core.cgutils import create_constant_array +from numba.core.compiler import compile_extra, Flags +from numba.core.compiler_lock import global_compiler_lock + +from numba.core.registry import cpu_target +from numba.core.runtime import nrtdynmod +from numba.core import cgutils + + +logger = logging.getLogger(__name__) + +__all__ = ['Compiler'] + +NULL = ir.Constant(lt._void_star, None) +ZERO = ir.Constant(lt._int32, 0) +ONE = ir.Constant(lt._int32, 1) +METH_VARARGS_AND_KEYWORDS = ir.Constant(lt._int32, 1|2) + + +def get_header(): + import numpy + import textwrap + + return textwrap.dedent("""\ + 
#include + + #ifndef HAVE_LONGDOUBLE + #define HAVE_LONGDOUBLE %d + #endif + + typedef struct { + float real; + float imag; + } complex64; + + typedef struct { + double real; + double imag; + } complex128; + + #if HAVE_LONGDOUBLE + typedef struct { + long double real; + long double imag; + } complex256; + #endif + + typedef float float32; + typedef double float64; + #if HAVE_LONGDOUBLE + typedef long double float128; + #endif + """ % hasattr(numpy, 'complex256')) + + +class ExportEntry(object): + """ + A simple record for exporting symbols. + """ + + def __init__(self, symbol, signature, function): + self.symbol = symbol + self.signature = signature + self.function = function + + def __repr__(self): + return "ExportEntry(%r, %r)" % (self.symbol, self.signature) + + +class _ModuleCompiler(object): + """A base class to compile Python modules to a single shared library or + extension module. + + :param export_entries: a list of ExportEntry instances. + :param module_name: the name of the exported module. + """ + + #: Structure used to describe a method of an extension type. 
+ #: struct PyMethodDef { + #: const char *ml_name; /* The name of the built-in function/method */ + #: PyCFunction ml_meth; /* The C function that implements it */ + #: int ml_flags; /* Combination of METH_xxx flags, which mostly + #: describe the args expected by the C func */ + #: const char *ml_doc; /* The __doc__ attribute, or NULL */ + #: }; + method_def_ty = ir.LiteralStructType((lt._int8_star, + lt._void_star, + lt._int32, + lt._int8_star)) + + method_def_ptr = ir.PointerType(method_def_ty) + # The structure type constructed by PythonAPI.serialize_uncached() + # when updating this, also make sure to update `env_def_t` struct in + # numba/pycc/modulemixin.c + env_def_ty = ir.LiteralStructType((lt._void_star, + lt._int32, + lt._void_star, + lt._void_star, + lt._int32)) + env_def_ptr = ir.PointerType(env_def_ty) + + def __init__(self, export_entries, module_name, use_nrt=False, + **aot_options): + self.module_name = module_name + self.export_python_wrap = False + self.dll_exports = [] + self.export_entries = export_entries + # Used by the CC API but not the legacy API + self.external_init_function = None + self.use_nrt = use_nrt + + self.typing_context = cpu_target.typing_context + self.context = cpu_target.target_context.with_aot_codegen( + self.module_name, **aot_options) + + def _mangle_method_symbol(self, func_name): + return "._pycc_method_%s" % (func_name,) + + def _emit_python_wrapper(self, llvm_module): + """Emit generated Python wrapper and extension module code. + """ + raise NotImplementedError + + @global_compiler_lock + def _cull_exports(self): + """Read all the exported functions/modules in the translator + environment, and join them into a single LLVM module. 
+ """ + self.exported_function_types = {} + self.function_environments = {} + self.environment_gvs = {} + + codegen = self.context.codegen() + library = codegen.create_library(self.module_name) + + # Generate IR for all exported functions + flags = Flags() + flags.no_compile = True + if not self.export_python_wrap: + flags.no_cpython_wrapper = True + flags.no_cfunc_wrapper = True + if self.use_nrt: + flags.nrt = True + # Compile NRT helpers + nrt_module, _ = nrtdynmod.create_nrt_module(self.context) + library.add_ir_module(nrt_module) + + for entry in self.export_entries: + cres = compile_extra(self.typing_context, self.context, + entry.function, + entry.signature.args, + entry.signature.return_type, flags, + locals={}, library=library) + + func_name = cres.fndesc.llvm_func_name + llvm_func = cres.library.get_function(func_name) + + if self.export_python_wrap: + llvm_func.linkage = 'internal' + wrappername = cres.fndesc.llvm_cpython_wrapper_name + wrapper = cres.library.get_function(wrappername) + wrapper.name = self._mangle_method_symbol(entry.symbol) + wrapper.linkage = 'external' + fnty = cres.target_context.call_conv.get_function_type( + cres.fndesc.restype, cres.fndesc.argtypes) + self.exported_function_types[entry] = fnty + self.function_environments[entry] = cres.environment + self.environment_gvs[entry] = cres.fndesc.env_name + else: + llvm_func.name = entry.symbol + self.dll_exports.append(entry.symbol) + + if self.export_python_wrap: + wrapper_module = library.create_ir_module("wrapper") + self._emit_python_wrapper(wrapper_module) + library.add_ir_module(wrapper_module) + + # Hide all functions in the DLL except those explicitly exported + library.finalize() + for fn in library.get_defined_functions(): + if fn.name not in self.dll_exports: + if fn.linkage in {Linkage.private, Linkage.internal}: + # Private/Internal linkage must have "default" visibility + fn.visibility = "default" + else: + fn.visibility = 'hidden' + return library + + def 
write_llvm_bitcode(self, output, wrap=False, **kws): + self.export_python_wrap = wrap + library = self._cull_exports() + with open(output, 'wb') as fout: + fout.write(library.emit_bitcode()) + + def write_native_object(self, output, wrap=False, **kws): + self.export_python_wrap = wrap + library = self._cull_exports() + with open(output, 'wb') as fout: + fout.write(library.emit_native_object()) + + def emit_type(self, tyobj): + ret_val = str(tyobj) + if 'int' in ret_val: + if ret_val.endswith(('8', '16', '32', '64')): + ret_val += "_t" + return ret_val + + def emit_header(self, output): + fname, ext = os.path.splitext(output) + with open(fname + '.h', 'w') as fout: + fout.write(get_header()) + fout.write("\n/* Prototypes */\n") + for export_entry in self.export_entries: + name = export_entry.symbol + restype = self.emit_type(export_entry.signature.return_type) + args = ", ".join(self.emit_type(argtype) + for argtype in export_entry.signature.args) + fout.write("extern %s %s(%s);\n" % (restype, name, args)) + + def _emit_method_array(self, llvm_module): + """ + Collect exported methods and emit a PyMethodDef array. + + :returns: a pointer to the PyMethodDef array. 
+ """ + method_defs = [] + for entry in self.export_entries: + name = entry.symbol + llvm_func_name = self._mangle_method_symbol(name) + fnty = self.exported_function_types[entry] + lfunc = ir.Function(llvm_module, fnty, llvm_func_name) + + method_name = self.context.insert_const_string(llvm_module, name) + method_def_const = ir.Constant.literal_struct( + (method_name, + ir.Constant.bitcast(lfunc, lt._void_star), + METH_VARARGS_AND_KEYWORDS, + NULL)) + method_defs.append(method_def_const) + + sentinel = ir.Constant.literal_struct([NULL, NULL, ZERO, NULL]) + method_defs.append(sentinel) + method_array_init = create_constant_array(self.method_def_ty, method_defs) + method_array = cgutils.add_global_variable(llvm_module, + method_array_init.type, + '.module_methods') + method_array.initializer = method_array_init + method_array.linkage = 'internal' + method_array_ptr = ir.Constant.gep(method_array, [ZERO, ZERO]) + return method_array_ptr + + def _emit_environment_array(self, llvm_module, builder, pyapi): + """ + Emit an array of env_def_t structures (see modulemixin.c) + storing the pickled environment constants for each of the + exported functions. + """ + env_defs = [] + for entry in self.export_entries: + env = self.function_environments[entry] + # Constants may be unhashable so avoid trying to cache them + env_def = pyapi.serialize_uncached(env.consts) + env_defs.append(env_def) + env_defs_init = create_constant_array(self.env_def_ty, env_defs) + gv = self.context.insert_unique_const(llvm_module, + '.module_environments', + env_defs_init) + return gv.gep([ZERO, ZERO]) + + def _emit_envgvs_array(self, llvm_module, builder, pyapi): + """ + Emit an array of Environment pointers that needs to be filled at + initialization. 
+ """ + env_setters = [] + for entry in self.export_entries: + envgv_name = self.environment_gvs[entry] + gv = self.context.declare_env_global(llvm_module, envgv_name) + envgv = gv.bitcast(lt._void_star) + env_setters.append(envgv) + + env_setters_init = create_constant_array(lt._void_star, env_setters) + gv = self.context.insert_unique_const(llvm_module, + '.module_envgvs', + env_setters_init) + return gv.gep([ZERO, ZERO]) + + def _emit_module_init_code(self, llvm_module, builder, modobj, + method_array, env_array, envgv_array): + """ + Emit call to "external" init function, if any. + """ + if self.external_init_function: + fnty = ir.FunctionType(lt._int32, + [modobj.type, self.method_def_ptr, + self.env_def_ptr, envgv_array.type]) + fn = ir.Function(llvm_module, fnty, self.external_init_function) + return builder.call(fn, [modobj, method_array, env_array, + envgv_array]) + else: + return None + + +class ModuleCompiler(_ModuleCompiler): + + _ptr_fun = lambda ret, *args: ir.PointerType(ir.FunctionType(ret, args)) + + #: typedef int (*visitproc)(PyObject *, void *); + visitproc_ty = _ptr_fun(lt._int8, + lt._pyobject_head_p) + + #: typedef int (*inquiry)(PyObject *); + inquiry_ty = _ptr_fun(lt._int8, + lt._pyobject_head_p) + + #: typedef int (*traverseproc)(PyObject *, visitproc, void *); + traverseproc_ty = _ptr_fun(lt._int8, + lt._pyobject_head_p, + visitproc_ty, + lt._void_star) + + # typedef void (*freefunc)(void *) + freefunc_ty = _ptr_fun(lt._int8, + lt._void_star) + + # PyObject* (*m_init)(void); + m_init_ty = _ptr_fun(lt._int8) + + _char_star = lt._int8_star + + #: typedef struct PyModuleDef_Base { + #: PyObject_HEAD + #: PyObject* (*m_init)(void); + #: Py_ssize_t m_index; + #: PyObject* m_copy; + #: } PyModuleDef_Base; + module_def_base_ty = ir.LiteralStructType( + ( + lt._pyobject_head, + m_init_ty, + lt._llvm_py_ssize_t, + lt._pyobject_head_p + )) + + #: This struct holds all information that is needed to create a module object. 
+ #: typedef struct PyModuleDef{ + #: PyModuleDef_Base m_base; + #: const char* m_name; + #: const char* m_doc; + #: Py_ssize_t m_size; + #: PyMethodDef *m_methods; + #: inquiry m_reload; + #: traverseproc m_traverse; + #: inquiry m_clear; + #: freefunc m_free; + #: }PyModuleDef; + module_def_ty = ir.LiteralStructType( + ( + module_def_base_ty, + _char_star, + _char_star, + lt._llvm_py_ssize_t, + _ModuleCompiler.method_def_ptr, + inquiry_ty, + traverseproc_ty, + inquiry_ty, + freefunc_ty + )) + + @property + def module_create_definition(self): + """ + Return the signature and name of the Python C API function to + initialize the module. + """ + signature = ir.FunctionType(lt._pyobject_head_p, + (ir.PointerType(self.module_def_ty), + lt._int32)) + + name = "PyModule_Create2" + if lt._trace_refs_: + name += "TraceRefs" + + return signature, name + + @property + def module_init_definition(self): + """ + Return the name and signature of the module's initialization function. + """ + signature = ir.FunctionType(lt._pyobject_head_p, ()) + + return signature, "PyInit_" + self.module_name + + def _emit_python_wrapper(self, llvm_module): + # Figure out the Python C API module creation function, and + # get a LLVM function for it. + create_module_fn = ir.Function(llvm_module, *self.module_create_definition) + create_module_fn.linkage = 'external' + + # Define a constant string for the module name. 
+ mod_name_const = self.context.insert_const_string(llvm_module, + self.module_name) + + mod_def_base_init = ir.Constant.literal_struct( + (lt._pyobject_head_init, # PyObject_HEAD + ir.Constant(self.m_init_ty, None), # m_init + ir.Constant(lt._llvm_py_ssize_t, None), # m_index + ir.Constant(lt._pyobject_head_p, None), # m_copy + ) + ) + mod_def_base = cgutils.add_global_variable(llvm_module, + mod_def_base_init.type, + '.module_def_base') + mod_def_base.initializer = mod_def_base_init + mod_def_base.linkage = 'internal' + + method_array = self._emit_method_array(llvm_module) + + mod_def_init = ir.Constant.literal_struct( + (mod_def_base_init, # m_base + mod_name_const, # m_name + ir.Constant(self._char_star, None), # m_doc + ir.Constant(lt._llvm_py_ssize_t, -1), # m_size + method_array, # m_methods + ir.Constant(self.inquiry_ty, None), # m_reload + ir.Constant(self.traverseproc_ty, None), # m_traverse + ir.Constant(self.inquiry_ty, None), # m_clear + ir.Constant(self.freefunc_ty, None) # m_free + ) + ) + + # Define a constant string for the module name. + mod_def = cgutils.add_global_variable(llvm_module, mod_def_init.type, + '.module_def') + mod_def.initializer = mod_def_init + mod_def.linkage = 'internal' + + # Define the module initialization function. + mod_init_fn = ir.Function(llvm_module, *self.module_init_definition) + entry = mod_init_fn.append_basic_block('Entry') + builder = ir.IRBuilder(entry) + pyapi = self.context.get_python_api(builder) + + mod = builder.call(create_module_fn, + (mod_def, + ir.Constant(lt._int32, sys.api_version))) + + # Test if module has been created correctly. 
+ # (XXX for some reason comparing with the NULL constant fails llvm + # with an assertion in pydebug mode) + with builder.if_then(cgutils.is_null(builder, mod)): + builder.ret(NULL.bitcast(mod_init_fn.type.pointee.return_type)) + + env_array = self._emit_environment_array(llvm_module, builder, pyapi) + envgv_array = self._emit_envgvs_array(llvm_module, builder, pyapi) + ret = self._emit_module_init_code(llvm_module, builder, mod, + method_array, env_array, envgv_array) + if ret is not None: + with builder.if_then(cgutils.is_not_null(builder, ret)): + # Init function errored out + builder.ret(ir.Constant(mod.type, None)) + + builder.ret(mod) + + self.dll_exports.append(mod_init_fn.name) + diff --git a/venv/lib/python3.10/site-packages/numba/pycc/decorators.py b/venv/lib/python3.10/site-packages/numba/pycc/decorators.py new file mode 100644 index 0000000000000000000000000000000000000000..34278903dec5335b0aaa37063a19afc0e5f65865 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/pycc/decorators.py @@ -0,0 +1,72 @@ +import re +import warnings + +from numba.core import typing, sigutils +from numba.pycc.compiler import ExportEntry + +# Registry is okay to be a global because we are using pycc as a standalone +# commandline tool. 
+export_registry = [] + + +def export(prototype): + warnings.warn("export() is deprecated, use the numba.pycc.CC API instead", + DeprecationWarning, stacklevel=2) + + sym, sig = parse_prototype(prototype) + + def wrappped(func): + fn_argtys, fn_retty = sigutils.normalize_signature(sig) + signature = typing.signature(fn_retty, *fn_argtys) + entry = ExportEntry(symbol=sym, signature=signature, function=func) + export_registry.append(entry) + + return wrappped + + +def exportmany(prototypes): + warnings.warn("exportmany() is deprecated, use the numba.pycc.CC API instead", + DeprecationWarning, stacklevel=2) + + def wrapped(func): + for proto in prototypes: + export(proto)(func) + return wrapped + + +def process_input_files(inputs): + """ + Read input source files for execution of legacy @export / @exportmany + decorators. + """ + for ifile in inputs: + with open(ifile) as fin: + exec(compile(fin.read(), ifile, 'exec')) + + +def clear_export_registry(): + export_registry[:] = [] + + +# --------------------------------- Internal --------------------------------- + +re_symbol = re.compile(r'[_a-z][_a-z0-9]*', re.I) + + +def parse_prototype(text): + """Separate the symbol and function-type in a a string with + "symbol function-type" (e.g. 
"mult float(float, float)") + + Returns + --------- + (symbol_string, functype_string) + """ + m = re_symbol.match(text) + if not m: + raise ValueError("Invalid function name for export prototype") + s = m.start(0) + e = m.end(0) + symbol = text[s:e] + functype = text[e + 1:] + return symbol, functype + diff --git a/venv/lib/python3.10/site-packages/numba/pycc/llvm_types.py b/venv/lib/python3.10/site-packages/numba/pycc/llvm_types.py new file mode 100644 index 0000000000000000000000000000000000000000..4259ced6e6f696425530c250aba947043aa852ca --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/pycc/llvm_types.py @@ -0,0 +1,37 @@ +import sys +import ctypes +import struct as struct_ +import llvmlite.ir +from llvmlite.ir import Constant + +_trace_refs_ = hasattr(sys, 'getobjects') +_plat_bits = struct_.calcsize('@P') * 8 + +_int8 = llvmlite.ir.IntType(8) +_int32 = llvmlite.ir.IntType(32) + +_void_star = llvmlite.ir.PointerType(_int8) + +_int8_star = _void_star + +_sizeof_py_ssize_t = ctypes.sizeof(getattr(ctypes, 'c_size_t')) +_llvm_py_ssize_t = llvmlite.ir.IntType(_sizeof_py_ssize_t * 8) + +if _trace_refs_: + _pyobject_head = llvmlite.ir.LiteralStructType([_void_star, _void_star, + _llvm_py_ssize_t, _void_star]) + _pyobject_head_init = Constant.literal_struct([ + Constant(_void_star, None), # _ob_next + Constant(_void_star, None), # _ob_prev + Constant(_llvm_py_ssize_t, 1), # ob_refcnt + Constant(_void_star, None), # ob_type + ]) + +else: + _pyobject_head = llvmlite.ir.LiteralStructType([_llvm_py_ssize_t, _void_star]) + _pyobject_head_init = Constant.literal_struct([ + Constant(_llvm_py_ssize_t, 1), # ob_refcnt + Constant(_void_star, None), # ob_type + ]) + +_pyobject_head_p = llvmlite.ir.PointerType(_pyobject_head) diff --git a/venv/lib/python3.10/site-packages/numba/pycc/modulemixin.c b/venv/lib/python3.10/site-packages/numba/pycc/modulemixin.c new file mode 100644 index 0000000000000000000000000000000000000000..5e03133d8391918bf1c58c5b04c981cba17a7f9d --- 
/dev/null +++ b/venv/lib/python3.10/site-packages/numba/pycc/modulemixin.c @@ -0,0 +1,208 @@ +/* + * This C file is compiled and linked into pycc-generated shared objects. + * It provides the Numba helper functions for runtime use in pycc-compiled + * functions. + */ + +#include "../_numba_common.h" +#include "../_pymodule.h" + +/* Define all runtime-required symbols in this C module, but do not + export them outside the shared library if possible. */ + +#define NUMBA_EXPORT_FUNC(_rettype) VISIBILITY_HIDDEN _rettype +#define NUMBA_EXPORT_DATA(_vartype) VISIBILITY_HIDDEN _vartype + +#define PYCC_COMPILING + +#include "../_helperlib.c" +#include "../_dynfunc.c" + +#if PYCC_USE_NRT +#include "../core/runtime/_nrt_python.c" +#include "../core/runtime/nrt.h" +#endif + +#if (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION >= 12) + #define Py_BUILD_CORE 1 + #include "internal/pycore_pyhash.h" + #undef Py_BUILD_CORE +#endif + +/* Defines hashsecret variables (see issue #6386) */ +int64_t _numba_hashsecret_siphash_k0; +int64_t _numba_hashsecret_siphash_k1; +int64_t _numba_hashsecret_djbx33a_suffix; + +/* NOTE: import_array() is macro, not a function. 
It returns NULL on + failure */ +static void * +wrap_import_array(void) { + import_array(); + return (void *) 1; +} + + +static int +init_numpy(void) { + return wrap_import_array() != NULL; +} + + +#ifndef PYCC_MODULE_NAME +#error PYCC_MODULE_NAME must be defined +#endif + +/* Preprocessor trick: need to use two levels of macros otherwise + PYCC_MODULE_NAME would not get expanded */ +#define __PYCC(prefix, modname) prefix ## modname +#define _PYCC(prefix, modname) __PYCC(prefix, modname) +#define PYCC(prefix) _PYCC(prefix, PYCC_MODULE_NAME) + +/* Silence warnings about unused functions */ +VISIBILITY_HIDDEN void **PYCC(_unused_) = { + (void *) Numba_make_generator, +}; + +/* The structure type constructed by PythonAPI.serialize_uncached() */ +typedef struct { + const char *data; + int len; + const char *hashbuf; + const char *func_ptr; // pointer to unwrap dyn args function + int alloc_flag; +} env_def_t; + +/* Environment GlobalVariable address type */ +typedef void **env_gv_t; + +/* + * Recreate an environment object from a env_def_t structure. + */ +static EnvironmentObject * +recreate_environment(PyObject *module, env_def_t env) +{ + EnvironmentObject *envobj; + PyObject *env_consts; + + env_consts = numba_unpickle(env.data, env.len, env.hashbuf); + if (env_consts == NULL) + return NULL; + if (!PyList_Check(env_consts)) { + PyErr_Format(PyExc_TypeError, + "environment constants should be a list, got '%s'", + Py_TYPE(env_consts)->tp_name); + Py_DECREF(env_consts); + return NULL; + } + + envobj = env_new_empty(&EnvironmentType); + if (envobj == NULL) { + Py_DECREF(env_consts); + return NULL; + } + envobj->consts = env_consts; + envobj->globals = PyModule_GetDict(module); + if (envobj->globals == NULL) { + Py_DECREF(envobj); + return NULL; + } + Py_INCREF(envobj->globals); + return envobj; +} + +/* + * Subroutine to initialize all resources required for running the + * pycc-compiled functions. 
+ */ + +int +PYCC(pycc_init_) (PyObject *module, PyMethodDef *defs, + env_def_t *envs, + env_gv_t *envgvs) +{ + /* Aligns hashsecret with values in current python process so that + * hashes computed inside the pycc module are correct if imported + * by the current process. Imports in a new process get the right + * hash secret through: + * `numba.cpython.hashing._load_hashsecret`. + */ + _numba_hashsecret_siphash_k0 = _Py_HashSecret.siphash.k0; + _numba_hashsecret_siphash_k1 = _Py_HashSecret.siphash.k1; + _numba_hashsecret_djbx33a_suffix = _Py_HashSecret.djbx33a.suffix; + + PyMethodDef *fdef; + PyObject *modname = NULL; + PyObject *docobj = NULL; + int i; + + if (!init_numpy()) { + goto error; + } + if (init_dynfunc_module(module)) { + goto error; + } + /* Initialize random generation. */ + numba_rnd_ensure_global_init(); + +#if PYCC_USE_NRT + NRT_MemSys_init(); + if (init_nrt_python_module(module)) { + goto error; + } +#endif + + modname = PyObject_GetAttrString(module, "__name__"); + if (modname == NULL) { + goto error; + } + + /* Empty docstring for all compiled functions */ + docobj = PyString_FromString(""); + if (docobj == NULL) { + goto error; + } + + /* Overwrite C method objects with our own Closure objects, in order + * to make their environments available to the compiled functions. 
+ */ + for (i = 0, fdef = defs; fdef->ml_name != NULL; i++, fdef++) { + PyObject *func; + PyObject *nameobj; + EnvironmentObject *envobj; + + envobj = recreate_environment(module, envs[i]); + if (envobj == NULL) { + goto error; + } + nameobj = PyString_FromString(fdef->ml_name); + if (nameobj == NULL) { + Py_DECREF(envobj); + goto error; + } + // Store the environment pointer into the global + *envgvs[i] = envobj; + + func = pycfunction_new(module, nameobj, docobj, + fdef->ml_meth, envobj, NULL); + Py_DECREF(envobj); + Py_DECREF(nameobj); + + if (func == NULL) { + goto error; + } + if (PyObject_SetAttrString(module, fdef->ml_name, func)) { + Py_DECREF(func); + goto error; + } + Py_DECREF(func); + } + Py_DECREF(docobj); + Py_DECREF(modname); + return 0; + +error: + Py_XDECREF(docobj); + Py_XDECREF(modname); + return -1; +} diff --git a/venv/lib/python3.10/site-packages/numba/pycc/platform.py b/venv/lib/python3.10/site-packages/numba/pycc/platform.py new file mode 100644 index 0000000000000000000000000000000000000000..6f490d7ef6a397472110801a81b213dcbab5cd3c --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/pycc/platform.py @@ -0,0 +1,218 @@ +import setuptools +from setuptools.command.build_ext import build_ext +from setuptools.dist import Distribution +import numpy as np + +import functools +import os +import subprocess +import sys +from tempfile import mkdtemp +from contextlib import contextmanager +from pathlib import Path + +# Wire in distutils components from setuptools +CCompiler = setuptools.distutils.ccompiler.CCompiler +new_compiler = setuptools.distutils.ccompiler.new_compiler +customize_compiler = setuptools.distutils.sysconfig.customize_compiler +log = setuptools.distutils.log + + +_configs = { + # DLL suffix, Python C extension suffix + 'win': ('.dll', '.pyd'), + 'default': ('.so', '.so'), +} + + +def get_configs(arg): + return _configs.get(sys.platform[:3], _configs['default'])[arg] + + +find_shared_ending = functools.partial(get_configs, 0) 
+find_pyext_ending = functools.partial(get_configs, 1) + +@contextmanager +def _gentmpfile(suffix): + # windows locks the tempfile so use a tempdir + file, see + # https://github.com/numba/numba/issues/3304 + try: + tmpdir = mkdtemp() + ntf = open(os.path.join(tmpdir, "temp%s" % suffix), 'wt') + yield ntf + finally: + try: + ntf.close() + os.remove(ntf) + except: + pass + else: + os.rmdir(tmpdir) + + +@functools.lru_cache(maxsize=1) +def external_compiler_works(): + """ + Returns True if the "external compiler" bound in numpy.distutil is present + and working, False otherwise. + """ + compiler = new_compiler() + customize_compiler(compiler) + for suffix in ['.c', '.cxx']: + try: + with _gentmpfile(suffix) as ntf: + simple_c = "int main(void) { return 0; }" + ntf.write(simple_c) + ntf.flush() + ntf.close() + # *output_dir* is set to avoid the compiler putting temp files + # in the current directory. + compiler.compile([ntf.name], output_dir=Path(ntf.name).anchor) + except Exception: # likely CompileError or file system issue + return False + return True + + +class _DummyExtension(object): + libraries = [] + + +class Toolchain(object): + + def __init__(self): + if not external_compiler_works(): + self._raise_external_compiler_error() + + self._verbose = False + self._compiler = new_compiler() + customize_compiler(self._compiler) + self._build_ext = build_ext(Distribution()) + self._build_ext.finalize_options() + self._py_lib_dirs = self._build_ext.library_dirs + self._py_include_dirs = self._build_ext.include_dirs + np_compile_args = {'include_dirs': [np.get_include(),],} + if sys.platform == 'win32': + np_compile_args['libraries'] = [] + else: + np_compile_args['libraries'] = ['m',] + self._math_info = np_compile_args + + @property + def verbose(self): + return self._verbose + + @verbose.setter + def verbose(self, value): + self._verbose = value + # DEBUG will let Numpy spew many messages, so stick to INFO + # to print commands executed by distutils + 
log.set_threshold(log.INFO if value else log.WARN) + + def _raise_external_compiler_error(self): + basemsg = ("Attempted to compile AOT function without the " + "compiler used by `numpy.distutils` present.") + conda_msg = "If using conda try:\n\n#> conda install %s" + plt = sys.platform + if plt.startswith('linux'): + if sys.maxsize <= 2 ** 32: + compilers = ['gcc_linux-32', 'gxx_linux-32'] + else: + compilers = ['gcc_linux-64', 'gxx_linux-64'] + msg = "%s %s" % (basemsg, conda_msg % ' '.join(compilers)) + elif plt.startswith('darwin'): + compilers = ['clang_osx-64', 'clangxx_osx-64'] + msg = "%s %s" % (basemsg, conda_msg % ' '.join(compilers)) + elif plt.startswith('win32'): + winmsg = "Cannot find suitable msvc." + msg = "%s %s" % (basemsg, winmsg) + else: + msg = "Unknown platform %s" % plt + raise RuntimeError(msg) + + def compile_objects(self, sources, output_dir, + include_dirs=(), depends=(), macros=(), + extra_cflags=None): + """ + Compile the given source files into a separate object file each, + all beneath the *output_dir*. A list of paths to object files + is returned. + + *macros* has the same format as in distutils: a list of 1- or 2-tuples. + If a 1-tuple (name,), the given name is considered undefined by + the C preprocessor. + If a 2-tuple (name, value), the given name is expanded into the + given value by the C preprocessor. + """ + objects = self._compiler.compile(sources, + output_dir=output_dir, + include_dirs=include_dirs, + depends=depends, + macros=macros or [], + extra_preargs=extra_cflags) + return objects + + def link_shared(self, output, objects, libraries=(), + library_dirs=(), export_symbols=(), + extra_ldflags=None): + """ + Create a shared library *output* linking the given *objects* + and *libraries* (all strings). 
+ """ + output_dir, output_filename = os.path.split(output) + self._compiler.link(CCompiler.SHARED_OBJECT, objects, + output_filename, output_dir, + libraries, library_dirs, + export_symbols=export_symbols, + extra_preargs=extra_ldflags) + + def get_python_libraries(self): + """ + Get the library arguments necessary to link with Python. + """ + libs = self._build_ext.get_libraries(_DummyExtension()) + if sys.platform == 'win32': + # Under Windows, need to link explicitly against the CRT, + # as the MSVC compiler would implicitly do. + # (XXX msvcrtd in pydebug mode?) + libs = libs + ['msvcrt'] + return libs + self._math_info['libraries'] + + def get_python_library_dirs(self): + """ + Get the library directories necessary to link with Python. + """ + return list(self._py_lib_dirs) + + def get_python_include_dirs(self): + """ + Get the include directories necessary to compile against the Python + and Numpy C APIs. + """ + return list(self._py_include_dirs) + self._math_info['include_dirs'] + + def get_ext_filename(self, ext_name): + """ + Given a C extension's module name, return its intended filename. + """ + return self._build_ext.get_ext_filename(ext_name) + + +def _quote_arg(arg): + """ + Quote the argument for safe use in a shell command line. + """ + # If there is a quote in the string, assume relevants parts of the + # string are already quoted (e.g. 
'-I"C:\\Program Files\\..."') + if '"' not in arg and ' ' in arg: + return '"%s"' % arg + return arg + + +def _is_sequence(arg): + if isinstance(arg, (str, bytes)): + return False + try: + len(arg) + return True + except Exception: + return False diff --git a/venv/lib/python3.10/site-packages/numba/scripts/__init__.py b/venv/lib/python3.10/site-packages/numba/scripts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/numba/scripts/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/scripts/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2221ab4c9ba89326f0774785f392e62ff1337ec Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/scripts/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/scripts/__pycache__/generate_lower_listing.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/scripts/__pycache__/generate_lower_listing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47f9b7e8fc4c06b3bade8b7d16cffa13b78d2afd Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/scripts/__pycache__/generate_lower_listing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/scripts/generate_lower_listing.py b/venv/lib/python3.10/site-packages/numba/scripts/generate_lower_listing.py new file mode 100644 index 0000000000000000000000000000000000000000..1a93e7a3d0f48007c181ba2ab44439883c749995 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/scripts/generate_lower_listing.py @@ -0,0 +1,169 @@ +""" +Generate documentation for all registered implementation for lowering +using reStructured text. 
+""" + + +from subprocess import check_output + +import os.path +try: + from StringIO import StringIO # py2 +except ImportError: + from io import StringIO +from collections import defaultdict +import inspect +from functools import partial + +import numba +from numba.core.registry import cpu_target + + +def git_hash(): + out = check_output(['git', 'log', "--pretty=format:'%H'", '-n', '1']) + return out.decode('ascii').strip("'\"") + + +def get_func_name(fn): + return getattr(fn, '__qualname__', fn.__name__) + + +def gather_function_info(backend): + fninfos = defaultdict(list) + basepath = os.path.dirname(os.path.dirname(numba.__file__)) + for fn, osel in backend._defns.items(): + for sig, impl in osel.versions: + info = {} + fninfos[fn].append(info) + info['fn'] = fn + info['sig'] = sig + code, firstlineno = inspect.getsourcelines(impl) + path = inspect.getsourcefile(impl) + info['impl'] = { + 'name': get_func_name(impl), + 'filename': os.path.relpath(path, start=basepath), + 'lines': (firstlineno, firstlineno + len(code) - 1), + 'docstring': impl.__doc__ + } + + return fninfos + + +def bind_file_to_print(fobj): + return partial(print, file=fobj) + + +def format_signature(sig): + def fmt(c): + try: + return c.__name__ + except AttributeError: + return repr(c).strip('\'"') + out = tuple(map(fmt, sig)) + return '`({0})`'.format(', '.join(out)) + + +github_url = ('https://github.com/numba/numba/blob/' + '{commit}/{path}#L{firstline}-L{lastline}') + +description = """ +This lists all lowering definition registered to the CPU target. +Each subsection corresponds to a Python function that is supported by numba +nopython mode. These functions have one or more lower implementation with +different signatures. The compiler chooses the most specific implementation +from all overloads. 
+""" + + +def format_function_infos(fninfos): + buf = StringIO() + try: + print = bind_file_to_print(buf) + + title_line = "Lowering Listing" + print(title_line) + print('=' * len(title_line)) + + print(description) + + commit = git_hash() + + def format_fname(fn): + try: + fname = "{0}.{1}".format(fn.__module__, get_func_name(fn)) + except AttributeError: + fname = repr(fn) + return fn, fname + + for fn, fname in sorted(map(format_fname, fninfos), key=lambda x: x[1]): + impinfos = fninfos[fn] + header_line = "``{0}``".format(fname) + print(header_line) + print('-' * len(header_line)) + print() + + formatted_sigs = map( + lambda x: format_signature(x['sig']), impinfos) + sorted_impinfos = sorted(zip(formatted_sigs, impinfos), + key=lambda x: x[0]) + + col_signatures = ['Signature'] + col_urls = ['Definition'] + + for fmtsig, info in sorted_impinfos: + impl = info['impl'] + + filename = impl['filename'] + lines = impl['lines'] + fname = impl['name'] + + source = '{0} lines {1}-{2}'.format(filename, *lines) + link = github_url.format(commit=commit, path=filename, + firstline=lines[0], lastline=lines[1]) + url = '``{0}`` `{1} <{2}>`_'.format(fname, source, link) + + col_signatures.append(fmtsig) + col_urls.append(url) + + # table formatting + max_width_col_sig = max(map(len, col_signatures)) + max_width_col_url = max(map(len, col_urls)) + padding = 2 + width_col_sig = padding * 2 + max_width_col_sig + width_col_url = padding * 2 + max_width_col_url + line_format = "{{0:^{0}}} {{1:^{1}}}".format(width_col_sig, + width_col_url) + print(line_format.format('=' * width_col_sig, '=' * width_col_url)) + print(line_format.format(col_signatures[0], col_urls[0])) + print(line_format.format('=' * width_col_sig, '=' * width_col_url)) + for sig, url in zip(col_signatures[1:], col_urls[1:]): + print(line_format.format(sig, url)) + print(line_format.format('=' * width_col_sig, '=' * width_col_url)) + print() + + return buf.getvalue() + finally: + buf.close() + + +# Main routine for 
this module: + +def gen_lower_listing(path=None): + """ + Generate lowering listing to ``path`` or (if None) to stdout. + """ + cpu_backend = cpu_target.target_context + cpu_backend.refresh() + + fninfos = gather_function_info(cpu_backend) + out = format_function_infos(fninfos) + + if path is None: + print(out) + else: + with open(path, 'w') as fobj: + print(out, file=fobj) + + +if __name__ == '__main__': + gen_lower_listing() diff --git a/venv/lib/python3.10/site-packages/numba/stencils/__init__.py b/venv/lib/python3.10/site-packages/numba/stencils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/numba/stencils/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/stencils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b469047b2a9d54fc4e25d312fa48bf8b6c04c386 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/stencils/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/stencils/__pycache__/stencil.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/stencils/__pycache__/stencil.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5cc6e877fb86865ce92cbb8f932d6f0fd3712dc Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/stencils/__pycache__/stencil.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/stencils/__pycache__/stencilparfor.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/stencils/__pycache__/stencilparfor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be1e4ad866fee1b8e2424ca259a4223b80f23159 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/stencils/__pycache__/stencilparfor.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/stencils/stencil.py b/venv/lib/python3.10/site-packages/numba/stencils/stencil.py new file mode 100644 index 0000000000000000000000000000000000000000..61df4e0b61cc80068b3d4f729d1ad59b3da3413f --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/stencils/stencil.py @@ -0,0 +1,836 @@ +# +# Copyright (c) 2017 Intel Corporation +# SPDX-License-Identifier: BSD-2-Clause +# + +import copy + +import numpy as np +from llvmlite import ir as lir + +from numba.core import types, typing, utils, ir, config, ir_utils, registry +from numba.core.typing.templates import (CallableTemplate, signature, + infer_global, AbstractTemplate) +from numba.core.imputils import lower_builtin +from numba.core.extending import register_jitable +from numba.core.errors import NumbaValueError +from numba.misc.special import literal_unroll +import numba + +import operator +from numba.np import numpy_support + +class StencilFuncLowerer(object): + '''Callable class responsible for lowering calls to a specific StencilFunc. + ''' + def __init__(self, sf): + self.stencilFunc = sf + + def __call__(self, context, builder, sig, args): + cres = self.stencilFunc.compile_for_argtys(sig.args, {}, + sig.return_type, None) + res = context.call_internal(builder, cres.fndesc, sig, args) + context.add_linking_libs([cres.library]) + return res + +@register_jitable +def raise_if_incompatible_array_sizes(a, *args): + ashape = a.shape + + # We need literal_unroll here because the stencil might take + # multiple input arrays with different types that are not compatible + # (e.g. values as float[:] and flags as bool[:]) + # When more than three total arrays are given, the second and third + # are iterated over in the loop below. Without literal_unroll, their + # types have to match. 
+ # An example failing signature without literal_unroll might be + # (float[:], float[:], bool[:]) (Just (float[:], bool[:]) wouldn't fail) + for arg in literal_unroll(args): + if a.ndim != arg.ndim: + raise ValueError("Secondary stencil array does not have same number " + " of dimensions as the first stencil input.") + argshape = arg.shape + for i in range(len(ashape)): + if ashape[i] > argshape[i]: + raise ValueError("Secondary stencil array has some dimension " + "smaller the same dimension in the first " + "stencil input.") + +def slice_addition(the_slice, addend): + """ Called by stencil in Python mode to add the loop index to a + user-specified slice. + """ + return slice(the_slice.start + addend, the_slice.stop + addend) + +class StencilFunc(object): + """ + A special type to hold stencil information for the IR. + """ + + id_counter = 0 + + def __init__(self, kernel_ir, mode, options): + self.id = type(self).id_counter + type(self).id_counter += 1 + self.kernel_ir = kernel_ir + self.mode = mode + self.options = options + self.kws = [] # remember original kws arguments + + # stencils only supported for CPU context currently + self._typingctx = registry.cpu_target.typing_context + self._targetctx = registry.cpu_target.target_context + self._install_type(self._typingctx) + self.neighborhood = self.options.get("neighborhood") + self._type_cache = {} + self._lower_me = StencilFuncLowerer(self) + + def replace_return_with_setitem(self, blocks, index_vars, out_name): + """ + Find return statements in the IR and replace them with a SetItem + call of the value "returned" by the kernel into the result array. + Returns the block labels that contained return statements. + """ + ret_blocks = [] + + for label, block in blocks.items(): + scope = block.scope + loc = block.loc + new_body = [] + for stmt in block.body: + if isinstance(stmt, ir.Return): + ret_blocks.append(label) + # If 1D array then avoid the tuple construction. 
+ if len(index_vars) == 1: + rvar = ir.Var(scope, out_name, loc) + ivar = ir.Var(scope, index_vars[0], loc) + new_body.append(ir.SetItem(rvar, ivar, stmt.value, loc)) + else: + # Convert the string names of the index variables into + # ir.Var's. + var_index_vars = [] + for one_var in index_vars: + index_var = ir.Var(scope, one_var, loc) + var_index_vars += [index_var] + + s_index_var = scope.redefine("stencil_index", loc) + # Build a tuple from the index ir.Var's. + tuple_call = ir.Expr.build_tuple(var_index_vars, loc) + new_body.append(ir.Assign(tuple_call, s_index_var, loc)) + rvar = ir.Var(scope, out_name, loc) + # Write the return statements original value into + # the array using the tuple index. + si = ir.SetItem(rvar, s_index_var, stmt.value, loc) + new_body.append(si) + else: + new_body.append(stmt) + block.body = new_body + return ret_blocks + + def add_indices_to_kernel(self, kernel, index_names, ndim, + neighborhood, standard_indexed, typemap, calltypes): + """ + Transforms the stencil kernel as specified by the user into one + that includes each dimension's index variable as part of the getitem + calls. So, in effect array[-1] becomes array[index0-1]. 
+ """ + const_dict = {} + kernel_consts = [] + + if config.DEBUG_ARRAY_OPT >= 1: + print("add_indices_to_kernel", ndim, neighborhood) + ir_utils.dump_blocks(kernel.blocks) + + if neighborhood is None: + need_to_calc_kernel = True + else: + need_to_calc_kernel = False + if len(neighborhood) != ndim: + raise NumbaValueError("%d dimensional neighborhood specified " + "for %d dimensional input array" % + (len(neighborhood), ndim)) + + tuple_table = ir_utils.get_tuple_table(kernel.blocks) + + relatively_indexed = set() + + for block in kernel.blocks.values(): + scope = block.scope + loc = block.loc + new_body = [] + for stmt in block.body: + if (isinstance(stmt, ir.Assign) and + isinstance(stmt.value, ir.Const)): + if config.DEBUG_ARRAY_OPT >= 1: + print("remembering in const_dict", stmt.target.name, + stmt.value.value) + # Remember consts for use later. + const_dict[stmt.target.name] = stmt.value.value + if ((isinstance(stmt, ir.Assign) + and isinstance(stmt.value, ir.Expr) + and stmt.value.op in ['setitem', 'static_setitem'] + and stmt.value.value.name in kernel.arg_names) or + (isinstance(stmt, ir.SetItem) + and stmt.target.name in kernel.arg_names)): + raise NumbaValueError("Assignments to arrays passed to " \ + "stencil kernels is not allowed.") + if (isinstance(stmt, ir.Assign) + and isinstance(stmt.value, ir.Expr) + and stmt.value.op in ['getitem', 'static_getitem'] + and stmt.value.value.name in kernel.arg_names + and stmt.value.value.name not in standard_indexed): + # We found a getitem from the input array. + if stmt.value.op == 'getitem': + stmt_index_var = stmt.value.index + else: + stmt_index_var = stmt.value.index_var + # allow static_getitem since rewrite passes are applied + #raise ValueError("Unexpected static_getitem in add_indices_to_kernel.") + + relatively_indexed.add(stmt.value.value.name) + + # Store the index used after looking up the variable in + # the const dictionary. 
+ if need_to_calc_kernel: + assert hasattr(stmt_index_var, 'name') + + if stmt_index_var.name in tuple_table: + kernel_consts += [tuple_table[stmt_index_var.name]] + elif stmt_index_var.name in const_dict: + kernel_consts += [const_dict[stmt_index_var.name]] + else: + raise NumbaValueError("stencil kernel index is not " + "constant, 'neighborhood' option required") + + if ndim == 1: + # Single dimension always has index variable 'index0'. + # tmpvar will hold the real index and is computed by + # adding the relative offset in stmt.value.index to + # the current absolute location in index0. + index_var = ir.Var(scope, index_names[0], loc) + tmpvar = scope.redefine("stencil_index", loc) + stmt_index_var_typ = typemap[stmt_index_var.name] + # If the array is indexed with a slice then we + # have to add the index value with a call to + # slice_addition. + if isinstance(stmt_index_var_typ, types.misc.SliceType): + sa_var = scope.redefine("slice_addition", loc) + sa_func = numba.njit(slice_addition) + sa_func_typ = types.functions.Dispatcher(sa_func) + typemap[sa_var.name] = sa_func_typ + g_sa = ir.Global("slice_addition", sa_func, loc) + new_body.append(ir.Assign(g_sa, sa_var, loc)) + slice_addition_call = ir.Expr.call(sa_var, [stmt_index_var, index_var], (), loc) + calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [stmt_index_var_typ, types.intp], {}) + new_body.append(ir.Assign(slice_addition_call, tmpvar, loc)) + new_body.append(ir.Assign( + ir.Expr.getitem(stmt.value.value, tmpvar, loc), + stmt.target, loc)) + else: + acc_call = ir.Expr.binop(operator.add, stmt_index_var, + index_var, loc) + new_body.append(ir.Assign(acc_call, tmpvar, loc)) + new_body.append(ir.Assign( + ir.Expr.getitem(stmt.value.value, tmpvar, loc), + stmt.target, loc)) + else: + index_vars = [] + sum_results = [] + s_index_var = scope.redefine("stencil_index", loc) + const_index_vars = [] + ind_stencils = [] + + stmt_index_var_typ = typemap[stmt_index_var.name] + # Same 
idea as above but you have to extract + # individual elements out of the tuple indexing + # expression and add the corresponding index variable + # to them and then reconstitute as a tuple that can + # index the array. + for dim in range(ndim): + tmpvar = scope.redefine("const_index", loc) + new_body.append(ir.Assign(ir.Const(dim, loc), + tmpvar, loc)) + const_index_vars += [tmpvar] + index_var = ir.Var(scope, index_names[dim], loc) + index_vars += [index_var] + + tmpvar = scope.redefine("ind_stencil_index", loc) + ind_stencils += [tmpvar] + getitemvar = scope.redefine("getitem", loc) + getitemcall = ir.Expr.getitem(stmt_index_var, + const_index_vars[dim], loc) + new_body.append(ir.Assign(getitemcall, getitemvar, loc)) + # Get the type of this particular part of the index tuple. + if isinstance(stmt_index_var_typ, types.ConstSized): + one_index_typ = stmt_index_var_typ[dim] + else: + one_index_typ = stmt_index_var_typ[:] + # If the array is indexed with a slice then we + # have to add the index value with a call to + # slice_addition. 
+ if isinstance(one_index_typ, types.misc.SliceType): + sa_var = scope.redefine("slice_addition", loc) + sa_func = numba.njit(slice_addition) + sa_func_typ = types.functions.Dispatcher(sa_func) + typemap[sa_var.name] = sa_func_typ + g_sa = ir.Global("slice_addition", sa_func, loc) + new_body.append(ir.Assign(g_sa, sa_var, loc)) + slice_addition_call = ir.Expr.call(sa_var, [getitemvar, index_vars[dim]], (), loc) + calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [one_index_typ, types.intp], {}) + new_body.append(ir.Assign(slice_addition_call, tmpvar, loc)) + else: + acc_call = ir.Expr.binop(operator.add, getitemvar, + index_vars[dim], loc) + new_body.append(ir.Assign(acc_call, tmpvar, loc)) + + tuple_call = ir.Expr.build_tuple(ind_stencils, loc) + new_body.append(ir.Assign(tuple_call, s_index_var, loc)) + new_body.append(ir.Assign( + ir.Expr.getitem(stmt.value.value,s_index_var,loc), + stmt.target,loc)) + else: + new_body.append(stmt) + block.body = new_body + + if need_to_calc_kernel: + # Find the size of the kernel by finding the maximum absolute value + # index used in the kernel specification. 
+ neighborhood = [[0,0] for _ in range(ndim)] + if len(kernel_consts) == 0: + raise NumbaValueError("Stencil kernel with no accesses to " + "relatively indexed arrays.") + + for index in kernel_consts: + if isinstance(index, tuple) or isinstance(index, list): + for i in range(len(index)): + te = index[i] + if isinstance(te, ir.Var) and te.name in const_dict: + te = const_dict[te.name] + if isinstance(te, int): + neighborhood[i][0] = min(neighborhood[i][0], te) + neighborhood[i][1] = max(neighborhood[i][1], te) + else: + raise NumbaValueError( + "stencil kernel index is not constant," + "'neighborhood' option required") + index_len = len(index) + elif isinstance(index, int): + neighborhood[0][0] = min(neighborhood[0][0], index) + neighborhood[0][1] = max(neighborhood[0][1], index) + index_len = 1 + else: + raise NumbaValueError( + "Non-tuple or non-integer used as stencil index.") + if index_len != ndim: + raise NumbaValueError( + "Stencil index does not match array dimensionality.") + + return (neighborhood, relatively_indexed) + + + def get_return_type(self, argtys): + if config.DEBUG_ARRAY_OPT >= 1: + print("get_return_type", argtys) + ir_utils.dump_blocks(self.kernel_ir.blocks) + + if not isinstance(argtys[0], types.npytypes.Array): + raise NumbaValueError("The first argument to a stencil kernel must " + "be the primary input array.") + + from numba.core import typed_passes + typemap, return_type, calltypes, _ = typed_passes.type_inference_stage( + self._typingctx, + self._targetctx, + self.kernel_ir, + argtys, + None, + {}) + if isinstance(return_type, types.npytypes.Array): + raise NumbaValueError( + "Stencil kernel must return a scalar and not a numpy array.") + + real_ret = types.npytypes.Array(return_type, argtys[0].ndim, + argtys[0].layout) + return (real_ret, typemap, calltypes) + + def _install_type(self, typingctx): + """Constructs and installs a typing class for a StencilFunc object in + the input typing context. 
+ """ + _ty_cls = type('StencilFuncTyping_' + + str(self.id), + (AbstractTemplate,), + dict(key=self, generic=self._type_me)) + typingctx.insert_user_function(self, _ty_cls) + + def compile_for_argtys(self, argtys, kwtys, return_type, sigret): + # look in the type cache to find if result array is passed + (_, result, typemap, calltypes) = self._type_cache[argtys] + new_func = self._stencil_wrapper(result, sigret, return_type, + typemap, calltypes, *argtys) + return new_func + + def _type_me(self, argtys, kwtys): + """ + Implement AbstractTemplate.generic() for the typing class + built by StencilFunc._install_type(). + Return the call-site signature. + """ + if (self.neighborhood is not None and + len(self.neighborhood) != argtys[0].ndim): + raise NumbaValueError("%d dimensional neighborhood specified " + "for %d dimensional input array" % + (len(self.neighborhood), argtys[0].ndim)) + + argtys_extra = argtys + sig_extra = "" + result = None + if 'out' in kwtys: + argtys_extra += (kwtys['out'],) + sig_extra += ", out=None" + result = kwtys['out'] + + if 'neighborhood' in kwtys: + argtys_extra += (kwtys['neighborhood'],) + sig_extra += ", neighborhood=None" + + # look in the type cache first + if argtys_extra in self._type_cache: + (_sig, _, _, _) = self._type_cache[argtys_extra] + return _sig + + (real_ret, typemap, calltypes) = self.get_return_type(argtys) + sig = signature(real_ret, *argtys_extra) + dummy_text = ("def __numba_dummy_stencil({}{}):\n pass\n".format( + ",".join(self.kernel_ir.arg_names), sig_extra)) + dct = {} + exec(dummy_text, dct) + dummy_func = dct["__numba_dummy_stencil"] + sig = sig.replace(pysig=utils.pysignature(dummy_func)) + self._targetctx.insert_func_defn([(self._lower_me, self, argtys_extra)]) + self._type_cache[argtys_extra] = (sig, result, typemap, calltypes) + return sig + + def copy_ir_with_calltypes(self, ir, calltypes): + """ + Create a copy of a given IR along with its calltype information. 
+ We need a copy of the calltypes because copy propagation applied + to the copied IR will change the calltypes and make subsequent + uses of the original IR invalid. + """ + copy_calltypes = {} + kernel_copy = ir.copy() + kernel_copy.blocks = {} + # For each block... + for (block_label, block) in ir.blocks.items(): + new_block = copy.deepcopy(ir.blocks[block_label]) + new_block.body = [] + # For each statement in each block... + for stmt in ir.blocks[block_label].body: + # Copy the statement to the new copy of the kernel + # and if the original statement is in the original + # calltypes then add the type associated with this + # statement to the calltypes copy. + scopy = copy.deepcopy(stmt) + new_block.body.append(scopy) + if stmt in calltypes: + copy_calltypes[scopy] = calltypes[stmt] + kernel_copy.blocks[block_label] = new_block + return (kernel_copy, copy_calltypes) + + def _stencil_wrapper(self, result, sigret, return_type, typemap, calltypes, *args): + # Overall approach: + # 1) Construct a string containing a function definition for the stencil function + # that will execute the stencil kernel. This function definition includes a + # unique stencil function name, the parameters to the stencil kernel, loop + # nests across the dimensions of the input array. Those loop nests use the + # computed stencil kernel size so as not to try to compute elements where + # elements outside the bounds of the input array would be needed. + # 2) The but of the loop nest in this new function is a special sentinel + # assignment. + # 3) Get the IR of this new function. + # 4) Split the block containing the sentinel assignment and remove the sentinel + # assignment. Insert the stencil kernel IR into the stencil function IR + # after label and variable renaming of the stencil kernel IR to prevent + # conflicts with the stencil function IR. + # 5) Compile the combined stencil function IR + stencil kernel IR into existence. 
+ + # Copy the kernel so that our changes for this callsite + # won't effect other callsites. + (kernel_copy, copy_calltypes) = self.copy_ir_with_calltypes( + self.kernel_ir, calltypes) + # The stencil kernel body becomes the body of a loop, for which args aren't needed. + ir_utils.remove_args(kernel_copy.blocks) + first_arg = kernel_copy.arg_names[0] + + in_cps, out_cps = ir_utils.copy_propagate(kernel_copy.blocks, typemap) + name_var_table = ir_utils.get_name_var_table(kernel_copy.blocks) + ir_utils.apply_copy_propagate( + kernel_copy.blocks, + in_cps, + name_var_table, + typemap, + copy_calltypes) + + if "out" in name_var_table: + raise NumbaValueError("Cannot use the reserved word 'out' in stencil kernels.") + + sentinel_name = ir_utils.get_unused_var_name("__sentinel__", name_var_table) + if config.DEBUG_ARRAY_OPT >= 1: + print("name_var_table", name_var_table, sentinel_name) + + the_array = args[0] + + if config.DEBUG_ARRAY_OPT >= 1: + print("_stencil_wrapper", return_type, return_type.dtype, + type(return_type.dtype), args) + ir_utils.dump_blocks(kernel_copy.blocks) + + # We generate a Numba function to execute this stencil and here + # create the unique name of this function. + stencil_func_name = "__numba_stencil_%s_%s" % ( + hex(id(the_array)).replace("-", "_"), + self.id) + + # We will put a loop nest in the generated function for each + # dimension in the input array. Here we create the name for + # the index variable for each dimension. index0, index1, ... + index_vars = [] + for i in range(the_array.ndim): + index_var_name = ir_utils.get_unused_var_name("index" + str(i), + name_var_table) + index_vars += [index_var_name] + + # Create extra signature for out and neighborhood. 
+ out_name = ir_utils.get_unused_var_name("out", name_var_table) + neighborhood_name = ir_utils.get_unused_var_name("neighborhood", + name_var_table) + sig_extra = "" + if result is not None: + sig_extra += ", {}=None".format(out_name) + if "neighborhood" in dict(self.kws): + sig_extra += ", {}=None".format(neighborhood_name) + + # Get a list of the standard indexed array names. + standard_indexed = self.options.get("standard_indexing", []) + + if first_arg in standard_indexed: + raise NumbaValueError("The first argument to a stencil kernel must " + "use relative indexing, not standard indexing.") + + if len(set(standard_indexed) - set(kernel_copy.arg_names)) != 0: + raise NumbaValueError("Standard indexing requested for an array name " + "not present in the stencil kernel definition.") + + # Add index variables to getitems in the IR to transition the accesses + # in the kernel from relative to regular Python indexing. Returns the + # computed size of the stencil kernel and a list of the relatively indexed + # arrays. + kernel_size, relatively_indexed = self.add_indices_to_kernel( + kernel_copy, index_vars, the_array.ndim, + self.neighborhood, standard_indexed, typemap, copy_calltypes) + if self.neighborhood is None: + self.neighborhood = kernel_size + + if config.DEBUG_ARRAY_OPT >= 1: + print("After add_indices_to_kernel") + ir_utils.dump_blocks(kernel_copy.blocks) + + # The return in the stencil kernel becomes a setitem for that + # particular point in the iteration space. + ret_blocks = self.replace_return_with_setitem(kernel_copy.blocks, + index_vars, out_name) + + if config.DEBUG_ARRAY_OPT >= 1: + print("After replace_return_with_setitem", ret_blocks) + ir_utils.dump_blocks(kernel_copy.blocks) + + # Start to form the new function to execute the stencil kernel. + func_text = "def {}({}{}):\n".format(stencil_func_name, + ",".join(kernel_copy.arg_names), sig_extra) + + # Get loop ranges for each dimension, which could be either int + # or variable. 
In the latter case we'll use the extra neighborhood + # argument to the function. + ranges = [] + for i in range(the_array.ndim): + if isinstance(kernel_size[i][0], int): + lo = kernel_size[i][0] + hi = kernel_size[i][1] + else: + lo = "{}[{}][0]".format(neighborhood_name, i) + hi = "{}[{}][1]".format(neighborhood_name, i) + ranges.append((lo, hi)) + + # If there are more than one relatively indexed arrays, add a call to + # a function that will raise an error if any of the relatively indexed + # arrays are of different size than the first input array. + if len(relatively_indexed) > 1: + func_text += " raise_if_incompatible_array_sizes(" + first_arg + for other_array in relatively_indexed: + if other_array != first_arg: + func_text += "," + other_array + func_text += ")\n" + + # Get the shape of the first input array. + shape_name = ir_utils.get_unused_var_name("full_shape", name_var_table) + func_text += " {} = {}.shape\n".format(shape_name, first_arg) + + # Converts cval to a string constant + def cval_as_str(cval): + if not np.isfinite(cval): + # See if this is a string-repr numerical const, issue #7286 + if np.isnan(cval): + return "np.nan" + elif np.isinf(cval): + if cval < 0: + return "-np.inf" + else: + return "np.inf" + else: + return str(cval) + + # If we have to allocate the output array (the out argument was not used) + # then us numpy.full if the user specified a cval stencil decorator option + # or np.zeros if they didn't to allocate the array. + if result is None: + return_type_name = numpy_support.as_dtype( + return_type.dtype).type.__name__ + out_init ="{} = np.empty({}, dtype=np.{})\n".format( + out_name, shape_name, return_type_name) + + if "cval" in self.options: + cval = self.options["cval"] + cval_ty = typing.typeof.typeof(cval) + if not self._typingctx.can_convert(cval_ty, return_type.dtype): + msg = "cval type does not match stencil return type." 
+ raise NumbaValueError(msg) + else: + cval = 0 + func_text += " " + out_init + for dim in range(the_array.ndim): + start_items = [":"] * the_array.ndim + end_items = [":"] * the_array.ndim + start_items[dim] = ":-{}".format(self.neighborhood[dim][0]) + end_items[dim] = "-{}:".format(self.neighborhood[dim][1]) + func_text += " " + "{}[{}] = {}\n".format(out_name, ",".join(start_items), cval_as_str(cval)) + func_text += " " + "{}[{}] = {}\n".format(out_name, ",".join(end_items), cval_as_str(cval)) + else: # result is present, if cval is set then use it + if "cval" in self.options: + cval = self.options["cval"] + cval_ty = typing.typeof.typeof(cval) + if not self._typingctx.can_convert(cval_ty, return_type.dtype): + msg = "cval type does not match stencil return type." + raise NumbaValueError(msg) + out_init = "{}[:] = {}\n".format(out_name, cval_as_str(cval)) + func_text += " " + out_init + + offset = 1 + # Add the loop nests to the new function. + for i in range(the_array.ndim): + for j in range(offset): + func_text += " " + # ranges[i][0] is the minimum index used in the i'th dimension + # but minimum's greater than 0 don't preclude any entry in the array. + # So, take the minimum of 0 and the minimum index found in the kernel + # and this will be a negative number (potentially -0). Then, we do + # unary - on that to get the positive offset in this dimension whose + # use is precluded. + # ranges[i][1] is the maximum of 0 and the observed maximum index + # in this dimension because negative maximums would not cause us to + # preclude any entry in the array from being used. + func_text += ("for {} in range(-min(0,{})," + "{}[{}]-max(0,{})):\n").format( + index_vars[i], + ranges[i][0], + shape_name, + i, + ranges[i][1]) + offset += 1 + + for j in range(offset): + func_text += " " + # Put a sentinel in the code so we can locate it in the IR. We will + # remove this sentinel assignment and replace it with the IR for the + # stencil kernel body. 
+ func_text += "{} = 0\n".format(sentinel_name) + func_text += " return {}\n".format(out_name) + + if config.DEBUG_ARRAY_OPT >= 1: + print("new stencil func text") + print(func_text) + + # Force the new stencil function into existence. + dct = {} + dct.update(globals()) + exec(func_text, dct) + stencil_func = dct[stencil_func_name] + if sigret is not None: + pysig = utils.pysignature(stencil_func) + sigret.pysig = pysig + # Get the IR for the newly created stencil function. + from numba.core import compiler + stencil_ir = compiler.run_frontend(stencil_func) + ir_utils.remove_dels(stencil_ir.blocks) + + # rename all variables in stencil_ir afresh + var_table = ir_utils.get_name_var_table(stencil_ir.blocks) + new_var_dict = {} + reserved_names = ([sentinel_name, out_name, neighborhood_name, + shape_name] + kernel_copy.arg_names + index_vars) + for name, var in var_table.items(): + if not name in reserved_names: + assert isinstance(var, ir.Var) + new_var = var.scope.redefine(var.name, var.loc) + new_var_dict[name] = new_var.name + ir_utils.replace_var_names(stencil_ir.blocks, new_var_dict) + + stencil_stub_last_label = max(stencil_ir.blocks.keys()) + 1 + + # Shift labels in the kernel copy so they are guaranteed unique + # and don't conflict with any labels in the stencil_ir. + kernel_copy.blocks = ir_utils.add_offset_to_labels( + kernel_copy.blocks, stencil_stub_last_label) + new_label = max(kernel_copy.blocks.keys()) + 1 + # Adjust ret_blocks to account for addition of the offset. + ret_blocks = [x + stencil_stub_last_label for x in ret_blocks] + + if config.DEBUG_ARRAY_OPT >= 1: + print("ret_blocks w/ offsets", ret_blocks, stencil_stub_last_label) + print("before replace sentinel stencil_ir") + ir_utils.dump_blocks(stencil_ir.blocks) + print("before replace sentinel kernel_copy") + ir_utils.dump_blocks(kernel_copy.blocks) + + # Search all the block in the stencil outline for the sentinel. 
+ for label, block in stencil_ir.blocks.items(): + for i, inst in enumerate(block.body): + if (isinstance( inst, ir.Assign) and + inst.target.name == sentinel_name): + # We found the sentinel assignment. + loc = inst.loc + scope = block.scope + # split block across __sentinel__ + # A new block is allocated for the statements prior to the + # sentinel but the new block maintains the current block + # label. + prev_block = ir.Block(scope, loc) + prev_block.body = block.body[:i] + # The current block is used for statements after sentinel. + block.body = block.body[i + 1:] + # But the current block gets a new label. + body_first_label = min(kernel_copy.blocks.keys()) + + # The previous block jumps to the minimum labelled block of + # the parfor body. + prev_block.append(ir.Jump(body_first_label, loc)) + # Add all the parfor loop body blocks to the gufunc + # function's IR. + for (l, b) in kernel_copy.blocks.items(): + stencil_ir.blocks[l] = b + + stencil_ir.blocks[new_label] = block + stencil_ir.blocks[label] = prev_block + # Add a jump from all the blocks that previously contained + # a return in the stencil kernel to the block + # containing statements after the sentinel. + for ret_block in ret_blocks: + stencil_ir.blocks[ret_block].append( + ir.Jump(new_label, loc)) + break + else: + continue + break + + stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks) + ir_utils.remove_dels(stencil_ir.blocks) + + assert(isinstance(the_array, types.Type)) + array_types = args + + new_stencil_param_types = list(array_types) + + if config.DEBUG_ARRAY_OPT >= 1: + print("new_stencil_param_types", new_stencil_param_types) + ir_utils.dump_blocks(stencil_ir.blocks) + + # Compile the combined stencil function with the replaced loop + # body in it. 
+ ir_utils.fixup_var_define_in_scope(stencil_ir.blocks) + new_func = compiler.compile_ir( + self._typingctx, + self._targetctx, + stencil_ir, + new_stencil_param_types, + None, + compiler.DEFAULT_FLAGS, + {}) + return new_func + + def __call__(self, *args, **kwargs): + self._typingctx.refresh() + if (self.neighborhood is not None and + len(self.neighborhood) != args[0].ndim): + raise NumbaValueError("{} dimensional neighborhood specified for " + "{} dimensional input array".format( + len(self.neighborhood), args[0].ndim)) + + if 'out' in kwargs: + result = kwargs['out'] + rdtype = result.dtype + rttype = numpy_support.from_dtype(rdtype) + result_type = types.npytypes.Array(rttype, result.ndim, + numpy_support.map_layout(result)) + array_types = tuple([typing.typeof.typeof(x) for x in args]) + array_types_full = tuple([typing.typeof.typeof(x) for x in args] + + [result_type]) + else: + result = None + array_types = tuple([typing.typeof.typeof(x) for x in args]) + array_types_full = array_types + + if config.DEBUG_ARRAY_OPT >= 1: + print("__call__", array_types, args, kwargs) + + (real_ret, typemap, calltypes) = self.get_return_type(array_types) + new_func = self._stencil_wrapper(result, None, real_ret, typemap, + calltypes, *array_types_full) + + if result is None: + return new_func.entry_point(*args) + else: + return new_func.entry_point(*(args+(result,))) + +def stencil(func_or_mode='constant', **options): + # called on function without specifying mode style + if not isinstance(func_or_mode, str): + mode = 'constant' # default style + func = func_or_mode + else: + mode = func_or_mode + func = None + + for option in options: + if option not in ["cval", "standard_indexing", "neighborhood"]: + raise NumbaValueError("Unknown stencil option " + option) + + wrapper = _stencil(mode, options) + if func is not None: + return wrapper(func) + return wrapper + +def _stencil(mode, options): + if mode != 'constant': + raise NumbaValueError("Unsupported mode style " + mode) + + 
def decorated(func): + from numba.core import compiler + kernel_ir = compiler.run_frontend(func) + return StencilFunc(kernel_ir, mode, options) + + return decorated + +@lower_builtin(stencil) +def stencil_dummy_lower(context, builder, sig, args): + "lowering for dummy stencil calls" + return lir.Constant(lir.IntType(types.intp.bitwidth), 0) diff --git a/venv/lib/python3.10/site-packages/numba/stencils/stencilparfor.py b/venv/lib/python3.10/site-packages/numba/stencils/stencilparfor.py new file mode 100644 index 0000000000000000000000000000000000000000..3b80635d7586135b170c080fe396581ca014f75d --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/stencils/stencilparfor.py @@ -0,0 +1,957 @@ +# +# Copyright (c) 2017 Intel Corporation +# SPDX-License-Identifier: BSD-2-Clause +# + +import numbers +import copy +import types as pytypes +from operator import add +import operator + +import numpy as np + +import numba.parfors.parfor +from numba.core import types, ir, rewrites, config, ir_utils +from numba.core.typing.templates import infer_global, AbstractTemplate +from numba.core.typing import signature +from numba.core import utils, typing +from numba.core.ir_utils import (get_call_table, mk_unique_var, + compile_to_numba_ir, replace_arg_nodes, guard, + find_callname, require, find_const, GuardException) +from numba.core.errors import NumbaValueError +from numba.core.utils import OPERATORS_TO_BUILTINS +from numba.np import numpy_support + + +def _compute_last_ind(dim_size, index_const): + if index_const > 0: + return dim_size - index_const + else: + return dim_size + +class StencilPass(object): + def __init__(self, func_ir, typemap, calltypes, array_analysis, typingctx, + targetctx, flags): + self.func_ir = func_ir + self.typemap = typemap + self.calltypes = calltypes + self.array_analysis = array_analysis + self.typingctx = typingctx + self.targetctx = targetctx + self.flags = flags + + def run(self): + """ Finds all calls to StencilFuncs in the IR and converts them 
to parfor. + """ + from numba.stencils.stencil import StencilFunc + + # Get all the calls in the function IR. + call_table, _ = get_call_table(self.func_ir.blocks) + stencil_calls = [] + stencil_dict = {} + for call_varname, call_list in call_table.items(): + for one_call in call_list: + if isinstance(one_call, StencilFunc): + # Remember all calls to StencilFuncs. + stencil_calls.append(call_varname) + stencil_dict[call_varname] = one_call + if not stencil_calls: + return # return early if no stencil calls found + + # find and transform stencil calls + for label, block in self.func_ir.blocks.items(): + for i, stmt in reversed(list(enumerate(block.body))): + # Found a call to a StencilFunc. + if (isinstance(stmt, ir.Assign) + and isinstance(stmt.value, ir.Expr) + and stmt.value.op == 'call' + and stmt.value.func.name in stencil_calls): + kws = dict(stmt.value.kws) + # Create dictionary of input argument number to + # the argument itself. + input_dict = {i: stmt.value.args[i] for i in + range(len(stmt.value.args))} + in_args = stmt.value.args + arg_typemap = tuple(self.typemap[i.name] for i in in_args) + for arg_type in arg_typemap: + if isinstance(arg_type, types.BaseTuple): + raise NumbaValueError("Tuple parameters not " \ + "supported for stencil " \ + "kernels in parallel=True " \ + "mode.") + + out_arr = kws.get('out') + + # Get the StencilFunc object corresponding to this call. + sf = stencil_dict[stmt.value.func.name] + stencil_ir, rt, arg_to_arr_dict = get_stencil_ir(sf, + self.typingctx, arg_typemap, + block.scope, block.loc, input_dict, + self.typemap, self.calltypes) + index_offsets = sf.options.get('index_offsets', None) + gen_nodes = self._mk_stencil_parfor(label, in_args, out_arr, + stencil_ir, index_offsets, stmt.target, rt, sf, + arg_to_arr_dict) + block.body = block.body[:i] + gen_nodes + block.body[i+1:] + # Found a call to a stencil via numba.stencil(). 
+ elif (isinstance(stmt, ir.Assign) + and isinstance(stmt.value, ir.Expr) + and stmt.value.op == 'call' + and guard(find_callname, self.func_ir, stmt.value) + == ('stencil', 'numba')): + # remove dummy stencil() call + stmt.value = ir.Const(0, stmt.loc) + + def replace_return_with_setitem(self, blocks, exit_value_var, + parfor_body_exit_label): + """ + Find return statements in the IR and replace them with a SetItem + call of the value "returned" by the kernel into the result array. + Returns the block labels that contained return statements. + """ + for label, block in blocks.items(): + scope = block.scope + loc = block.loc + new_body = [] + for stmt in block.body: + if isinstance(stmt, ir.Return): + # previous stmt should have been a cast + prev_stmt = new_body.pop() + assert (isinstance(prev_stmt, ir.Assign) + and isinstance(prev_stmt.value, ir.Expr) + and prev_stmt.value.op == 'cast') + + new_body.append(ir.Assign(prev_stmt.value.value, exit_value_var, loc)) + new_body.append(ir.Jump(parfor_body_exit_label, loc)) + else: + new_body.append(stmt) + block.body = new_body + + def _mk_stencil_parfor(self, label, in_args, out_arr, stencil_ir, + index_offsets, target, return_type, stencil_func, + arg_to_arr_dict): + """ Converts a set of stencil kernel blocks to a parfor. + """ + gen_nodes = [] + stencil_blocks = stencil_ir.blocks + + if config.DEBUG_ARRAY_OPT >= 1: + print("_mk_stencil_parfor", label, in_args, out_arr, index_offsets, + return_type, stencil_func, stencil_blocks) + ir_utils.dump_blocks(stencil_blocks) + + in_arr = in_args[0] + # run copy propagate to replace in_args copies (e.g. 
a = A) + in_arr_typ = self.typemap[in_arr.name] + in_cps, out_cps = ir_utils.copy_propagate(stencil_blocks, self.typemap) + name_var_table = ir_utils.get_name_var_table(stencil_blocks) + + ir_utils.apply_copy_propagate( + stencil_blocks, + in_cps, + name_var_table, + self.typemap, + self.calltypes) + if config.DEBUG_ARRAY_OPT >= 1: + print("stencil_blocks after copy_propagate") + ir_utils.dump_blocks(stencil_blocks) + ir_utils.remove_dead(stencil_blocks, self.func_ir.arg_names, stencil_ir, + self.typemap) + if config.DEBUG_ARRAY_OPT >= 1: + print("stencil_blocks after removing dead code") + ir_utils.dump_blocks(stencil_blocks) + + # create parfor vars + ndims = self.typemap[in_arr.name].ndim + scope = in_arr.scope + loc = in_arr.loc + parfor_vars = [] + for i in range(ndims): + parfor_var = ir.Var(scope, mk_unique_var( + "$parfor_index_var"), loc) + self.typemap[parfor_var.name] = types.intp + parfor_vars.append(parfor_var) + + start_lengths, end_lengths = self._replace_stencil_accesses( + stencil_ir, parfor_vars, in_args, index_offsets, stencil_func, + arg_to_arr_dict) + + if config.DEBUG_ARRAY_OPT >= 1: + print("stencil_blocks after replace stencil accesses") + print("start_lengths:", start_lengths) + print("end_lengths:", end_lengths) + ir_utils.dump_blocks(stencil_blocks) + + # create parfor loop nests + loopnests = [] + equiv_set = self.array_analysis.get_equiv_set(label) + in_arr_dim_sizes = equiv_set.get_shape(in_arr) + + assert ndims == len(in_arr_dim_sizes) + start_inds = [] + last_inds = [] + for i in range(ndims): + last_ind = self._get_stencil_last_ind(in_arr_dim_sizes[i], + end_lengths[i], gen_nodes, scope, loc) + start_ind = self._get_stencil_start_ind( + start_lengths[i], gen_nodes, scope, loc) + start_inds.append(start_ind) + last_inds.append(last_ind) + # start from stencil size to avoid invalid array access + loopnests.append(numba.parfors.parfor.LoopNest(parfor_vars[i], + start_ind, last_ind, 1)) + + # We have to guarantee that the exit block has 
maximum label and that + # there's only one exit block for the parfor body. + # So, all return statements will change to jump to the parfor exit block. + parfor_body_exit_label = max(stencil_blocks.keys()) + 1 + stencil_blocks[parfor_body_exit_label] = ir.Block(scope, loc) + exit_value_var = ir.Var(scope, mk_unique_var("$parfor_exit_value"), loc) + self.typemap[exit_value_var.name] = return_type.dtype + + # create parfor index var + for_replacing_ret = [] + if ndims == 1: + parfor_ind_var = parfor_vars[0] + else: + parfor_ind_var = ir.Var(scope, mk_unique_var( + "$parfor_index_tuple_var"), loc) + self.typemap[parfor_ind_var.name] = types.containers.UniTuple( + types.intp, ndims) + tuple_call = ir.Expr.build_tuple(parfor_vars, loc) + tuple_assign = ir.Assign(tuple_call, parfor_ind_var, loc) + for_replacing_ret.append(tuple_assign) + + if config.DEBUG_ARRAY_OPT >= 1: + print("stencil_blocks after creating parfor index var") + ir_utils.dump_blocks(stencil_blocks) + + # empty init block + init_block = ir.Block(scope, loc) + if out_arr is None: + in_arr_typ = self.typemap[in_arr.name] + + shape_name = ir_utils.mk_unique_var("in_arr_shape") + shape_var = ir.Var(scope, shape_name, loc) + shape_getattr = ir.Expr.getattr(in_arr, "shape", loc) + self.typemap[shape_name] = types.containers.UniTuple(types.intp, + in_arr_typ.ndim) + init_block.body.extend([ir.Assign(shape_getattr, shape_var, loc)]) + + zero_name = ir_utils.mk_unique_var("zero_val") + zero_var = ir.Var(scope, zero_name, loc) + if "cval" in stencil_func.options: + cval = stencil_func.options["cval"] + # TODO: Loosen this restriction to adhere to casting rules. 
+ cval_ty = typing.typeof.typeof(cval) + if not self.typingctx.can_convert(cval_ty, return_type.dtype): + raise NumbaValueError("cval type does not match stencil " \ + "return type.") + + temp2 = return_type.dtype(cval) + else: + temp2 = return_type.dtype(0) + full_const = ir.Const(temp2, loc) + self.typemap[zero_name] = return_type.dtype + init_block.body.extend([ir.Assign(full_const, zero_var, loc)]) + + so_name = ir_utils.mk_unique_var("stencil_output") + out_arr = ir.Var(scope, so_name, loc) + self.typemap[out_arr.name] = numba.core.types.npytypes.Array( + return_type.dtype, + in_arr_typ.ndim, + in_arr_typ.layout) + dtype_g_np_var = ir.Var(scope, mk_unique_var("$np_g_var"), loc) + self.typemap[dtype_g_np_var.name] = types.misc.Module(np) + dtype_g_np = ir.Global('np', np, loc) + dtype_g_np_assign = ir.Assign(dtype_g_np, dtype_g_np_var, loc) + init_block.body.append(dtype_g_np_assign) + + return_type_name = numpy_support.as_dtype( + return_type.dtype).type.__name__ + if return_type_name == 'bool': + return_type_name = 'bool_' + dtype_np_attr_call = ir.Expr.getattr(dtype_g_np_var, return_type_name, loc) + dtype_attr_var = ir.Var(scope, mk_unique_var("$np_attr_attr"), loc) + self.typemap[dtype_attr_var.name] = types.functions.NumberClass(return_type.dtype) + dtype_attr_assign = ir.Assign(dtype_np_attr_call, dtype_attr_var, loc) + init_block.body.append(dtype_attr_assign) + + stmts = ir_utils.gen_np_call("empty", + np.empty, + out_arr, + [shape_var, dtype_attr_var], + self.typingctx, + self.typemap, + self.calltypes) + # ------------------ + # Generate the code to fill just the border with zero_var. + + # Generate a none var to use in slicing. + none_var = ir.Var(scope, mk_unique_var("$none_var"), loc) + none_assign = ir.Assign(ir.Const(None, loc), none_var, loc) + stmts.append(none_assign) + self.typemap[none_var.name] = types.none + # Generate a zero var to use in slicing. 
+ zero_index_var = ir.Var(scope, mk_unique_var("$zero_index_var"), loc) + zero_index_assign = ir.Assign(ir.Const(0, loc), zero_index_var, loc) + stmts.append(zero_index_assign) + self.typemap[zero_index_var.name] = types.intp + # Generate generic ":" slice. + # ---- Generate var to hold slice func var. + slice_func_var = ir.Var(scope, mk_unique_var("$slice_func_var"), loc) + slice_fn_ty = self.typingctx.resolve_value_type(slice) + self.typemap[slice_func_var.name] = slice_fn_ty + slice_g = ir.Global('slice', slice, loc) + slice_assign = ir.Assign(slice_g, slice_func_var, loc) + stmts.append(slice_assign) + # ---- Generate call to slice func. + sig = self.typingctx.resolve_function_type(slice_fn_ty, + (types.none,) * 2, + {}) + slice_callexpr = ir.Expr.call(func=slice_func_var, + args=(none_var, none_var), + kws=(), + loc=loc) + self.calltypes[slice_callexpr] = sig + # ---- Generate slice var + slice_var = ir.Var(scope, mk_unique_var("$slice"), loc) + self.typemap[slice_var.name] = types.slice2_type + slice_assign = ir.Assign(slice_callexpr, slice_var, loc) + stmts.append(slice_assign) + + def handle_border(slice_fn_ty, + dim, + scope, + loc, + slice_func_var, + stmts, + border_inds, + border_tuple_items, + other_arg, + other_first): + # Handle the border for start or end of the index range. + # ---- Generate call to slice func. 
+ sig = self.typingctx.resolve_function_type( + slice_fn_ty, + (types.intp,) * 2, + {}) + si = border_inds[dim] + assert(isinstance(si, (int, ir.Var))) + si_var = ir.Var(scope, mk_unique_var("$border_ind"), loc) + self.typemap[si_var.name] = types.intp + if isinstance(si, int): + si_assign = ir.Assign(ir.Const(si, loc), si_var, loc) + else: + si_assign = ir.Assign(si, si_var, loc) + stmts.append(si_assign) + + slice_callexpr = ir.Expr.call( + func=slice_func_var, + args=(other_arg, si_var) if other_first else (si_var, other_arg), + kws=(), + loc=loc) + self.calltypes[slice_callexpr] = sig + # ---- Generate slice var + border_slice_var = ir.Var(scope, mk_unique_var("$slice"), loc) + self.typemap[border_slice_var.name] = types.slice2_type + slice_assign = ir.Assign(slice_callexpr, border_slice_var, loc) + stmts.append(slice_assign) + + border_tuple_items[dim] = border_slice_var + border_ind_var = ir.Var(scope, mk_unique_var( + "$border_index_tuple_var"), loc) + self.typemap[border_ind_var.name] = types.containers.UniTuple( + types.slice2_type, ndims) + tuple_call = ir.Expr.build_tuple(border_tuple_items, loc) + tuple_assign = ir.Assign(tuple_call, border_ind_var, loc) + stmts.append(tuple_assign) + + setitem_call = ir.SetItem(out_arr, border_ind_var, zero_var, loc) + self.calltypes[setitem_call] = signature( + types.none, self.typemap[out_arr.name], + self.typemap[border_ind_var.name], + self.typemap[out_arr.name].dtype + ) + stmts.append(setitem_call) + + # For each dimension, add setitem to set border values. + for dim in range(in_arr_typ.ndim): + # First, fill all entries with ":". 
+ start_tuple_items = [slice_var] * in_arr_typ.ndim + last_tuple_items = [slice_var] * in_arr_typ.ndim + + handle_border(slice_fn_ty, + dim, + scope, + loc, + slice_func_var, + stmts, + start_inds, + start_tuple_items, + zero_index_var, + True) + handle_border(slice_fn_ty, + dim, + scope, + loc, + slice_func_var, + stmts, + last_inds, + last_tuple_items, + in_arr_dim_sizes[dim], + False) + + # ------------------ + + equiv_set.insert_equiv(out_arr, in_arr_dim_sizes) + init_block.body.extend(stmts) + else: # out is present + if "cval" in stencil_func.options: # do out[:] = cval + cval = stencil_func.options["cval"] + # TODO: Loosen this restriction to adhere to casting rules. + cval_ty = typing.typeof.typeof(cval) + if not self.typingctx.can_convert(cval_ty, return_type.dtype): + msg = "cval type does not match stencil return type." + raise NumbaValueError(msg) + + # get slice ref + slice_var = ir.Var(scope, mk_unique_var("$py_g_var"), loc) + slice_fn_ty = self.typingctx.resolve_value_type(slice) + self.typemap[slice_var.name] = slice_fn_ty + slice_g = ir.Global('slice', slice, loc) + slice_assigned = ir.Assign(slice_g, slice_var, loc) + init_block.body.append(slice_assigned) + + sig = self.typingctx.resolve_function_type(slice_fn_ty, + (types.none,) * 2, + {}) + + callexpr = ir.Expr.call(func=slice_var, args=(), kws=(), + loc=loc) + + self.calltypes[callexpr] = sig + slice_inst_var = ir.Var(scope, mk_unique_var("$slice_inst"), + loc) + self.typemap[slice_inst_var.name] = types.slice2_type + slice_assign = ir.Assign(callexpr, slice_inst_var, loc) + init_block.body.append(slice_assign) + + # get const val for cval + cval_const_val = ir.Const(return_type.dtype(cval), loc) + cval_const_var = ir.Var(scope, mk_unique_var("$cval_const"), + loc) + self.typemap[cval_const_var.name] = return_type.dtype + cval_const_assign = ir.Assign(cval_const_val, + cval_const_var, loc) + init_block.body.append(cval_const_assign) + + # do setitem on `out` array + setitemexpr = 
ir.StaticSetItem(out_arr, slice(None, None), + slice_inst_var, cval_const_var, + loc) + init_block.body.append(setitemexpr) + sig = signature(types.none, self.typemap[out_arr.name], + self.typemap[slice_inst_var.name], + self.typemap[out_arr.name].dtype) + self.calltypes[setitemexpr] = sig + + + self.replace_return_with_setitem(stencil_blocks, exit_value_var, + parfor_body_exit_label) + + if config.DEBUG_ARRAY_OPT >= 1: + print("stencil_blocks after replacing return") + ir_utils.dump_blocks(stencil_blocks) + + setitem_call = ir.SetItem(out_arr, parfor_ind_var, exit_value_var, loc) + self.calltypes[setitem_call] = signature( + types.none, self.typemap[out_arr.name], + self.typemap[parfor_ind_var.name], + self.typemap[out_arr.name].dtype + ) + stencil_blocks[parfor_body_exit_label].body.extend(for_replacing_ret) + stencil_blocks[parfor_body_exit_label].body.append(setitem_call) + + # simplify CFG of parfor body (exit block could be simplified often) + # add dummy return to enable CFG + dummy_loc = ir.Loc("stencilparfor_dummy", -1) + ret_const_var = ir.Var(scope, mk_unique_var("$cval_const"), dummy_loc) + cval_const_assign = ir.Assign(ir.Const(0, loc=dummy_loc), ret_const_var, dummy_loc) + stencil_blocks[parfor_body_exit_label].body.append(cval_const_assign) + + stencil_blocks[parfor_body_exit_label].body.append( + ir.Return(ret_const_var, dummy_loc), + ) + stencil_blocks = ir_utils.simplify_CFG(stencil_blocks) + stencil_blocks[max(stencil_blocks.keys())].body.pop() + + if config.DEBUG_ARRAY_OPT >= 1: + print("stencil_blocks after adding SetItem") + ir_utils.dump_blocks(stencil_blocks) + + pattern = ('stencil', [start_lengths, end_lengths]) + parfor = numba.parfors.parfor.Parfor(loopnests, init_block, stencil_blocks, + loc, parfor_ind_var, equiv_set, pattern, self.flags) + gen_nodes.append(parfor) + gen_nodes.append(ir.Assign(out_arr, target, loc)) + return gen_nodes + + def _get_stencil_last_ind(self, dim_size, end_length, gen_nodes, scope, + loc): + last_ind = 
dim_size + if end_length != 0: + # set last index to size minus stencil size to avoid invalid + # memory access + index_const = ir.Var(scope, mk_unique_var("stencil_const_var"), + loc) + self.typemap[index_const.name] = types.intp + if isinstance(end_length, numbers.Number): + const_assign = ir.Assign(ir.Const(end_length, loc), + index_const, loc) + else: + const_assign = ir.Assign(end_length, index_const, loc) + + gen_nodes.append(const_assign) + last_ind = ir.Var(scope, mk_unique_var("last_ind"), loc) + self.typemap[last_ind.name] = types.intp + + g_var = ir.Var(scope, mk_unique_var("compute_last_ind_var"), loc) + check_func = numba.njit(_compute_last_ind) + func_typ = types.functions.Dispatcher(check_func) + self.typemap[g_var.name] = func_typ + g_obj = ir.Global("_compute_last_ind", check_func, loc) + g_assign = ir.Assign(g_obj, g_var, loc) + gen_nodes.append(g_assign) + index_call = ir.Expr.call(g_var, [dim_size, index_const], (), loc) + self.calltypes[index_call] = func_typ.get_call_type( + self.typingctx, [types.intp, types.intp], {}) + index_assign = ir.Assign(index_call, last_ind, loc) + gen_nodes.append(index_assign) + + return last_ind + + def _get_stencil_start_ind(self, start_length, gen_nodes, scope, loc): + if isinstance(start_length, int): + return abs(min(start_length, 0)) + def get_start_ind(s_length): + return abs(min(s_length, 0)) + f_ir = compile_to_numba_ir(get_start_ind, {}, self.typingctx, + self.targetctx, (types.intp,), self.typemap, + self.calltypes) + assert len(f_ir.blocks) == 1 + block = f_ir.blocks.popitem()[1] + replace_arg_nodes(block, [start_length]) + gen_nodes += block.body[:-2] + ret_var = block.body[-2].value.value + return ret_var + + def _replace_stencil_accesses(self, stencil_ir, parfor_vars, in_args, + index_offsets, stencil_func, arg_to_arr_dict): + """ Convert relative indexing in the stencil kernel to standard indexing + by adding the loop index variables to the corresponding dimensions + of the array index tuples. 
+ """ + stencil_blocks = stencil_ir.blocks + in_arr = in_args[0] + in_arg_names = [x.name for x in in_args] + + if "standard_indexing" in stencil_func.options: + for x in stencil_func.options["standard_indexing"]: + if x not in arg_to_arr_dict: + raise NumbaValueError("Standard indexing requested for " \ + "an array name not present in the " \ + "stencil kernel definition.") + standard_indexed = [arg_to_arr_dict[x] for x in + stencil_func.options["standard_indexing"]] + else: + standard_indexed = [] + + if in_arr.name in standard_indexed: + raise NumbaValueError("The first argument to a stencil kernel " \ + "must use relative indexing, not standard " \ + "indexing.") + + ndims = self.typemap[in_arr.name].ndim + scope = in_arr.scope + loc = in_arr.loc + # replace access indices, find access lengths in each dimension + need_to_calc_kernel = stencil_func.neighborhood is None + + # If we need to infer the kernel size then initialize the minimum and + # maximum seen indices for each dimension to 0. If we already have + # the neighborhood calculated then just convert from neighborhood format + # to the separate start and end lengths format used here. + if need_to_calc_kernel: + start_lengths = ndims*[0] + end_lengths = ndims*[0] + else: + start_lengths = [x[0] for x in stencil_func.neighborhood] + end_lengths = [x[1] for x in stencil_func.neighborhood] + + # Get all the tuples defined in the stencil blocks. + tuple_table = ir_utils.get_tuple_table(stencil_blocks) + + found_relative_index = False + + # For all blocks in the stencil kernel... + for label, block in stencil_blocks.items(): + new_body = [] + # For all statements in those blocks... + for stmt in block.body: + # Reject assignments to input arrays. 
+ if ((isinstance(stmt, ir.Assign) + and isinstance(stmt.value, ir.Expr) + and stmt.value.op in ['setitem', 'static_setitem'] + and stmt.value.value.name in in_arg_names) or + ((isinstance(stmt, ir.SetItem) or + isinstance(stmt, ir.StaticSetItem)) + and stmt.target.name in in_arg_names)): + raise NumbaValueError("Assignments to arrays passed to " \ + "stencil kernels is not allowed.") + # We found a getitem for some array. If that array is an input + # array and isn't in the list of standard indexed arrays then + # update min and max seen indices if we are inferring the + # kernel size and create a new tuple where the relative offsets + # are added to loop index vars to get standard indexing. + if (isinstance(stmt, ir.Assign) + and isinstance(stmt.value, ir.Expr) + and stmt.value.op in ['static_getitem', 'getitem'] + and stmt.value.value.name in in_arg_names + and stmt.value.value.name not in standard_indexed): + index_list = stmt.value.index + # handle 1D case + if ndims == 1: + index_list = [index_list] + else: + if hasattr(index_list, 'name') and index_list.name in tuple_table: + index_list = tuple_table[index_list.name] + # indices can be inferred as constant in simple expressions + # like -c where c is constant + # handled here since this is a common stencil index pattern + stencil_ir._definitions = ir_utils.build_definitions(stencil_blocks) + index_list = [_get_const_index_expr( + stencil_ir, self.func_ir, v) for v in index_list] + if index_offsets: + index_list = self._add_index_offsets(index_list, + list(index_offsets), new_body, scope, loc) + + # update min and max indices + if need_to_calc_kernel: + # all indices should be integer to be able to calculate + # neighborhood automatically + if (isinstance(index_list, ir.Var) or + any([not isinstance(v, int) for v in index_list])): + raise NumbaValueError("Variable stencil index " \ + "only possible with known " \ + "neighborhood") + start_lengths = list(map(min, start_lengths, + index_list)) + end_lengths = 
list(map(max, end_lengths, index_list)) + found_relative_index = True + + # update access indices + index_vars = self._add_index_offsets(parfor_vars, + list(index_list), new_body, scope, loc) + + # new access index tuple + if ndims == 1: + ind_var = index_vars[0] + else: + ind_var = ir.Var(scope, mk_unique_var( + "$parfor_index_ind_var"), loc) + self.typemap[ind_var.name] = types.containers.UniTuple( + types.intp, ndims) + tuple_call = ir.Expr.build_tuple(index_vars, loc) + tuple_assign = ir.Assign(tuple_call, ind_var, loc) + new_body.append(tuple_assign) + + # getitem return type is scalar if all indices are integer + if all([self.typemap[v.name] == types.intp + for v in index_vars]): + getitem_return_typ = self.typemap[ + stmt.value.value.name].dtype + else: + # getitem returns an array + getitem_return_typ = self.typemap[stmt.value.value.name] + # new getitem with the new index var + getitem_call = ir.Expr.getitem(stmt.value.value, ind_var, + loc) + self.calltypes[getitem_call] = signature( + getitem_return_typ, + self.typemap[stmt.value.value.name], + self.typemap[ind_var.name]) + stmt.value = getitem_call + + new_body.append(stmt) + block.body = new_body + if need_to_calc_kernel and not found_relative_index: + raise NumbaValueError("Stencil kernel with no accesses to " \ + "relatively indexed arrays.") + + return start_lengths, end_lengths + + def _add_index_offsets(self, index_list, index_offsets, new_body, + scope, loc): + """ Does the actual work of adding loop index variables to the + relative index constants or variables. 
+ """ + assert len(index_list) == len(index_offsets) + + # shortcut if all values are integer + if all([isinstance(v, int) for v in index_list+index_offsets]): + # add offsets in all dimensions + return list(map(add, index_list, index_offsets)) + + out_nodes = [] + index_vars = [] + for i in range(len(index_list)): + # new_index = old_index + offset + old_index_var = index_list[i] + if isinstance(old_index_var, int): + old_index_var = ir.Var(scope, + mk_unique_var("old_index_var"), loc) + self.typemap[old_index_var.name] = types.intp + const_assign = ir.Assign(ir.Const(index_list[i], loc), + old_index_var, loc) + out_nodes.append(const_assign) + + offset_var = index_offsets[i] + if isinstance(offset_var, int): + offset_var = ir.Var(scope, + mk_unique_var("offset_var"), loc) + self.typemap[offset_var.name] = types.intp + const_assign = ir.Assign(ir.Const(index_offsets[i], loc), + offset_var, loc) + out_nodes.append(const_assign) + + if (isinstance(old_index_var, slice) + or isinstance(self.typemap[old_index_var.name], + types.misc.SliceType)): + # only one arg can be slice + assert self.typemap[offset_var.name] == types.intp + index_var = self._add_offset_to_slice(old_index_var, offset_var, + out_nodes, scope, loc) + index_vars.append(index_var) + continue + + if (isinstance(offset_var, slice) + or isinstance(self.typemap[offset_var.name], + types.misc.SliceType)): + # only one arg can be slice + assert self.typemap[old_index_var.name] == types.intp + index_var = self._add_offset_to_slice(offset_var, old_index_var, + out_nodes, scope, loc) + index_vars.append(index_var) + continue + + index_var = ir.Var(scope, + mk_unique_var("offset_stencil_index"), loc) + self.typemap[index_var.name] = types.intp + index_call = ir.Expr.binop(operator.add, old_index_var, + offset_var, loc) + self.calltypes[index_call] = self.typingctx.resolve_function_type( + operator.add, (types.intp, types.intp), {}) + index_assign = ir.Assign(index_call, index_var, loc) + 
out_nodes.append(index_assign) + index_vars.append(index_var) + + new_body.extend(out_nodes) + return index_vars + + def _add_offset_to_slice(self, slice_var, offset_var, out_nodes, scope, + loc): + if isinstance(slice_var, slice): + f_text = """def f(offset): + return slice({} + offset, {} + offset) + """.format(slice_var.start, slice_var.stop) + loc = {} + exec(f_text, {}, loc) + f = loc['f'] + args = [offset_var] + arg_typs = (types.intp,) + else: + def f(old_slice, offset): + return slice(old_slice.start + offset, old_slice.stop + offset) + args = [slice_var, offset_var] + slice_type = self.typemap[slice_var.name] + arg_typs = (slice_type, types.intp,) + _globals = self.func_ir.func_id.func.__globals__ + f_ir = compile_to_numba_ir(f, _globals, self.typingctx, self.targetctx, + arg_typs, self.typemap, self.calltypes) + _, block = f_ir.blocks.popitem() + replace_arg_nodes(block, args) + new_index = block.body[-2].value.value + out_nodes.extend(block.body[:-2]) # ignore return nodes + return new_index + +def get_stencil_ir(sf, typingctx, args, scope, loc, input_dict, typemap, + calltypes): + """get typed IR from stencil bytecode + """ + from numba.core.cpu import CPUContext + from numba.core.registry import cpu_target + from numba.core.annotations import type_annotations + from numba.core.typed_passes import type_inference_stage + + # get untyped IR + stencil_func_ir = sf.kernel_ir.copy() + # copy the IR nodes to avoid changing IR in the StencilFunc object + stencil_blocks = copy.deepcopy(stencil_func_ir.blocks) + stencil_func_ir.blocks = stencil_blocks + + name_var_table = ir_utils.get_name_var_table(stencil_func_ir.blocks) + if "out" in name_var_table: + raise NumbaValueError("Cannot use the reserved word 'out' in stencil " \ + "kernels.") + + # get typed IR with a dummy pipeline (similar to test_parfors.py) + from numba.core.registry import cpu_target + targetctx = cpu_target.target_context + + tp = DummyPipeline(typingctx, targetctx, args, stencil_func_ir) + + 
rewrites.rewrite_registry.apply('before-inference', tp.state) + + tp.state.typemap, tp.state.return_type, tp.state.calltypes, _ = type_inference_stage( + tp.state.typingctx, tp.state.targetctx, tp.state.func_ir, + tp.state.args, None) + + type_annotations.TypeAnnotation( + func_ir=tp.state.func_ir, + typemap=tp.state.typemap, + calltypes=tp.state.calltypes, + lifted=(), + lifted_from=None, + args=tp.state.args, + return_type=tp.state.return_type, + html_output=config.HTML) + + # make block labels unique + stencil_blocks = ir_utils.add_offset_to_labels(stencil_blocks, + ir_utils.next_label()) + min_label = min(stencil_blocks.keys()) + max_label = max(stencil_blocks.keys()) + ir_utils._the_max_label.update(max_label) + + if config.DEBUG_ARRAY_OPT >= 1: + print("Initial stencil_blocks") + ir_utils.dump_blocks(stencil_blocks) + + # rename variables, + var_dict = {} + for v, typ in tp.state.typemap.items(): + new_var = ir.Var(scope, mk_unique_var(v), loc) + var_dict[v] = new_var + typemap[new_var.name] = typ # add new var type for overall function + ir_utils.replace_vars(stencil_blocks, var_dict) + + if config.DEBUG_ARRAY_OPT >= 1: + print("After replace_vars") + ir_utils.dump_blocks(stencil_blocks) + + # add call types to overall function + for call, call_typ in tp.state.calltypes.items(): + calltypes[call] = call_typ + + arg_to_arr_dict = {} + # replace arg with arr + for block in stencil_blocks.values(): + for stmt in block.body: + if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Arg): + if config.DEBUG_ARRAY_OPT >= 1: + print("input_dict", input_dict, stmt.value.index, + stmt.value.name, stmt.value.index in input_dict) + arg_to_arr_dict[stmt.value.name] = input_dict[stmt.value.index].name + stmt.value = input_dict[stmt.value.index] + + if config.DEBUG_ARRAY_OPT >= 1: + print("arg_to_arr_dict", arg_to_arr_dict) + print("After replace arg with arr") + ir_utils.dump_blocks(stencil_blocks) + + ir_utils.remove_dels(stencil_blocks) + stencil_func_ir.blocks = 
stencil_blocks + return stencil_func_ir, sf.get_return_type(args)[0], arg_to_arr_dict + +class DummyPipeline(object): + def __init__(self, typingctx, targetctx, args, f_ir): + from numba.core.compiler import StateDict + self.state = StateDict() + self.state.typingctx = typingctx + self.state.targetctx = targetctx + self.state.args = args + self.state.func_ir = f_ir + self.state.typemap = None + self.state.return_type = None + self.state.calltypes = None + + +def _get_const_index_expr(stencil_ir, func_ir, index_var): + """ + infer index_var as constant if it is of a expression form like c-1 where c + is a constant in the outer function. + index_var is assumed to be inside stencil kernel + """ + const_val = guard( + _get_const_index_expr_inner, stencil_ir, func_ir, index_var) + if const_val is not None: + return const_val + return index_var + +def _get_const_index_expr_inner(stencil_ir, func_ir, index_var): + """inner constant inference function that calls constant, unary and binary + cases. 
+ """ + require(isinstance(index_var, ir.Var)) + # case where the index is a const itself in outer function + var_const = guard(_get_const_two_irs, stencil_ir, func_ir, index_var) + if var_const is not None: + return var_const + # get index definition + index_def = ir_utils.get_definition(stencil_ir, index_var) + # match inner_var = unary(index_var) + var_const = guard( + _get_const_unary_expr, stencil_ir, func_ir, index_def) + if var_const is not None: + return var_const + # match inner_var = arg1 + arg2 + var_const = guard( + _get_const_binary_expr, stencil_ir, func_ir, index_def) + if var_const is not None: + return var_const + raise GuardException + +def _get_const_two_irs(ir1, ir2, var): + """get constant in either of two IRs if available + otherwise, throw GuardException + """ + var_const = guard(find_const, ir1, var) + if var_const is not None: + return var_const + var_const = guard(find_const, ir2, var) + if var_const is not None: + return var_const + raise GuardException + +def _get_const_unary_expr(stencil_ir, func_ir, index_def): + """evaluate constant unary expr if possible + otherwise, raise GuardException + """ + require(isinstance(index_def, ir.Expr) and index_def.op == 'unary') + inner_var = index_def.value + # return -c as constant + const_val = _get_const_index_expr_inner(stencil_ir, func_ir, inner_var) + op = OPERATORS_TO_BUILTINS[index_def.fn] + return eval("{}{}".format(op, const_val)) + +def _get_const_binary_expr(stencil_ir, func_ir, index_def): + """evaluate constant binary expr if possible + otherwise, raise GuardException + """ + require(isinstance(index_def, ir.Expr) and index_def.op == 'binop') + arg1 = _get_const_index_expr_inner(stencil_ir, func_ir, index_def.lhs) + arg2 = _get_const_index_expr_inner(stencil_ir, func_ir, index_def.rhs) + op = OPERATORS_TO_BUILTINS[index_def.fn] + return eval("{}{}{}".format(arg1, op, arg2)) diff --git a/venv/lib/python3.10/site-packages/numba/testing/__init__.py 
b/venv/lib/python3.10/site-packages/numba/testing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4eb89a92fcb80765a6f54b11bebde867ced37aa4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/testing/__init__.py @@ -0,0 +1,61 @@ +import os +import sys +import functools +import unittest +import traceback +from fnmatch import fnmatch +from os.path import join, isfile, relpath, normpath, splitext + +from .main import NumbaTestProgram, SerialSuite, make_tag_decorator +from numba.core import config + + +def load_testsuite(loader, dir): + """Find tests in 'dir'.""" + try: + suite = unittest.TestSuite() + files = [] + for f in os.listdir(dir): + path = join(dir, f) + if isfile(path) and fnmatch(f, 'test_*.py'): + files.append(f) + elif isfile(join(path, '__init__.py')): + suite.addTests(loader.discover(path)) + for f in files: + # turn 'f' into a filename relative to the toplevel dir... + f = relpath(join(dir, f), loader._top_level_dir) + # ...and translate it to a module name. + f = splitext(normpath(f.replace(os.path.sep, '.')))[0] + suite.addTests(loader.loadTestsFromName(f)) + return suite + except Exception: + traceback.print_exc(file=sys.stderr) + sys.exit(-1) + + +def run_tests(argv=None, defaultTest=None, topleveldir=None, + xmloutput=None, verbosity=1, nomultiproc=False): + """ + args + ---- + - xmloutput [str or None] + Path of XML output directory (optional) + - verbosity [int] + Verbosity level of tests output + + Returns the TestResult object after running the test *suite*. 
+ """ + + if xmloutput is not None: + import xmlrunner + runner = xmlrunner.XMLTestRunner(output=xmloutput) + else: + runner = None + prog = NumbaTestProgram(argv=argv, + module=None, + defaultTest=defaultTest, + topleveldir=topleveldir, + testRunner=runner, exit=False, + verbosity=verbosity, + nomultiproc=nomultiproc) + return prog.result diff --git a/venv/lib/python3.10/site-packages/numba/testing/__main__.py b/venv/lib/python3.10/site-packages/numba/testing/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..964b0ce910f027dfb48790b49e2360dcd806ad7c --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/testing/__main__.py @@ -0,0 +1,4 @@ +import sys +from numba.testing import run_tests + +sys.exit(0 if run_tests(sys.argv).wasSuccessful() else 1) diff --git a/venv/lib/python3.10/site-packages/numba/testing/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/testing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8daec138f03ed65cf1d0563b884e687c94cef5a Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/testing/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/testing/__pycache__/__main__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/testing/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dcce8fba6a4c7aad00037af90a2d17831313d6d3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/testing/__pycache__/__main__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/testing/__pycache__/_runtests.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/testing/__pycache__/_runtests.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21acd2cac79c5e7cd99fa1d744794e968a372619 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/numba/testing/__pycache__/_runtests.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/testing/__pycache__/loader.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/testing/__pycache__/loader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9eb7d2c0a2727456e1799ccc374dbf45dc49e2b9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/testing/__pycache__/loader.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/testing/__pycache__/main.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/testing/__pycache__/main.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7360eb72e6039acb2110d5af74448cb869ec6092 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/testing/__pycache__/main.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/testing/__pycache__/notebook.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/testing/__pycache__/notebook.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aab6f51c9c8b7ab75c4371a1e708a6470ada11b4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/testing/__pycache__/notebook.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/testing/_runtests.py b/venv/lib/python3.10/site-packages/numba/testing/_runtests.py new file mode 100644 index 0000000000000000000000000000000000000000..7df2bd7435897ec837b188eb857302d47ea6942b --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/testing/_runtests.py @@ -0,0 +1,114 @@ +import json +import re +import logging + + +def _main(argv, **kwds): + from numba.testing import run_tests + # This helper function assumes the first element of argv + # is the name of the calling program. + # The 'main' API function is invoked in-process, and thus + # will synthesize that name. 
+ + if '--log' in argv: + logging.basicConfig(level=logging.DEBUG) + argv.remove('--log') + + if '--failed-first' in argv: + # Failed first + argv.remove('--failed-first') + return _FailedFirstRunner().main(argv, kwds) + elif '--last-failed' in argv: + argv.remove('--last-failed') + return _FailedFirstRunner(last_failed=True).main(argv, kwds) + else: + return run_tests(argv, defaultTest='numba.tests', + **kwds).wasSuccessful() + + +def main(*argv, **kwds): + """keyword arguments are accepted for backward compatibility only. + See `numba.testing.run_tests()` documentation for details.""" + return _main(['
    '] + list(argv), **kwds) + + +class _FailedFirstRunner(object): + """ + Test Runner to handle the failed-first (--failed-first) option. + """ + cache_filename = '.runtests_lastfailed' + + def __init__(self, last_failed=False): + self.last_failed = last_failed + + def main(self, argv, kwds): + from numba.testing import run_tests + prog = argv[0] + argv = argv[1:] + flags = [a for a in argv if a.startswith('-')] + + all_tests, failed_tests = self.find_last_failed(argv) + # Prepare tests to run + if failed_tests: + ft = "There were {} previously failed tests" + print(ft.format(len(failed_tests))) + remaing_tests = [t for t in all_tests + if t not in failed_tests] + if self.last_failed: + tests = list(failed_tests) + else: + tests = failed_tests + remaing_tests + else: + if self.last_failed: + tests = [] + else: + tests = list(all_tests) + + if not tests: + print("No tests to run") + return True + # Run the testsuite + print("Running {} tests".format(len(tests))) + print('Flags', flags) + result = run_tests([prog] + flags + tests, **kwds) + # Update failed tests records only if we have run the all the tests + # last failed. 
+ if len(tests) == result.testsRun: + self.save_failed_tests(result, all_tests) + return result.wasSuccessful() + + def save_failed_tests(self, result, all_tests): + print("Saving failed tests to {}".format(self.cache_filename)) + cache = [] + # Find failed tests + failed = set() + for case in result.errors + result.failures: + failed.add(case[0].id()) + # Build cache + for t in all_tests: + if t in failed: + cache.append(t) + # Write cache + with open(self.cache_filename, 'w') as fout: + json.dump(cache, fout) + + def find_last_failed(self, argv): + from numba.tests.support import captured_output + + # Find all tests + listargv = ['-l'] + [a for a in argv if not a.startswith('-')] + with captured_output("stdout") as stream: + main(*listargv) + + pat = re.compile(r"^(\w+\.)+\w+$") + lines = stream.getvalue().splitlines() + all_tests = [x for x in lines if pat.match(x) is not None] + + try: + fobj = open(self.cache_filename) + except OSError: + failed_tests = [] + else: + with fobj as fin: + failed_tests = json.load(fin) + return all_tests, failed_tests diff --git a/venv/lib/python3.10/site-packages/numba/testing/loader.py b/venv/lib/python3.10/site-packages/numba/testing/loader.py new file mode 100644 index 0000000000000000000000000000000000000000..66ba417d62b2bf7e7789c8a54eba972c8d7b7624 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/testing/loader.py @@ -0,0 +1,26 @@ +from unittest import loader, case +from os.path import isdir, isfile, join, dirname, basename + + +class TestLoader(loader.TestLoader): + + def __init__(self, topleveldir=None): + super(TestLoader, self).__init__() + self._top_level_dir = topleveldir or dirname(dirname(dirname(__file__))) + + def _find_tests(self, start_dir, pattern, namespace=False): + # Upstream doesn't look for 'load_tests' in start_dir. 
+ + if isdir(start_dir) and not namespace and isfile(join(start_dir, '__init__.py')): + name = self._get_name_from_path(start_dir) + package = self._get_module_from_name(name) + load_tests = getattr(package, 'load_tests', None) + tests = self.loadTestsFromModule(package) + if load_tests is not None: + try: + yield load_tests(self, tests, pattern) + except Exception as e: + yield loader._make_failed_load_tests(package.__name__, e, self.suiteClass) + else: + for t in super(TestLoader, self)._find_tests(start_dir, pattern): + yield t diff --git a/venv/lib/python3.10/site-packages/numba/testing/main.py b/venv/lib/python3.10/site-packages/numba/testing/main.py new file mode 100644 index 0000000000000000000000000000000000000000..6a7a2e01a70818d6c53713efd2def8461ec987ad --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/testing/main.py @@ -0,0 +1,830 @@ +import collections +import contextlib +import cProfile +import inspect +import gc +import multiprocessing +import os +import random +import sys +import time +import unittest +import warnings +import zlib + +from functools import lru_cache +from io import StringIO +from unittest import result, runner, signals, suite, loader, case + +from .loader import TestLoader +from numba.core import config + +try: + from multiprocessing import TimeoutError +except ImportError: + from Queue import Empty as TimeoutError + + +def make_tag_decorator(known_tags): + """ + Create a decorator allowing tests to be tagged with the *known_tags*. + """ + + def tag(*tags): + """ + Tag a test method with the given tags. + Can be used in conjunction with the --tags command-line argument + for runtests.py. + """ + for t in tags: + if t not in known_tags: + raise ValueError("unknown tag: %r" % (t,)) + + def decorate(func): + if (not callable(func) or isinstance(func, type) + or not func.__name__.startswith('test_')): + raise TypeError("@tag(...) 
should be used on test methods") + try: + s = func.tags + except AttributeError: + s = func.tags = set() + s.update(tags) + return func + return decorate + + return tag + + +# Chances are the next queried class is the same as the previous, locally 128 +# entries seems to be fastest. +# Current number of test classes can be found with: +# $ ./runtests.py -l|sed -e 's/\(.*\)\..*/\1/'|grep ^numba|sort|uniq|wc -l +# as of writing it's 658. +@lru_cache(maxsize=128) +def _get_mtime(cls): + """ + Gets the mtime of the file in which a test class is defined. + """ + return str(os.path.getmtime(inspect.getfile(cls))) + + +def cuda_sensitive_mtime(x): + """ + Return a key for sorting tests bases on mtime and test name. For CUDA + tests, interleaving tests from different classes is dangerous as the CUDA + context might get reset unexpectedly between methods of a class, so for + CUDA tests the key prioritises the test module and class ahead of the + mtime. + """ + cls = x.__class__ + key = _get_mtime(cls) + str(x) + + from numba.cuda.testing import CUDATestCase + if CUDATestCase in cls.mro(): + key = "%s.%s %s" % (str(cls.__module__), str(cls.__name__), key) + + return key + + +def parse_slice(useslice): + """Parses the argument string "useslice" as a shard index and number and + returns a function that filters on those arguments. i.e. input + useslice="1:3" leads to output something like `lambda x: zlib.crc32(x) % 3 + == 1`. 
+ """ + if callable(useslice): + return useslice + if not useslice: + return lambda x: True + try: + (index, count) = useslice.split(":") + index = int(index) + count = int(count) + except Exception: + msg = ( + "Expected arguments shard index and count to follow " + "option `-j i:t`, where i is the shard number and t " + "is the total number of shards, found '%s'" % useslice) + raise ValueError(msg) + if count == 0: + return lambda x: True + elif count < 0 or index < 0 or index >= count: + raise ValueError("Sharding out of range") + else: + def decide(test): + func = getattr(test, test._testMethodName) + if "always_test" in getattr(func, 'tags', {}): + return True + return abs(zlib.crc32(test.id().encode('utf-8'))) % count == index + return decide + + +class TestLister(object): + """Simply list available tests rather than running them.""" + def __init__(self, useslice): + self.useslice = parse_slice(useslice) + + def run(self, test): + result = runner.TextTestResult(sys.stderr, descriptions=True, + verbosity=1) + self._test_list = _flatten_suite(test) + masked_list = list(filter(self.useslice, self._test_list)) + self._test_list.sort(key=cuda_sensitive_mtime) + for t in masked_list: + print(t.id()) + print('%d tests found. %s selected' % (len(self._test_list), + len(masked_list))) + return result + + +class SerialSuite(unittest.TestSuite): + """A simple marker to make sure tests in this suite are run serially. + + Note: As the suite is going through internals of unittest, + it may get unpacked and stuffed into a plain TestSuite. + We need to set an attribute on the TestCase objects to + remember they should not be run in parallel. 
+ """ + + def addTest(self, test): + if not isinstance(test, unittest.TestCase): + # It's a sub-suite, recurse + for t in test: + self.addTest(t) + else: + # It's a test case, mark it serial + test._numba_parallel_test_ = False + super(SerialSuite, self).addTest(test) + + +class BasicTestRunner(runner.TextTestRunner): + def __init__(self, useslice, **kwargs): + runner.TextTestRunner.__init__(self, **kwargs) + self.useslice = parse_slice(useslice) + + def run(self, test): + run = list(filter(self.useslice, _flatten_suite(test))) + run.sort(key=cuda_sensitive_mtime) + wrapped = unittest.TestSuite(run) + return super(BasicTestRunner, self).run(wrapped) + + +# "unittest.main" is really the TestProgram class! +# (defined in a module named itself "unittest.main"...) + +class NumbaTestProgram(unittest.main): + """ + A TestProgram subclass adding the following options: + * a -R option to enable reference leak detection + * a --profile option to enable profiling of the test run + * a -m option for parallel execution + * a -l option to (only) list tests + + Currently the options are only added in 3.4+. + """ + + refleak = False + profile = False + multiprocess = False + useslice = None + list = False + tags = None + exclude_tags = None + random_select = None + random_seed = 42 + + def __init__(self, *args, **kwargs): + topleveldir = kwargs.pop('topleveldir', None) + kwargs['testLoader'] = TestLoader(topleveldir) + + # HACK to force unittest not to change warning display options + # (so that NumbaWarnings don't appear all over the place) + sys.warnoptions.append(':x') + self.nomultiproc = kwargs.pop('nomultiproc', False) + super(NumbaTestProgram, self).__init__(*args, **kwargs) + + def _getParentArgParser(self): + # NOTE: this hook only exists on Python 3.4+. The options won't be + # added in earlier versions (which use optparse - 3.3 - or getopt() + # - 2.x). 
+ parser = super(NumbaTestProgram, self)._getParentArgParser() + if self.testRunner is None: + parser.add_argument('-R', '--refleak', dest='refleak', + action='store_true', + help='Detect reference / memory leaks') + parser.add_argument('-m', '--multiprocess', dest='multiprocess', + nargs='?', + type=int, + const=multiprocessing.cpu_count(), + help='Parallelize tests') + parser.add_argument('-l', '--list', dest='list', + action='store_true', + help='List tests without running them') + parser.add_argument('--tags', dest='tags', type=str, + help='Comma-separated list of tags to select ' + 'a subset of the test suite') + parser.add_argument('--exclude-tags', dest='exclude_tags', type=str, + help='Comma-separated list of tags to de-select ' + 'a subset of the test suite') + parser.add_argument('--random', dest='random_select', type=float, + help='Random proportion of tests to select') + parser.add_argument('--profile', dest='profile', + action='store_true', + help='Profile the test run') + parser.add_argument('-j', '--slice', dest='useslice', nargs='?', + type=str, const="None", + help='Shard the test sequence') + + def git_diff_str(x): + if x != 'ancestor': + raise ValueError("invalid option for --gitdiff") + return x + + parser.add_argument('-g', '--gitdiff', dest='gitdiff', type=git_diff_str, + default=False, nargs='?', + help=('Run tests from changes made against ' + 'origin/release0.61 as identified by `git diff`. 
' + 'If set to "ancestor", the diff compares ' + 'against the common ancestor.')) + return parser + + def _handle_tags(self, argv, tagstr): + found = None + for x in argv: + if tagstr in x: + if found is None: + found = x + else: + raise ValueError("argument %s supplied repeatedly" % tagstr) + + if found is not None: + posn = argv.index(found) + try: + if found == tagstr: # --tagstr + tag_args = argv[posn + 1].strip() + argv.remove(tag_args) + else: # --tagstr= + if '=' in found: + tag_args = found.split('=')[1].strip() + else: + raise AssertionError('unreachable') + except IndexError: + # at end of arg list, raise + msg = "%s requires at least one tag to be specified" + raise ValueError(msg % tagstr) + # see if next arg is "end options" or some other flag + if tag_args.startswith('-'): + raise ValueError("tag starts with '-', probably a syntax error") + # see if tag is something like "=" which is likely a syntax + # error of form `--tags =`, note the space prior to `=`. + if '=' in tag_args: + msg = "%s argument contains '=', probably a syntax error" + raise ValueError(msg % tagstr) + attr = tagstr[2:].replace('-', '_') + setattr(self, attr, tag_args) + argv.remove(found) + + + def parseArgs(self, argv): + if '-l' in argv: + argv.remove('-l') + self.list = True + + super(NumbaTestProgram, self).parseArgs(argv) + + # If at this point self.test doesn't exist, it is because + # no test ID was given in argv. Use the default instead. 
+ if not hasattr(self, 'test') or not self.test.countTestCases(): + self.testNames = (self.defaultTest,) + self.createTests() + + if self.tags: + tags = [s.strip() for s in self.tags.split(',')] + self.test = _choose_tagged_tests(self.test, tags, mode='include') + + if self.exclude_tags: + tags = [s.strip() for s in self.exclude_tags.split(',')] + self.test = _choose_tagged_tests(self.test, tags, mode='exclude') + + if self.random_select: + self.test = _choose_random_tests(self.test, self.random_select, + self.random_seed) + + if self.gitdiff is not False: + self.test = _choose_gitdiff_tests( + self.test, + use_common_ancestor=(self.gitdiff == 'ancestor'), + ) + + if self.verbosity <= 0: + # We aren't interested in informational messages / warnings when + # running with '-q'. + self.buffer = True + + def _do_discovery(self, argv, Loader=None): + # Disable unittest's implicit test discovery when parsing + # CLI arguments, as it can select other tests than Numba's + # (e.g. some test_xxx module that may happen to be directly + # reachable from sys.path) + return + + def runTests(self): + if self.refleak: + self.testRunner = RefleakTestRunner + + if not hasattr(sys, "gettotalrefcount"): + warnings.warn("detecting reference leaks requires a debug build " + "of Python, only memory leaks will be detected") + + elif self.list: + self.testRunner = TestLister(self.useslice) + + elif self.testRunner is None: + self.testRunner = BasicTestRunner(self.useslice, + verbosity=self.verbosity, + failfast=self.failfast, + buffer=self.buffer) + + if self.multiprocess and not self.nomultiproc: + if self.multiprocess < 1: + msg = ("Value specified for the number of processes to use in " + "running the suite must be > 0") + raise ValueError(msg) + self.testRunner = ParallelTestRunner(runner.TextTestRunner, + self.multiprocess, + self.useslice, + verbosity=self.verbosity, + failfast=self.failfast, + buffer=self.buffer) + + def run_tests_real(): + super(NumbaTestProgram, self).runTests() + 
+ if self.profile: + filename = os.path.splitext( + os.path.basename(sys.modules['__main__'].__file__) + )[0] + '.prof' + p = cProfile.Profile(timer=time.perf_counter) # 3.3+ + p.enable() + try: + p.runcall(run_tests_real) + finally: + p.disable() + print("Writing test profile data into %r" % (filename,)) + p.dump_stats(filename) + else: + run_tests_real() + + +# These are tests which are generated and injected into the test suite, what +# gets injected depends on features of the test environment, e.g. TBB presence +# it's important for doing the CI "slice tests" that these are run at the end +# See notes in `_flatten_suite` for why. Simple substring matching is used to +# determine a match. +_GENERATED = ( + "numba.cuda.tests.cudapy.test_libdevice.TestLibdeviceCompilation", + "numba.tests.test_num_threads", + "numba.tests.test_parallel_backend", + "numba.tests.test_svml", + "numba.tests.test_ufuncs", +) + + +def _flatten_suite_inner(test): + """ + Workhorse for _flatten_suite + """ + tests = [] + if isinstance(test, (unittest.TestSuite, list, tuple)): + for x in test: + tests.extend(_flatten_suite_inner(x)) + else: + tests.append(test) + return tests + + +def _flatten_suite(test): + """ + Expand nested suite into list of test cases. + """ + tests = _flatten_suite_inner(test) + # Strip out generated tests and stick them at the end, this is to make sure + # that tests appear in a consistent order regardless of features available. + # This is so that a slice through the test suite e.g. (1::N) would likely be + # consistent up to the point of the generated tests, which rely on specific + # features. 
+ generated = set() + for t in tests: + for g in _GENERATED: + if g in str(t): + generated.add(t) + normal = set(tests) - generated + def key(x): + return x.__module__, type(x).__name__, x._testMethodName + tests = sorted(normal, key=key) + tests.extend(sorted(list(generated), key=key)) + return tests + + +def _choose_gitdiff_tests(tests, *, use_common_ancestor=False): + try: + from git import Repo + except ImportError: + raise ValueError("gitpython needed for git functionality") + repo = Repo('.') + path = os.path.join('numba', 'tests') + if use_common_ancestor: + print(f"Git diff by common ancestor") + target = 'origin/release0.61...HEAD' + else: + target = 'origin/release0.61..HEAD' + gdiff_paths = repo.git.diff(target, path, name_only=True).split() + # normalise the paths as they are unix style from repo.git.diff + gdiff_paths = [os.path.normpath(x) for x in gdiff_paths] + selected = [] + gdiff_paths = [os.path.join(repo.working_dir, x) for x in gdiff_paths] + for test in _flatten_suite(tests): + assert isinstance(test, unittest.TestCase) + fname = inspect.getsourcefile(test.__class__) + if fname in gdiff_paths: + selected.append(test) + print("Git diff identified %s tests" % len(selected)) + return unittest.TestSuite(selected) + +def _choose_tagged_tests(tests, tags, mode='include'): + """ + Select tests that are tagged/not tagged with at least one of the given tags. + Set mode to 'include' to include the tests with tags, or 'exclude' to + exclude the tests with the tags. 
+ """ + selected = [] + tags = set(tags) + for test in _flatten_suite(tests): + assert isinstance(test, unittest.TestCase) + func = getattr(test, test._testMethodName) + try: + # Look up the method's underlying function (Python 2) + func = func.im_func + except AttributeError: + pass + + found_tags = getattr(func, 'tags', None) + # only include the test if the tags *are* present + if mode == 'include': + if found_tags is not None and found_tags & tags: + selected.append(test) + elif mode == 'exclude': + # only include the test if the tags *are not* present + if found_tags is None or not (found_tags & tags): + selected.append(test) + else: + raise ValueError("Invalid 'mode' supplied: %s." % mode) + return unittest.TestSuite(selected) + + +def _choose_random_tests(tests, ratio, seed): + """ + Choose a given proportion of tests at random. + """ + rnd = random.Random() + rnd.seed(seed) + if isinstance(tests, unittest.TestSuite): + tests = _flatten_suite(tests) + tests = rnd.sample(tests, int(len(tests) * ratio)) + tests = sorted(tests, key=lambda case: case.id()) + return unittest.TestSuite(tests) + + +# The reference leak detection code is liberally taken and adapted from +# Python's own Lib/test/regrtest.py. + +def _refleak_cleanup(): + # Collect cyclic trash and read memory statistics immediately after. + func1 = sys.getallocatedblocks + try: + func2 = sys.gettotalrefcount + except AttributeError: + func2 = lambda: 42 + + # Flush standard output, so that buffered data is sent to the OS and + # associated Python objects are reclaimed. + for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__): + if stream is not None: + stream.flush() + + sys._clear_type_cache() + # This also clears the various internal CPython freelists. 
+ gc.collect() + return func1(), func2() + + +class ReferenceLeakError(RuntimeError): + pass + + +class IntPool(collections.defaultdict): + + def __missing__(self, key): + return key + + +class RefleakTestResult(runner.TextTestResult): + + warmup = 3 + repetitions = 6 + + def _huntLeaks(self, test): + self.stream.flush() + + repcount = self.repetitions + nwarmup = self.warmup + rc_deltas = [0] * (repcount - nwarmup) + alloc_deltas = [0] * (repcount - nwarmup) + # Preallocate ints likely to be stored in rc_deltas and alloc_deltas, + # to make sys.getallocatedblocks() less flaky. + _int_pool = IntPool() + for i in range(-200, 200): + _int_pool[i] + + for i in range(repcount): + # Use a pristine, silent result object to avoid recursion + res = result.TestResult() + test.run(res) + # Poorly-written tests may fail when run several times. + # In this case, abort the refleak run and report the failure. + if not res.wasSuccessful(): + self.failures.extend(res.failures) + self.errors.extend(res.errors) + raise AssertionError + del res + alloc_after, rc_after = _refleak_cleanup() + if i >= nwarmup: + rc_deltas[i - nwarmup] = _int_pool[rc_after - rc_before] + alloc_deltas[i - nwarmup] = _int_pool[alloc_after - alloc_before] + alloc_before, rc_before = alloc_after, rc_after + return rc_deltas, alloc_deltas + + def addSuccess(self, test): + try: + rc_deltas, alloc_deltas = self._huntLeaks(test) + except AssertionError: + # Test failed when repeated + assert not self.wasSuccessful() + return + + # These checkers return False on success, True on failure + def check_rc_deltas(deltas): + return any(deltas) + + def check_alloc_deltas(deltas): + # At least 1/3rd of 0s + if 3 * deltas.count(0) < len(deltas): + return True + # Nothing else than 1s, 0s and -1s + if not set(deltas) <= set((1, 0, -1)): + return True + return False + + failed = False + + for deltas, item_name, checker in [ + (rc_deltas, 'references', check_rc_deltas), + (alloc_deltas, 'memory blocks', check_alloc_deltas)]: 
+ if checker(deltas): + msg = '%s leaked %s %s, sum=%s' % ( + test, deltas, item_name, sum(deltas)) + failed = True + try: + raise ReferenceLeakError(msg) + except Exception: + exc_info = sys.exc_info() + if self.showAll: + self.stream.write("%s = %r " % (item_name, deltas)) + self.addFailure(test, exc_info) + + if not failed: + super(RefleakTestResult, self).addSuccess(test) + + +class RefleakTestRunner(runner.TextTestRunner): + resultclass = RefleakTestResult + + +class ParallelTestResult(runner.TextTestResult): + """ + A TestResult able to inject results from other results. + """ + + def add_results(self, result): + """ + Add the results from the other *result* to this result. + """ + self.stream.write(result.stream.getvalue()) + self.stream.flush() + self.testsRun += result.testsRun + self.failures.extend(result.failures) + self.errors.extend(result.errors) + self.skipped.extend(result.skipped) + self.expectedFailures.extend(result.expectedFailures) + self.unexpectedSuccesses.extend(result.unexpectedSuccesses) + + +class _MinimalResult(object): + """ + A minimal, picklable TestResult-alike object. + """ + + __slots__ = ( + 'failures', 'errors', 'skipped', 'expectedFailures', + 'unexpectedSuccesses', 'stream', 'shouldStop', 'testsRun', + 'test_id') + + def fixup_case(self, case): + """ + Remove any unpicklable attributes from TestCase instance *case*. + """ + # Python 3.3 doesn't reset this one. + case._outcomeForDoCleanups = None + + def __init__(self, original_result, test_id=None): + for attr in self.__slots__: + setattr(self, attr, getattr(original_result, attr, None)) + for case, _ in self.expectedFailures: + self.fixup_case(case) + for case, _ in self.errors: + self.fixup_case(case) + for case, _ in self.failures: + self.fixup_case(case) + self.test_id = test_id + + +class _FakeStringIO(object): + """ + A trivial picklable StringIO-alike for Python 2. 
+ """ + + def __init__(self, value): + self._value = value + + def getvalue(self): + return self._value + + +class _MinimalRunner(object): + """ + A minimal picklable object able to instantiate a runner in a + child process and run a test case with it. + """ + + def __init__(self, runner_cls, runner_args): + self.runner_cls = runner_cls + self.runner_args = runner_args + + # Python 2 doesn't know how to pickle instance methods, so we use __call__ + # instead. + + def __call__(self, test): + # Executed in child process + kwargs = self.runner_args + # Force recording of output in a buffer (it will be printed out + # by the parent). + kwargs['stream'] = StringIO() + runner = self.runner_cls(**kwargs) + result = runner._makeResult() + # Avoid child tracebacks when Ctrl-C is pressed. + signals.installHandler() + signals.registerResult(result) + result.failfast = runner.failfast + result.buffer = runner.buffer + with self.cleanup_object(test): + test(result) + # HACK as cStringIO.StringIO isn't picklable in 2.x + result.stream = _FakeStringIO(result.stream.getvalue()) + return _MinimalResult(result, test.id()) + + @contextlib.contextmanager + def cleanup_object(self, test): + """ + A context manager which cleans up unwanted attributes on a test case + (or any other object). + """ + vanilla_attrs = set(test.__dict__) + try: + yield test + finally: + spurious_attrs = set(test.__dict__) - vanilla_attrs + for name in spurious_attrs: + del test.__dict__[name] + + +def _split_nonparallel_tests(test, sliced): + """ + Split test suite into parallel and serial tests. + """ + ptests = [] + stests = [] + + flat = [*filter(sliced, _flatten_suite(test))] + + def is_parallelizable_test_case(test): + # Guard for the fake test case created by unittest when test + # discovery fails, as it isn't picklable (e.g. 
"LoadTestsFailure") + method_name = test._testMethodName + method = getattr(test, method_name) + if method.__name__ != method_name and method.__name__ == "testFailure": + return False + # Was parallel execution explicitly disabled? + return getattr(test, "_numba_parallel_test_", True) + + for t in flat: + if is_parallelizable_test_case(t): + ptests.append(t) + else: + stests.append(t) + + return ptests, stests + +# A test can't run longer than 10 minutes +_TIMEOUT = 1200 + +class ParallelTestRunner(runner.TextTestRunner): + """ + A test runner which delegates the actual running to a pool of child + processes. + """ + + resultclass = ParallelTestResult + timeout = _TIMEOUT + + def __init__(self, runner_cls, nprocs, useslice, **kwargs): + runner.TextTestRunner.__init__(self, **kwargs) + self.runner_cls = runner_cls + self.nprocs = nprocs + self.useslice = parse_slice(useslice) + self.runner_args = kwargs + + def _run_inner(self, result): + # We hijack TextTestRunner.run()'s inner logic by passing this + # method as if it were a test case. + child_runner = _MinimalRunner(self.runner_cls, self.runner_args) + + # Split the tests and recycle the worker process to tame memory usage. 
+ chunk_size = 100 + splitted_tests = [self._ptests[i:i + chunk_size] + for i in range(0, len(self._ptests), chunk_size)] + + for tests in splitted_tests: + pool = multiprocessing.Pool(self.nprocs) + try: + self._run_parallel_tests(result, pool, child_runner, tests) + except: + # On exception, kill still active workers immediately + pool.terminate() + # Make sure exception is reported and not ignored + raise + else: + # Close the pool cleanly unless asked to early out + if result.shouldStop: + pool.terminate() + break + else: + pool.close() + finally: + # Always join the pool (this is necessary for coverage.py) + pool.join() + if not result.shouldStop: + stests = SerialSuite(self._stests) + stests.run(result) + return result + + def _run_parallel_tests(self, result, pool, child_runner, tests): + remaining_ids = set(t.id() for t in tests) + tests.sort(key=cuda_sensitive_mtime) + it = pool.imap_unordered(child_runner, tests) + while True: + try: + child_result = it.__next__(self.timeout) + except StopIteration: + return + except TimeoutError as e: + # Diagnose the names of unfinished tests + msg = ("Tests didn't finish before timeout (or crashed):\n%s" + % "".join("- %r\n" % tid for tid in sorted(remaining_ids)) + ) + e.args = (msg,) + e.args[1:] + raise e + else: + result.add_results(child_result) + remaining_ids.discard(child_result.test_id) + if child_result.shouldStop: + result.shouldStop = True + return + + def run(self, test): + self._ptests, self._stests = _split_nonparallel_tests(test, + self.useslice) + print("Parallel: %s. Serial: %s" % (len(self._ptests), + len(self._stests))) + # This will call self._run_inner() on the created result object, + # and print out the detailed test results at the end. 
+ return super(ParallelTestRunner, self).run(self._run_inner) diff --git a/venv/lib/python3.10/site-packages/numba/testing/notebook.py b/venv/lib/python3.10/site-packages/numba/testing/notebook.py new file mode 100644 index 0000000000000000000000000000000000000000..646a92d739c755f91efc42b6591d3d07ed1e3af8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/testing/notebook.py @@ -0,0 +1,171 @@ +from unittest import TestCase + +from ipykernel.tests import utils +from nbformat.converter import convert +from nbformat.reader import reads + +import re +import json +from copy import copy +import unittest + +try: + # py3 + from queue import Empty + + def isstr(s): + return isinstance(s, str) +except ImportError: + # py2 + from Queue import Empty + + def isstr(s): + return isinstance(s, basestring) # noqa + +class NotebookTest(TestCase): + """Validate a notebook. All code cells are executed in order. The output is either checked + for errors (if no reference output is present), or is compared against expected output. 
+ + + Useful references: + http://nbformat.readthedocs.org/en/latest/format_description.html + http://jupyter-client.readthedocs.org/en/latest/messaging.html +""" + + + IGNORE_TYPES = ["execute_request", "execute_input", "status", "pyin"] + STRIP_KEYS = ["execution_count", "traceback", "prompt_number", "source"] + NBFORMAT_VERSION = 4 + + def _test_notebook(self, notebook, test): + + with open(notebook) as f: + nb = convert(reads(f.read()), self.NBFORMAT_VERSION) + _, kernel = utils.start_new_kernel() + for i, c in enumerate([c for c in nb.cells if c.cell_type == 'code']): + self._test_notebook_cell(self.sanitize_cell(c), i, kernel, test) + + def _test_notebook_cell(self, cell, i, kernel, test): + + if hasattr(cell, 'source'): # nbformat 4.0 and later + code = cell.source + else: + code = cell.input + iopub = kernel.iopub_channel + kernel.execute(code) + outputs = [] + msg = None + no_error = True + first_error = -1 + error_msg = '' + while self.should_continue(msg): + try: + msg = iopub.get_msg(block=True, timeout=1) + except Empty: + continue + if msg['msg_type'] not in self.IGNORE_TYPES: + if msg['msg_type'] == 'error': + error_msg = ' ' + msg['content']['ename'] + '\n ' + msg['content']['evalue'] + no_error = False + if first_error == -1: + first_error = i + i = len(outputs) + expected = i < len(cell.outputs) and cell.outputs[i] or [] + o = self.transform_message(msg, expected) + outputs.append(o) + + if (test == 'check_error'): + self.assertTrue(no_error, 'Executing cell %d resulted in an error:\n%s'%(first_error, error_msg)) + else: + # Compare computed output against stored output. + # TODO: This doesn't work right now as the generated output is too diverse to + # be verifiable. 
+ scrub = lambda x: self.dump_canonical(list(self.scrub_outputs(x))) + scrubbed = scrub(outputs) + expected = scrub(cell.outputs) + #print('output=%s'%outputs) + #print('expected=%s'%expected) + #self.assertEqual(scrubbed, expected, "\n{}\n\n{}".format(scrubbed, expected)) + + def dump_canonical(self, obj): + return json.dumps(obj, indent=2, sort_keys=True) + + def scrub_outputs(self, outputs): + """ + remove all scrubs from output data and text + """ + for output in outputs: + out = copy(output) + + for scrub, sub in []:#self.scrubs.items(): + def _scrubLines(lines): + if isstr(lines): + return re.sub(scrub, sub, lines) + else: + return [re.sub(scrub, sub, line) for line in lines] + + if "text" in out: + out["text"] = _scrubLines(out["text"]) + + if "data" in out: + if isinstance(out["data"], dict): + for mime, data in out["data"].items(): + out["data"][mime] = _scrubLines(data) + else: + out["data"] = _scrubLines(out["data"]) + yield out + + def strip_keys(self, d): + """ + remove keys from STRIP_KEYS to ensure comparability + """ + for key in self.STRIP_KEYS: + d.pop(key, None) + return d + + def sanitize_cell(self, cell): + """ + remove non-reproducible things + """ + for output in cell.outputs: + self.strip_keys(output) + return cell + + def transform_message(self, msg, expected): + """ + transform a message into something like the notebook + """ + SWAP_KEYS = { + "output_type": { + "pyout": "execute_result", + "pyerr": "error" + } + } + + output = { + u"output_type": msg["msg_type"] + } + output.update(msg["content"]) + + output = self.strip_keys(output) + for key, swaps in SWAP_KEYS.items(): + if key in output and output[key] in swaps: + output[key] = swaps[output[key]] + + if "data" in output and "data" not in expected: + output["text"] = output["data"] + del output["data"] + + return output + + def should_continue(self, msg): + """ + determine whether the current message is the last for this cell + """ + if msg is None: + return True + + return not 
(msg["msg_type"] == "status" and + msg["content"]["execution_state"] == "idle") + + diff --git a/venv/lib/python3.10/site-packages/numba/tests/__init__.py b/venv/lib/python3.10/site-packages/numba/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d2a38c69038c9ed68838f08b0a7014d2728b5855 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/__init__.py @@ -0,0 +1,33 @@ +import gc +from os.path import dirname, join +import multiprocessing +import sys +import time +import unittest +import warnings + +from unittest.suite import TestSuite +from numba.testing import load_testsuite + + +try: + import faulthandler +except ImportError: + faulthandler = None +else: + try: + # May fail in IPython Notebook with UnsupportedOperation + faulthandler.enable() + except Exception as e: + msg = "Failed to enable faulthandler due to:\n{err}" + warnings.warn(msg.format(err=e)) + +def load_tests(loader, tests, pattern): + suite = TestSuite() + suite.addTests(load_testsuite(loader, dirname(__file__))) + # Numba CUDA tests are located in a separate directory: + cuda_dir = join(dirname(dirname(__file__)), 'cuda/tests') + suite.addTests(loader.discover(cuda_dir)) + + return suite + diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bde08f3bcaec1661720f28f3cf772936f2849997 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/annotation_usecases.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/annotation_usecases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffd2605eaa9dd9456bfedfa0f4f1235f995d3f56 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/annotation_usecases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/cache_usecases.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/cache_usecases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f03eb366aae332cd59a80e074dec5977838beb08 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/cache_usecases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/cffi_usecases.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/cffi_usecases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70dabf6f2cf09f1f1b7273879f48ad0544aed474 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/cffi_usecases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/cfunc_cache_usecases.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/cfunc_cache_usecases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e95479c5e110fbc700259b3b489cb5f576395c55 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/cfunc_cache_usecases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/chained_assign_usecases.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/chained_assign_usecases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac63665525e9da93e441ccc25c1f644e2ad84605 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/chained_assign_usecases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/cloudpickle_main_class.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/cloudpickle_main_class.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8494c1b444d1b4dcab639ccca91b89676f5109d9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/cloudpickle_main_class.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/compile_with_pycc.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/compile_with_pycc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84d36a1f551da71f23a5c2af672d7795bc088409 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/compile_with_pycc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/complex_usecases.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/complex_usecases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f244cc11aebbaeac155400a860e78c17efb8adf6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/complex_usecases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/ctypes_usecases.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/ctypes_usecases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61d658116612d98fc6c98ac6d64ae9053b98c6d6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/ctypes_usecases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/doctest_usecase.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/doctest_usecase.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad33c31b2e8875f6b99d0f3625413eb7798c16aa Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/doctest_usecase.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/dummy_module.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/dummy_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e20991df276e67e5bcda2b2b49c71a7eceb16d4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/dummy_module.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/enum_usecases.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/enum_usecases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbc78c2e8427e24ee99d95cbf515dae3b8bd2a22 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/enum_usecases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/error_usecases.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/error_usecases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbe4ff0bbcbe6964e9261130c0cf47ff6b592aa7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/error_usecases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/errorhandling_usecases.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/errorhandling_usecases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1b9ee61ed2ed226ef858e2a0350f15648a2781b Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/errorhandling_usecases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/gdb_support.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/gdb_support.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7e2022d8e1485db0fbeec4b0f2e69dbbc28a47f Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/gdb_support.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/inlining_usecases.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/inlining_usecases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8dccb3e0def99b7d7002e6c6215efeea6539e58f Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/inlining_usecases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/matmul_usecase.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/matmul_usecase.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b48fc1943480b71ac0502e36e1d93300c14ed4f0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/matmul_usecase.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/orphaned_semaphore_usecase.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/orphaned_semaphore_usecase.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca9da6e713ece9ff0c5f69036867a61fb9120ca4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/orphaned_semaphore_usecase.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/overload_usecases.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/overload_usecases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af31cd48c75868edab87d3058a867d903e5d8040 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/overload_usecases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/parfor_iss9490_usecase.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/parfor_iss9490_usecase.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc283d888d48edc0e18bf27c83bd85d5d4fb7990 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/parfor_iss9490_usecase.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/parfors_cache_usecases.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/parfors_cache_usecases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4af3eccd43a125ac1b21b67ffc096e20093c9c2b Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/parfors_cache_usecases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/pdlike_usecase.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/pdlike_usecase.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28b13b15c7f381a3de846f9b1c2c878ab23aaadb Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/pdlike_usecase.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/recursion_usecases.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/recursion_usecases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..473697bcc8e1c24314f7a7a3aae00e912750e923 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/recursion_usecases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/serialize_usecases.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/serialize_usecases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aeeecc272533a37f70effb19b4ed8102b41481fb Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/serialize_usecases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/support.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/support.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b75f764766778ffffef4319263361422bd19b2a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/support.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_alignment.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_alignment.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb01b54307cd91451cedf6bca8e7df139761c6f8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_alignment.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_analysis.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61414e328934959801728a0b0320c3e1ed9f8f1f Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_analysis.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_annotations.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_annotations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f057c3403ff152955d2109859f83d5191c413bf9 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_annotations.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_api.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..700dbb0adce1be8c43ca08d586307422f7f3c7a5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_analysis.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91a189b982c7109ba9282278a3d3208e3a19d75a Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_analysis.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_attr.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_attr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60d0680725dcd9cd8522247325a7bd9628af5e47 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_attr.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ddcaea02c2ac0adf63e037bee9a4b5a0a646103f Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_constants.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_exprs.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_exprs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6c674deacc299b2b675fc80dbf5336efb2470ff Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_exprs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_iterators.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_iterators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dca214db5db654f6f1b8797be3719d1451adbdfd Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_iterators.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_manipulation.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_manipulation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89af3f2ec1a29b1683ecec17939e4092e4b41d41 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_manipulation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_methods.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_methods.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffa9c39baab07a9833f33ab799d18f665bbb67ab Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_methods.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_reductions.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_reductions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba5054821b2bea990e259e4b7307c5c00b9985bd Binary 
files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_reductions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_return.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_return.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0a58e1c8bf6a4f7943181e51811b057f80af761 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_array_return.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_asnumbatype.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_asnumbatype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d15f0b833de6b25ab8527c84c15b15b4dbd5d692 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_asnumbatype.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_auto_constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_auto_constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfc6d2c754c4dd8ebbbe9400693b58f331d444d4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_auto_constants.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_blackscholes.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_blackscholes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b23d3385d4507d0ebfa508b25965252442aea445 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_blackscholes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_boundscheck.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_boundscheck.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..835081e766eb4d7d346a29940059f02cf72b590e Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_boundscheck.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_buffer_protocol.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_buffer_protocol.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d80ddcfd026212d2ac0a8393d3ee81b0cfe2b6d Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_buffer_protocol.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_builtins.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_builtins.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e39dc6cbd2871541ed7c75011f6605849ef598e Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_builtins.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_byteflow.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_byteflow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e355b628d79454a245deca9c6fbea540258b7f8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_byteflow.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_caching.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_caching.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cec47202d02a8a1ef0037b69f095c7184fb8686e Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_caching.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_casting.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_casting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..272a01d613cfaefe433e773254c47b58dc2f9e73 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_casting.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_cffi.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_cffi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..684242556d76212fe03e129be77a83e7c3c9955e Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_cffi.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_cfunc.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_cfunc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2082ec41f99b8468e74639976d03d102b83c53ee Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_cfunc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_cgutils.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_cgutils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65d30b80b77e60c48900bc2c185b4865cf505425 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_cgutils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_chained_assign.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_chained_assign.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..cb14ea3963daca3c2b60fb7462cea2d7a7438ac8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_chained_assign.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_chrome_trace.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_chrome_trace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a36b1ac384740eed5dbe3b3ccec179c5f8e72a18 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_chrome_trace.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_cli.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_cli.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..580e6926a5d82223a896ca8ed2ff1edf9fe2655a Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_cli.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_closure.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_closure.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f70a1d19d4348d0d86459f5c8f590b9e0829b57 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_closure.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_codegen.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_codegen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66c50da8ab10ddb7c755600c281f87769a891cb0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_codegen.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_compile_cache.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_compile_cache.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c424903f3c22cb3bc012b1c076cd184a9f273637 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_compile_cache.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_compiler_flags.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_compiler_flags.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..207104dbf587830b06b1290d75a84fc3eeb9590f Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_compiler_flags.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_compiler_lock.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_compiler_lock.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57d15f1a7315973184ad58ee47efbe987ffd382b Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_compiler_lock.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_complex.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_complex.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1bd46036dabf51c8fa2fd19777c8bbb870995990 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_complex.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_comprehension.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_comprehension.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..6ac9c1de596ed737c3a57eefff931f6ede67a707 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_comprehension.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_conditions_as_predicates.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_conditions_as_predicates.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7daf083d9f56ddd8b2afc538963bbac2dc89a463 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_conditions_as_predicates.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_config.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f108d1423280735c007f53696406c787279e657 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_conversion.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_conversion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4b319b889c090b66c5e234e8524b1ca95bf4caa Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_conversion.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_copy_propagate.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_copy_propagate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d51140b3ee47ec2fd01242a1a14eb1f0b9dddd0b Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_copy_propagate.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_ctypes.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_ctypes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db79ba1a7221cb139e3957e07f9bf75aa94e6f28 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_ctypes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_dataflow.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_dataflow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ca6b8f5bda4f5984165133410e42ec690fc5fdd Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_dataflow.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_datamodel.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_datamodel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9cf35c3df9af0bc0166340ca9558d41d612812c8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_datamodel.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_debug.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_debug.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de2a7ab7ec03ee55bf05d76df92d81be46028b27 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_debug.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_debuginfo.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_debuginfo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c8b72d25cdc1f37d32b3de9949beee947b5655f Binary files 
/dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_debuginfo.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_deprecations.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_deprecations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6489392b34f335b695366e1dc7dc1d05e748e54 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_deprecations.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_dictimpl.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_dictimpl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0ec3faecffe3c132f8f016baa539f763fa00520 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_dictimpl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_dictobject.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_dictobject.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5041f80d031335e488a60a02dc02072cda7894d6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_dictobject.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_dicts.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_dicts.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f39cff33f4c6b65591fdff33d41b91cbb0bf246 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_dicts.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_dispatcher.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_dispatcher.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e12a4f56b087b0144e91f5a1f927ffe2a29c804 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_dispatcher.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_doctest.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_doctest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86438d32c3af70c7a8851ae880f466e6ca4d1850 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_doctest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_dyn_array.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_dyn_array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98e947bf328cbc0c259d57008c803cab5ef5842d Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_dyn_array.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_dyn_func.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_dyn_func.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ddc431caeb6510192ce87dd55ae8817b44f5296a Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_dyn_func.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_entrypoints.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_entrypoints.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35d64acc5a7f402b3d29da8d6687aa3ce397c55e Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_entrypoints.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_enums.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_enums.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a03947f485e49c5e6f8b0d95bdad83f341bfa0ab Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_enums.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_errorhandling.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_errorhandling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65d074d10eea3d06aafe355e1f34a5fa83792a23 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_errorhandling.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_errormodels.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_errormodels.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c4ce4ce009154f0d57e500100ef47fa48acf909 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_errormodels.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_event.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_event.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab37f012fce07c48c3c30f9e1821c3b6939bf82f Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_event.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_exceptions.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7108233d48fccd564a698e5848fb3d1ff1e2ee4e Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_exceptions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_extended_arg.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_extended_arg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf0a21d44514847eed3ca9219483e64df9d95109 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_extended_arg.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_extending.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_extending.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47c47328b65c84a69a64648ae6ebbe2e0416ead4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_extending.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_extending_types.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_extending_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a084a8660600a7e9ceed3052555ebb396e206b5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_extending_types.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_fancy_indexing.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_fancy_indexing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a38a5e91c830d9a4a12404b35ae2dd5f8fa0dbc9 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_fancy_indexing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_fastmath.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_fastmath.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e291f3b26db66962c83665da714860b5ad9c7aa1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_fastmath.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_findlib.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_findlib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53b832578e2e282833c4c4be24f4d5b6e97470c0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_findlib.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_firstlinefinder.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_firstlinefinder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd50bc4e9f9be8cfb22e004a8ab1c5a0c108d0cd Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_firstlinefinder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_flow_control.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_flow_control.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5244f48cc21e6f472f8eaaaf87009c7ff39133b Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_flow_control.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_func_interface.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_func_interface.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1270e43bcf6c5c24f49f7523011cef90644476d4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_func_interface.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_func_lifetime.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_func_lifetime.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bfc70992be7eef89b2681f66cd75490b203df82 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_func_lifetime.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_funcdesc.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_funcdesc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63cdb3984c8327ce622f18a969c880ace9a17018 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_funcdesc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_function_type.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_function_type.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..290a43c69606aeb0b895ce68f1052c9170386502 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_function_type.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_gdb_bindings.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_gdb_bindings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd24ca433227185568792078795b4e1fff5f73f1 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_gdb_bindings.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_gdb_dwarf.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_gdb_dwarf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a15d9ce452f97247d6ffe59374031cdfc4769ed Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_gdb_dwarf.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_generators.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_generators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df842ea48491af6f0ef17806f68390a1f89cc4cb Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_generators.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_getitem_on_types.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_getitem_on_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd22fb30e928428f1e0d42e76b90c861fe73f178 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_getitem_on_types.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_gil.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_gil.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7dde49a74dd631de19cda791ab57998b1a9afd3b Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_gil.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_globals.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_globals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e078942f19f7b221401f7b980ab74bbfb8977aed Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_globals.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_hashing.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_hashing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ba32e050330516321dff6b21c9f6ded21816c58 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_hashing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_heapq.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_heapq.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da207cc1b0b56355ccebdd0498eee71ca2de6236 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_heapq.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_help.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_help.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dfe74ee2986944a148b5728c23e209591b18ccc5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_help.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_import.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_import.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5558ba02933d8fdde8bf88d0609290f71b5b1198 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_import.cpython-310.pyc differ diff 
--git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_indexing.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_indexing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9af9242f4b856e8578687471f5945f9f33e5b883 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_indexing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_init_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_init_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6503efb9c6a48cca5dda6b2ea235c913d2d22c2c Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_init_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_inlining.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_inlining.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6271d589319039a6dee0f934af38cad0ca2cddb Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_inlining.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_interpreter.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_interpreter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0303224f48f479f90643256ca850a468841f7e76 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_interpreter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_interproc.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_interproc.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d47e53bd096936d130a825f677d47b6f190454fc Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_interproc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_intwidth.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_intwidth.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9038571dcbb56f6f1ae492e41d498015ccd7966 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_intwidth.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_ir.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_ir.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3edc8333989b301a026a2f205729791c64ab8a8f Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_ir.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_ir_inlining.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_ir_inlining.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57f054e04e8cce05986ae21bb5c449de489a6711 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_ir_inlining.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_ir_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_ir_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..403b2b2463bc6147e54863b6b5335ee693d9789d Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_ir_utils.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_itanium_mangler.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_itanium_mangler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..944500d0dbd37e5cec5d569cfd65003e9ead1623 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_itanium_mangler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_iteration.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_iteration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e10fc447a00025d65f34765399dd1bfc0d542245 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_iteration.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_jit_module.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_jit_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6bb66f1ff624621113c7ac96cc3a24d8b63e16b6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_jit_module.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_jitclasses.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_jitclasses.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75f18799c64fd403618ed1cf93b458d43b224cb9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_jitclasses.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_jitmethod.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_jitmethod.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..30bb96a392f198e440aae44449edd563b6cf80e3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_jitmethod.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_linalg.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_linalg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c423421c4827a4de94b30e64be38261b3bc2b9cf Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_linalg.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_listimpl.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_listimpl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd4bc28b52857c9cfbafb6c16d74474f19a540c2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_listimpl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_listobject.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_listobject.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d46b0640f5c81f546a3b3428373d7129844b51a8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_listobject.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_lists.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_lists.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4572e1a77c6334cd5e001f3347ac0335d1194175 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_lists.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_literal_dispatch.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_literal_dispatch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f0125f30b7da37d52fa00d2277dd748e301195d Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_literal_dispatch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_llvm_pass_timings.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_llvm_pass_timings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffe8a0685138d0794a01a27d70eeb0d7dfc07f08 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_llvm_pass_timings.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_llvm_version_check.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_llvm_version_check.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd0f79b18916416dd904594f06fc0b4f6c4cce58 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_llvm_version_check.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_locals.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_locals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c127bd9b6021751a53d05478df95c0f2dd24898 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_locals.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_looplifting.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_looplifting.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..fdd625a9f065692f7455295bd0a2100da6b9c947 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_looplifting.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_make_function_to_jit_function.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_make_function_to_jit_function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2826ab6ff6d36e448dc60f50544690a14d54b731 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_make_function_to_jit_function.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_mandelbrot.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_mandelbrot.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6a38ad2604a23472f5f82a61f7b939784d610da Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_mandelbrot.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_mangling.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_mangling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..decb4440c5f946b38d539c0ceb60c7bae411c16e Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_mangling.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_map_filter_reduce.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_map_filter_reduce.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6744052e23e9fd22caf9afe1cb667d31a455fa23 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_map_filter_reduce.cpython-310.pyc 
differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_mathlib.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_mathlib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b941237ae3648981801e75c10c0fb1c399a33d66 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_mathlib.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_maxmin.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_maxmin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1277c0a1e22cffc2dc372852739407832ff292a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_maxmin.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_misc_coverage_support.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_misc_coverage_support.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a4cb1a88b8a871cdbbf8b3067a06f3061661194 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_misc_coverage_support.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_mixed_tuple_unroller.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_mixed_tuple_unroller.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f51f8a7815b00dd8b458f7dca29f94f415c9e35 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_mixed_tuple_unroller.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_moved_modules.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_moved_modules.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..4aabd336a82abdf2d027698e2daa8950687b1354 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_moved_modules.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_multi3.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_multi3.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a65fbc97280f554ccf67c746a0612041dcb0575b Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_multi3.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_nan.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_nan.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bccd6e776c29df36473a71755c4723dfe9e93dab Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_nan.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_ndarray_subclasses.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_ndarray_subclasses.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4d0a2967cd50d737bf7131019c7f564b00f019d Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_ndarray_subclasses.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_nested_calls.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_nested_calls.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d456625731a2020e17fa8477fd6ab1065e7644a1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_nested_calls.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_new_type_system.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_new_type_system.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..afd440131a6dbf3bdf3433e44ef149b403ec58dc Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_new_type_system.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_np_randomgen.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_np_randomgen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e42fe551bf2543050964587f6b54ba8d8a150dc Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_np_randomgen.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_npdatetime.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_npdatetime.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..937f11950351043c7d283a3b9ea8acbd36453f00 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_npdatetime.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_nrt.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_nrt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bae8ec7db2282e4fe61b03a0bdd953b10421b199 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_nrt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_nrt_refct.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_nrt_refct.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..45f106f050aee8454c08186ec9322b96e485a4fd Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_nrt_refct.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_num_threads.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_num_threads.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55294efc7b0536d880bc9feafec6840aed09168f Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_num_threads.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_numberctor.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_numberctor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..343655c4ccf788899d2716335cda896acee20c46 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_numberctor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_numbers.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_numbers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..658a0d2a0e75684a0d25a4788198311292c272f9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_numbers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_numconv.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_numconv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b885713b43c9ca3f0b794d5d1672e355bccf138f Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_numconv.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_numpy_support.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_numpy_support.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32076d3b8bb51b83ad68e3158be954d88cb74b15 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_numpy_support.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_numpyadapt.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_numpyadapt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b175e280d653b37b2fdee7ae0c7b5f368d6bf58 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_numpyadapt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_obj_lifetime.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_obj_lifetime.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1acfd71e6cb2360d0e17690fbd47e2a08de56a0c Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_obj_lifetime.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_object_mode.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_object_mode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13a01a7ad08dfe8a09321da2598c08f7a4056e4b Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_object_mode.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_objects.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_objects.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..8cd69b3c83bf403157d602128f7bee11df73494e Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_objects.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_operators.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_operators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c6d72672bd98ac506e68931fcc0bdbef621a775 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_operators.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_optimisation_pipelines.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_optimisation_pipelines.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6eab4ceec9352166e05ba892218ff9e45846dde2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_optimisation_pipelines.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_optional.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_optional.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5275132bdba21e919b8f98203d1a531f5181694 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_optional.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_overlap.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_overlap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d738617520679f42b42d7d6626225a7ff4c8426 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_overlap.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_parallel_backend.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_parallel_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e1dd13c0b82fc6d7538bcefb5c96b1197dd37bb Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_parallel_backend.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_parfors_caching.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_parfors_caching.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d228f76b7ec6dc15e5af10ed74c2c16a514a750 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_parfors_caching.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_parfors_passes.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_parfors_passes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7cc8c63186747a9ed828158b3d78495792349af Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_parfors_passes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_pipeline.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_pipeline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38a7da9c7f8833f8bcfaf93b1cd74646661a9725 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_pipeline.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_polynomial.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_polynomial.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..be5cddf70b97f18188739e73441b0019f39b5b28 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_polynomial.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_practical_lowering_issues.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_practical_lowering_issues.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24d3c827230a4bea60287b4cd205304a8d201011 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_practical_lowering_issues.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_print.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_print.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..304f1f44e8cfecb378fc6298c651ed939d0cf55a Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_print.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_profiler.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b87daa0a280f963d4c98a1714ec00476428057d6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_profiler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_pycc.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_pycc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..681ed104cad2d94ce06eb4465f2b3719eadd935d Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_pycc.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_python_int.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_python_int.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94c625dad94b2ec29bee4aa917872b5ede3f0a5f Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_python_int.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_pythonapi.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_pythonapi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10318c0a12e4bd0c325a5e5abe4475d28c47cafc Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_pythonapi.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_random.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_random.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3d9cbd17388d4bf76f916da0c153b93a2b8d88d Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_random.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_range.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_range.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4275ac7f35ed3ff7769b0303e1ce179636c9ccb4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_range.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_recarray_usecases.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_recarray_usecases.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d003112328a101885daade3403d0313d644c19ef Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_recarray_usecases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_record_dtype.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_record_dtype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99cde735b5c1f1247f0f9c0a7e4226b27fe72fc8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_record_dtype.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_recursion.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_recursion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6bcb83c973430e7e9d19e9c2e6d9aec9f86080a Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_recursion.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_refop_pruning.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_refop_pruning.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e0b476338dd7bc1f8b850cf00d277845622db1b Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_refop_pruning.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_remove_dead.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_remove_dead.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13b8244cb6f4733c79c2911b84d453a4be717267 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_remove_dead.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_repr.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_repr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ce76b06adc1e7e152519697667a2ec1de5b9ca9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_repr.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_return_values.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_return_values.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12718fbfbe078f3ef6bba291183e2d161b737a57 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_return_values.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_runtests.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_runtests.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ac45bd6f9aef02fd1e125f3e4cb892172b6fc3c Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_runtests.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_serialize.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_serialize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9432662bfb3bfd3a2e655c326bbbda824ae43799 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_serialize.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_sets.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_sets.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71f4bada8267093a5c33bbd71a7c83e980c314db Binary 
files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_sets.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_slices.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_slices.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebc9e2514f4dbe9642c5f2be16b06c221059888d Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_slices.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_sort.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_sort.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a655a06e9a3bb637890c7b1e7afaed04828c0119 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_sort.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_ssa.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_ssa.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..acc8900a4f232ddc8925521dcf00cd83b4c412d8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_ssa.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_stencils.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_stencils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c31e0a9765d7d77d86dad4ea946dc46988544717 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_stencils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_storeslice.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_storeslice.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..655f7e67a26d24a6d9315d2086002d7f1bef23d0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_storeslice.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_struct_ref.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_struct_ref.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84f2c015b20e7c02248ee335541054a06ffe6eb2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_struct_ref.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_support.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_support.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d0a314568554c47d740bd0ca66fa14f7982fc96 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_support.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_svml.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_svml.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1192097c5bf1ac219214838708a49f0624daddf Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_svml.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_sys_monitoring.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_sys_monitoring.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efb67f059a132b15a45a04962ff96f05ee9f29c7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_sys_monitoring.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_sys_stdin_assignment.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_sys_stdin_assignment.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c6e436b17cd2976613ea605756ec7bac57b4093 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_sys_stdin_assignment.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_sysinfo.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_sysinfo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40723b861f1b98ab213fb0e6b141ebe7c156d370 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_sysinfo.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_target_extension.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_target_extension.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46e3d1b9695cbef08f1fa80a2019de73a4955630 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_target_extension.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_target_overloadselector.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_target_overloadselector.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d8898942354cc1a011172860f442f2db30e731f Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_target_overloadselector.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_threadsafety.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_threadsafety.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..032b8a7076247a8a13064c7dfade9b7518328f30 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_threadsafety.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_tracing.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_tracing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b8dc7ca00f8ef5e7fea7eae708fcba4b15db5c1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_tracing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_try_except.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_try_except.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67223ecc18ed4d767ae04eef2b8a279d8f7faaf6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_try_except.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_tuples.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_tuples.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a556eec2bced39da3295979ac4e8f3ef00c54f1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_tuples.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typeconv.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typeconv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7246f8309b5a06e67a742bcab5c55b7b7f2d346e Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typeconv.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typedlist.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typedlist.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c54638396e58069b4b16727eba1c2acbf82b272 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typedlist.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typedobjectutils.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typedobjectutils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d7db2d73260ea3fee2b4eccf5e5ed2c0c7d7f1a Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typedobjectutils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typeguard.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typeguard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..590a045da0e5ed50d53b3b4fe8c066e8e39a3bda Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typeguard.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typeinfer.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typeinfer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e9ec2a712b04eaab9be6e82e9813d5a044c88806 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typeinfer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typenames.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typenames.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..ea36fe4f0f8ff3a1dff4b669def3b67c33e086e0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typenames.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typeof.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typeof.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98ff1e0b09b4f1951b00234a2531cd8b98ca21af Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typeof.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_types.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9eefc918b6ac09dbd2791f89aba8465b8ac1d718 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_types.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typingerror.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typingerror.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bcf47733dc2049380b52afedb82991782c8d9bb Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_typingerror.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_ufuncs.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_ufuncs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..451a3cf45e177932ac222c2bcbd8ce4cc3fae74d Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_ufuncs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_unicode.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_unicode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9123d0b968cf910ef498fcb84c85de438fe45a28 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_unicode.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_unicode_array.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_unicode_array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5afae0455c8883027ad5bee26510b9f6c699fb40 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_unicode_array.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_unicode_names.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_unicode_names.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9bf0eaffe60f2eecd333a4e4b8a4d8e9cf90c0eb Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_unicode_names.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_unpack_sequence.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_unpack_sequence.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4cd9303f74234e0253c1f314903d666e347c618 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_unpack_sequence.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_unpickle_without_module.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_unpickle_without_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a96b1f5bfc01f365bcc705697ad1626f7264fcc0 Binary files 
/dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_unpickle_without_module.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_unsafe_intrinsics.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_unsafe_intrinsics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2566e1aebb225e69131a00a8006641e1eebcb28b Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_unsafe_intrinsics.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_usecases.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_usecases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c24664f204b51f9531fe42350949462451c8f15c Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_usecases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_vectorization.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_vectorization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..347b5bc5c1dbeb4a6947c4a40646d2f6c900883b Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_vectorization.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_vectorization_type_inference.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_vectorization_type_inference.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59a1ea098b253d3fffdfa0171bbfa017cd69116d Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_vectorization_type_inference.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_warnings.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_warnings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6734e62fc21f96ca761a8360029e34dfa03640b0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_warnings.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_withlifting.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_withlifting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5e51c0b1d654a93827a8626f2ebc482ac931338 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/test_withlifting.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/threading_backend_usecases.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/threading_backend_usecases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5510794d56a939b0f254d943132fe5e7fca38dd8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/threading_backend_usecases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/typedlist_usecases.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/typedlist_usecases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25a45f4fbbf44f6bdf250dd56633f67314b4b0b9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/typedlist_usecases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/__pycache__/usecases.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/usecases.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..21392cb420b4501b37b2362d6654a3a1c22c3cb0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/__pycache__/usecases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/annotation_usecases.py b/venv/lib/python3.10/site-packages/numba/tests/annotation_usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..a249ccbd5614ada1ddb035e929c5a2627539d319 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/annotation_usecases.py @@ -0,0 +1,16 @@ +""" +Usecases with Python 3 function annotations. This is a separate module +in order to avoid syntax errors with Python 2. +""" + + +class AnnotatedClass: + """ + A class with annotated methods. + """ + + def __init__(self, v: int): + self.x = v + + def add(self, v: int) -> int: + return self.x + v diff --git a/venv/lib/python3.10/site-packages/numba/tests/cache_usecases.py b/venv/lib/python3.10/site-packages/numba/tests/cache_usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..372311777526d160603e862a6c1d2d58eb316b1a --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/cache_usecases.py @@ -0,0 +1,172 @@ +""" +This file will be copied to a temporary directory in order to +exercise caching compiled Numba functions. + +See test_dispatcher.py. 
+""" + +import sys + +import numpy as np + +from numba import jit, prange +from numba.core import types + +from numba.tests.ctypes_usecases import c_sin +from numba.tests.support import TestCase, captured_stderr + + +@jit(cache=True, nopython=True) +def simple_usecase(x): + return x + +def simple_usecase_caller(x): + return simple_usecase(x) + + +@jit(cache=True, nopython=True) +def add_usecase(x, y): + return x + y + Z + + +@jit(cache=True, forceobj=True) +def add_objmode_usecase(x, y): + object() + return x + y + Z + + +@jit(nopython=True) +def add_nocache_usecase(x, y): + return x + y + Z + + +@jit(cache=True, nopython=True) +def inner(x, y): + return x + y + Z + +@jit(cache=True, nopython=True) +def outer(x, y): + return inner(-y, x) + +@jit(cache=False, nopython=True) +def outer_uncached(x, y): + return inner(-y, x) + + +@jit(cache=True, forceobj=True) +def looplifted(n): + object() + res = 0 + for i in range(n): + res = res + i + return res + + +@jit(cache=True, nopython=True) +def use_c_sin(x): + return c_sin(x) + +@jit(cache=True, nopython=True) +def use_c_sin_nest1(x): + return use_c_sin(x) + +@jit(cache=True, nopython=True) +def use_c_sin_nest2(x): + return use_c_sin_nest1(x) + + +@jit(cache=True, nopython=True) +def ambiguous_function(x): + return x + 2 + +renamed_function1 = ambiguous_function + +@jit(cache=True, nopython=True) +def ambiguous_function(x): + return x + 6 + +renamed_function2 = ambiguous_function + + +def make_closure(x): + @jit(cache=True, nopython=True) + def closure(y): + return x + y + + return closure + +closure1 = make_closure(3) +closure2 = make_closure(5) +closure3 = make_closure(7) +closure4 = make_closure(9) + + +biggie = np.arange(10**6) + +@jit(cache=True, nopython=True) +def use_big_array(): + return biggie + + +Z = 1 + +# Exercise returning a record instance. This used to hardcode the dtype +# pointer's value in the bitcode. 
+ +packed_record_type = np.dtype([('a', np.int8), ('b', np.float64)]) +aligned_record_type = np.dtype([('a', np.int8), ('b', np.float64)], align=True) + +packed_arr = np.empty(2, dtype=packed_record_type) +for i in range(packed_arr.size): + packed_arr[i]['a'] = i + 1 + packed_arr[i]['b'] = i + 42.5 + +aligned_arr = np.array(packed_arr, dtype=aligned_record_type) + +@jit(cache=True, nopython=True) +def record_return(ary, i): + return ary[i] + + +class _TestModule(TestCase): + """ + Tests for functionality of this module's functions. + Note this does not define any "test_*" method, instead check_module() + should be called by hand. + """ + + def check_module(self, mod): + self.assertPreciseEqual(mod.add_usecase(2, 3), 6) + self.assertPreciseEqual(mod.add_objmode_usecase(2, 3), 6) + self.assertPreciseEqual(mod.outer_uncached(3, 2), 2) + self.assertPreciseEqual(mod.outer(3, 2), 2) + + packed_rec = mod.record_return(mod.packed_arr, 1) + self.assertPreciseEqual(tuple(packed_rec), (2, 43.5)) + aligned_rec = mod.record_return(mod.aligned_arr, 1) + self.assertPreciseEqual(tuple(aligned_rec), (2, 43.5)) + + +@jit(cache=True) +def first_class_function_mul(x): + return x * x + + +@jit(cache=True) +def first_class_function_add(x): + return x + x + + +@jit(cache=True) +def first_class_function_usecase(f, x): + return f(x) + + +def self_test(): + mod = sys.modules[__name__] + _TestModule().check_module(mod) + + +@jit(parallel=True, cache=True, nopython=True) +def parfor_usecase(ary): + return ary * ary + ary diff --git a/venv/lib/python3.10/site-packages/numba/tests/cffi_usecases.py b/venv/lib/python3.10/site-packages/numba/tests/cffi_usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..a101b977d8895846abba5cd96157380afe2d6926 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/cffi_usecases.py @@ -0,0 +1,197 @@ +import sys + +import numpy as np + +import numba.core.typing.cffi_utils as cffi_support +from numba.tests.support import 
import_dynamic, temp_directory +from numba.core.types import complex128 + + +def load_inline_module(): + """ + Create an inline module, return the corresponding ffi and dll objects. + """ + from cffi import FFI + + # We can't rely on libc availability on Windows anymore, so we use our + # own compiled wrappers (see https://bugs.python.org/issue23606). + + defs = """ + double _numba_test_sin(double x); + double _numba_test_cos(double x); + double _numba_test_funcptr(double (*func)(double)); + bool _numba_test_boolean(void); + """ + + ffi = FFI() + ffi.cdef(defs) + # Load the _helperlib namespace + from numba import _helperlib + return ffi, ffi.dlopen(_helperlib.__file__) + + +def load_ool_module(): + """ + Compile an out-of-line module, return the corresponding ffi and + module objects. + """ + from cffi import FFI + + numba_complex = """ + typedef struct _numba_complex { + double real; + double imag; + } numba_complex; + """ + + bool_define = """ + #ifdef _MSC_VER + #define false 0 + #define true 1 + #define bool int + #else + #include + #endif + """ + + defs = numba_complex + """ + bool boolean(void); + double sin(double x); + double cos(double x); + int foo(int a, int b, int c); + void vsSin(int n, float* x, float* y); + void vdSin(int n, double* x, double* y); + void vector_real(numba_complex *c, double *real, int n); + void vector_imag(numba_complex *c, double *imag, int n); + """ + + source = numba_complex + bool_define + """ + static bool boolean(void) + { + return true; + } + + static int foo(int a, int b, int c) + { + return a + b * c; + } + + void vsSin(int n, float* x, float* y) { + int i; + for (i=0; i 0: + return fa(x) + else: + return fb(x) + +def use_user_defined_symbols(): + return cffi_foo(1, 2, 3) + +# The from_buffer method is member of cffi.FFI, and also of CompiledFFI objects +# (cffi_usecases_ool.ffi is a CompiledFFI object) so we use both in these +# functions. 
+ +def vector_sin_float32(x, y): + vsSin(len(x), ffi.from_buffer(x), ffi_ool.from_buffer(y)) + +def vector_sin_float64(x, y): + vdSin(len(x), ffi.from_buffer(x), ffi_ool.from_buffer(y)) + + +# For testing pointer to structs from buffers + +def vector_extract_real(x, y): + vector_real(ffi.from_buffer(x), ffi.from_buffer(y), len(x)) + +def vector_extract_imag(x, y): + vector_imag(ffi.from_buffer(x), ffi.from_buffer(y), len(x)) diff --git a/venv/lib/python3.10/site-packages/numba/tests/cfunc_cache_usecases.py b/venv/lib/python3.10/site-packages/numba/tests/cfunc_cache_usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..24c3079ceaa1ee74463d40ff57ed68cf9a1ae9a8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/cfunc_cache_usecases.py @@ -0,0 +1,70 @@ +""" +This file will be copied to a temporary directory in order to +exercise caching compiled C callbacks. + +See test_cfunc.py. +""" + +import sys + +from numba import cfunc, jit +from numba.tests.support import TestCase, captured_stderr + + +Z = 1 + +add_sig = "float64(float64, float64)" + +div_sig = "float64(int64, int64)" + + +@cfunc(add_sig, cache=True, nopython=True) +def add_usecase(x, y): + return x + y + Z + +@cfunc(add_sig, nopython=True) +def add_nocache_usecase(x, y): + return x + y + Z + +@cfunc(div_sig, cache=True, nopython=True) +def div_usecase(a, b): + return a / b + + +@jit(nopython=True) +def inner(x, y): + return x + y + Z + +@cfunc(add_sig, cache=True, nopython=True) +def outer(x, y): + return inner(-y, x) + + +class _TestModule(TestCase): + """ + Tests for functionality of this module's cfuncs. + Note this does not define any "test_*" method, instead check_module() + should be called by hand. 
+ """ + + def check_module(self, mod): + f = mod.add_usecase + self.assertPreciseEqual(f.ctypes(2.0, 3.0), 6.0) + f = mod.add_nocache_usecase + self.assertPreciseEqual(f.ctypes(2.0, 3.0), 6.0) + f = mod.outer + self.assertPreciseEqual(f.ctypes(5.0, 2.0), 4.0) + + f = mod.div_usecase + with captured_stderr() as err: + self.assertPreciseEqual(f.ctypes(7, 2), 3.5) + self.assertEqual(err.getvalue(), "") + with captured_stderr() as err: + f.ctypes(7, 0) + err = err.getvalue() + self.assertIn("ZeroDivisionError", err) + + +def self_test(): + mod = sys.modules[__name__] + _TestModule().check_module(mod) diff --git a/venv/lib/python3.10/site-packages/numba/tests/chained_assign_usecases.py b/venv/lib/python3.10/site-packages/numba/tests/chained_assign_usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..7f3232d9ed043cb98a72e2e425dd18d4046c6e15 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/chained_assign_usecases.py @@ -0,0 +1,69 @@ +from numba import jit +import numpy as np + + +@jit +def inc(a): + for i in range(len(a)): + a[i] += 1 + return a + + +@jit +def inc1(a): + a[0] += 1 + return a[0] + + +@jit +def inc2(a): + a[0] += 1 + return a[0], a[0] + 1 + + +def chain1(a): + x = y = z = inc(a) + return x + y + z + + +def chain2(v): + a = np.zeros(2) + a[0] = x = a[1] = v + return a[0] + a[1] + (x / 2) + + +def unpack1(x, y): + a, b = x, y + return a + b / 2 + + +def unpack2(x, y): + a, b = c, d = inc1(x), inc1(y) + return a + c / 2, b + d / 2 + + +def chain3(x, y): + a = (b, c) = (inc1(x), inc1(y)) + (d, e) = f = (inc1(x), inc1(y)) + return (a[0] + b / 2 + d + f[0]), (a[1] + c + e / 2 + f[1]) + + +def unpack3(x): + a, b = inc2(x) + return a + b / 2 + + +def unpack4(x): + a, b = c, d = inc2(x) + return a + c / 2, b + d / 2 + + +def unpack5(x): + a = b, c = inc2(x) + d, e = f = inc2(x) + return (a[0] + b / 2 + d + f[0]), (a[1] + c + e / 2 + f[1]) + + +def unpack6(x, y): + (a, b), (c, d) = (x, y), (y + 1, x + 1) + return a + c / 
2, b / 2 + d diff --git a/venv/lib/python3.10/site-packages/numba/tests/cloudpickle_main_class.py b/venv/lib/python3.10/site-packages/numba/tests/cloudpickle_main_class.py new file mode 100644 index 0000000000000000000000000000000000000000..0e29315b37219aa23e9d59051538bf6d311cbd14 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/cloudpickle_main_class.py @@ -0,0 +1,6 @@ +# Expected to run this module as __main__ + + +# Cloudpickle will think this is a dynamic class when this module is __main__ +class Klass: + classvar = None diff --git a/venv/lib/python3.10/site-packages/numba/tests/compile_with_pycc.py b/venv/lib/python3.10/site-packages/numba/tests/compile_with_pycc.py new file mode 100644 index 0000000000000000000000000000000000000000..3f3ae500d3e3520f56160c4512910b4a7c58e7e3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/compile_with_pycc.py @@ -0,0 +1,134 @@ +import cmath + +import numpy as np + +from numba import float32 +from numba.types import unicode_type, i8 +from numba.pycc import CC, exportmany, export +from numba.tests.support import has_blas +from numba import typed + + +# +# New API +# + +cc = CC('pycc_test_simple') +cc.use_nrt = False + +# Note the first signature omits the return type +@cc.export('multf', (float32, float32)) +@cc.export('multi', 'i4(i4, i4)') +def mult(a, b): + return a * b + +# Test imported C globals such as Py_None, PyExc_ZeroDivisionError +@cc.export('get_none', 'none()') +def get_none(): + return None + +@cc.export('div', 'f8(f8, f8)') +def div(x, y): + return x / y + +_two = 2 + +# This one can't be compiled by the legacy API as it doesn't execute +# the script in a proper module. 
+@cc.export('square', 'i8(i8)') +def square(u): + return u ** _two + +# These ones need helperlib +cc_helperlib = CC('pycc_test_helperlib') +cc_helperlib.use_nrt = False + +@cc_helperlib.export('power', 'i8(i8, i8)') +def power(u, v): + return u ** v + +@cc_helperlib.export('sqrt', 'c16(c16)') +def sqrt(u): + return cmath.sqrt(u) + +@cc_helperlib.export('size', 'i8(f8[:])') +def size(arr): + return arr.size + +# Exercise linking to Numpy math functions +@cc_helperlib.export('np_sqrt', 'f8(f8)') +def np_sqrt(u): + return np.sqrt(u) + +@cc_helperlib.export('spacing', 'f8(f8)') +def np_spacing(u): + return np.spacing(u) + + +# This one clashes with libc random() unless pycc is careful with naming. +@cc_helperlib.export('random', 'f8(i4)') +def random_impl(seed): + if seed != -1: + np.random.seed(seed) + return np.random.random() + +# These ones need NRT +cc_nrt = CC('pycc_test_nrt') + +@cc_nrt.export('zero_scalar', 'f8(i4)') +def zero_scalar(n): + arr = np.zeros(n) + return arr[-1] + +if has_blas: + # This one also needs BLAS + @cc_nrt.export('vector_dot', 'f8(i4)') + def vector_dot(n): + a = np.linspace(1, n, n) + return np.dot(a, a) + +# This one needs an environment +@cc_nrt.export('zeros', 'f8[:](i4)') +def zeros(n): + return np.zeros(n) + +# requires list dtor, #issue3535 +@cc_nrt.export('np_argsort', 'intp[:](float64[:])') +def np_argsort(arr): + return np.argsort(arr) + +# +# Legacy API +# + +exportmany(['multf f4(f4,f4)', 'multi i4(i4,i4)'])(mult) +# Needs to link to helperlib to due with complex arguments +# export('multc c16(c16,c16)')(mult) +export('mult f8(f8, f8)')(mult) + + +@cc_nrt.export('dict_usecase', 'intp[:](intp[:])') +def dict_usecase(arr): + d = typed.Dict() + for i in range(arr.size): + d[i] = arr[i] + out = np.zeros_like(arr) + for k, v in d.items(): + out[k] = k * v + return out + +# checks for issue #6386 +@cc_nrt.export('internal_str_dict', i8(unicode_type)) +def internal_str_dict(x): + d = typed.Dict.empty(unicode_type,i8) + if(x not in 
d): + d[x] = len(d) + return len(d) + +@cc_nrt.export('hash_str', i8(unicode_type)) +def internal_str_dict(x): + return hash(x) + +@cc_nrt.export('hash_literal_str_A', i8()) +def internal_str_dict(): + return hash("A") diff --git a/venv/lib/python3.10/site-packages/numba/tests/complex_usecases.py b/venv/lib/python3.10/site-packages/numba/tests/complex_usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..113cd69217adacc21dcdba1d9a27155e376625e5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/complex_usecases.py @@ -0,0 +1,93 @@ +import cmath + + +def div_usecase(x, y): + return x / y + + +def real_usecase(x): + return x.real + +def imag_usecase(x): + return x.imag + +def conjugate_usecase(x): + return x.conjugate() + + +def acos_usecase(x): + return cmath.acos(x) + +def cos_usecase(x): + return cmath.cos(x) + +def asin_usecase(x): + return cmath.asin(x) + +def sin_usecase(x): + return cmath.sin(x) + +def atan_usecase(x): + return cmath.atan(x) + +def tan_usecase(x): + return cmath.tan(x) + +def acosh_usecase(x): + return cmath.acosh(x) + +def cosh_usecase(x): + return cmath.cosh(x) + +def asinh_usecase(x): + return cmath.asinh(x) + +def sinh_usecase(x): + return cmath.sinh(x) + +def atanh_usecase(x): + return cmath.atanh(x) + +def tanh_usecase(x): + return cmath.tanh(x) + +def exp_usecase(x): + return cmath.exp(x) + +def isfinite_usecase(x): + return cmath.isfinite(x) + +def isinf_usecase(x): + return cmath.isinf(x) + +def isnan_usecase(x): + return cmath.isnan(x) + +def log_usecase(x): + return cmath.log(x) + +def log_base_usecase(x, base): + return cmath.log(x, base) + +def log10_usecase(x): + return cmath.log10(x) + +def phase_usecase(x): + return cmath.phase(x) + +def polar_usecase(x): + return cmath.polar(x) + +_two = 2.0 + +def polar_as_complex_usecase(x): + # HACK: clear errno by invoking float.__pow__ + # (workaround for http://bugs.python.org/issue24489) + _two ** _two + return complex(*cmath.polar(x)) + +def 
rect_usecase(r, phi): + return cmath.rect(r, phi) + +def sqrt_usecase(x): + return cmath.sqrt(x) diff --git a/venv/lib/python3.10/site-packages/numba/tests/ctypes_usecases.py b/venv/lib/python3.10/site-packages/numba/tests/ctypes_usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..5ed8d3ab8e1a480bd1f0f82127b176eca648686f --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/ctypes_usecases.py @@ -0,0 +1,114 @@ +from ctypes import * +import sys + +import numpy as np + + +is_windows = sys.platform.startswith('win32') + +# We can't rely on libc availability on Windows anymore, so we use our +# own compiled wrappers (see https://bugs.python.org/issue23606). + +from numba import _helperlib +libnumba = CDLL(_helperlib.__file__) +del _helperlib + +# A typed C function (cdecl under Windows) + +c_sin = libnumba._numba_test_sin +c_sin.argtypes = [c_double] +c_sin.restype = c_double + +def use_c_sin(x): + return c_sin(x) + +c_cos = libnumba._numba_test_cos +c_cos.argtypes = [c_double] +c_cos.restype = c_double + +def use_two_funcs(x): + return c_sin(x) - c_cos(x) + +# Typed C functions accepting an array pointer +# (either as a "void *" or as a typed pointer) + +c_vsquare = libnumba._numba_test_vsquare +c_vsquare.argtypes = [c_int, c_void_p, c_void_p] + +c_vcube = libnumba._numba_test_vsquare +c_vcube.argtypes = [c_int, POINTER(c_double), POINTER(c_double)] + +def use_c_vsquare(x): + out = np.empty_like(x) + c_vsquare(x.size, x.ctypes, out.ctypes) + return out + +def use_c_vcube(x): + out = np.empty_like(x) + c_vcube(x.size, x.ctypes, out.ctypes) + return out + +# An untyped C function + +c_untyped = libnumba._numba_test_exp + +def use_c_untyped(x): + return c_untyped(x) + +# A C function wrapped in a CFUNCTYPE + +ctype_wrapping = CFUNCTYPE(c_double, c_double)(use_c_sin) + +def use_ctype_wrapping(x): + return ctype_wrapping(x) + +# A Python API function + +savethread = pythonapi.PyEval_SaveThread +savethread.argtypes = [] 
+savethread.restype = c_void_p + +restorethread = pythonapi.PyEval_RestoreThread +restorethread.argtypes = [c_void_p] +restorethread.restype = None + +if is_windows: + # A function with the stdcall calling convention + c_sleep = windll.kernel32.Sleep + c_sleep.argtypes = [c_uint] + c_sleep.restype = None + + def use_c_sleep(x): + c_sleep(x) + + +def use_c_pointer(x): + """ + Running in Python will cause a segfault. + """ + threadstate = savethread() + x += 1 + restorethread(threadstate) + return x + + +def use_func_pointer(fa, fb, x): + if x > 0: + return fa(x) + else: + return fb(x) + + +mydct = {'what': 1232121} + +def call_me_maybe(arr): + return mydct[arr[0].decode('ascii')] + +# Create a callback into the python interpreter +py_call_back = CFUNCTYPE(c_int, py_object)(call_me_maybe) + + +def take_array_ptr(ptr): + return ptr + +c_take_array_ptr = CFUNCTYPE(c_void_p, c_void_p)(take_array_ptr) diff --git a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__init__.py b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b2958441fe7a098eeb66fe183b62bd4ff0edbbe2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__init__.py @@ -0,0 +1,10 @@ +from os.path import dirname +import unittest +from unittest.suite import TestSuite + +from numba.testing import load_testsuite + +def load_tests(loader, tests, pattern): + suite = TestSuite() + suite.addTests(load_testsuite(loader, dirname(__file__))) + return suite diff --git a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ec8920504ae56c1b4b9316b1c5ad2d16438c1b7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/__init__.cpython-310.pyc 
differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_examples.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_examples.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c94c03deceead664a6c0471fa9350a56d8ff479 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_examples.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_interval_example.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_interval_example.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ae033b5b677e57dcf01a954dbf8e555707c771c Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_interval_example.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_jitclass.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_jitclass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..168b773d56266961ff4903d0b3483d3f7eb56116 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_jitclass.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_literal_container_usage.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_literal_container_usage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d46b0a3d3b6e9c5ec0ad94457a24f1be0eb61f5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_literal_container_usage.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_literally_usage.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_literally_usage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8d147f15eea28648bff3f1508eceeae8ccbd18f Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_literally_usage.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_llvm_pass_timings.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_llvm_pass_timings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3cbc1c0b960f38d29bbb545651b97dc2f710ce5e Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_llvm_pass_timings.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_numpy_generators.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_numpy_generators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ae3bdcfb4b7fd0be4f886ea166636265b92fb82 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_numpy_generators.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_parallel_chunksize.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_parallel_chunksize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6fc1ed048a4c566552106bbfc75ca57b6523752 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_parallel_chunksize.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_rec_array.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_rec_array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4801ac072c2cff19fb6b99829317edf2f8196e9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_rec_array.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_structref_usage.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_structref_usage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c49865e9fdb0e3cd78f9bf497368e369fdc40088 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_structref_usage.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_typed_dict_usage.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_typed_dict_usage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73b25b43c9da1306c89ec1b7d3ab0a2c3256e504 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_typed_dict_usage.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_typed_list_usage.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_typed_list_usage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b1e6849bc18999e8b94d160af2cba102b43f88f Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/__pycache__/test_typed_list_usage.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_examples.py b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_examples.py new file mode 100644 index 0000000000000000000000000000000000000000..a0f7078b17d9efd2339c25b229b7e0e3281c3cbc --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_examples.py @@ -0,0 +1,660 @@ +# Contents in this file are referenced from the sphinx-generated docs. +# "magictoken" is used for markers as beginning and ending of example text. + +import sys +import unittest + +from numba.tests.support import TestCase, captured_stdout +from numba.core.config import IS_WIN32 +from numba.np.numpy_support import numpy_version + + +class MatplotlibBlocker: + '''Blocks the import of matplotlib, so that doc examples that attempt to + plot the output don't result in plots popping up and blocking testing.''' + + def find_spec(self, fullname, path, target=None): + if fullname == 'matplotlib': + msg = 'Blocked import of matplotlib for test suite run' + raise ImportError(msg) + + +class DocsExamplesTest(TestCase): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._mpl_blocker = MatplotlibBlocker() + + def setUp(self): + sys.meta_path.insert(0, self._mpl_blocker) + + def tearDown(self): + sys.meta_path.remove(self._mpl_blocker) + + def test_mandelbrot(self): + with captured_stdout(): + # magictoken.ex_mandelbrot.begin + from timeit import default_timer as timer + try: + from matplotlib.pylab import imshow, show + have_mpl = True + except ImportError: + have_mpl = False + import numpy as np + from numba import jit + + @jit(nopython=True) + def mandel(x, y, max_iters): + """ + Given the real and imaginary parts of a complex number, + determine if it is a candidate for membership in the Mandelbrot + set given a fixed number of iterations. 
+ """ + i = 0 + c = complex(x,y) + z = 0.0j + for i in range(max_iters): + z = z * z + c + if (z.real * z.real + z.imag * z.imag) >= 4: + return i + + return 255 + + @jit(nopython=True) + def create_fractal(min_x, max_x, min_y, max_y, image, iters): + height = image.shape[0] + width = image.shape[1] + + pixel_size_x = (max_x - min_x) / width + pixel_size_y = (max_y - min_y) / height + for x in range(width): + real = min_x + x * pixel_size_x + for y in range(height): + imag = min_y + y * pixel_size_y + color = mandel(real, imag, iters) + image[y, x] = color + + return image + + image = np.zeros((500 * 2, 750 * 2), dtype=np.uint8) + s = timer() + create_fractal(-2.0, 1.0, -1.0, 1.0, image, 20) + e = timer() + print(e - s) + if have_mpl: + imshow(image) + show() + # magictoken.ex_mandelbrot.end + + def test_moving_average(self): + with captured_stdout(): + # magictoken.ex_moving_average.begin + import numpy as np + + from numba import guvectorize + + @guvectorize(['void(float64[:], intp[:], float64[:])'], + '(n),()->(n)') + def move_mean(a, window_arr, out): + window_width = window_arr[0] + asum = 0.0 + count = 0 + for i in range(window_width): + asum += a[i] + count += 1 + out[i] = asum / count + for i in range(window_width, len(a)): + asum += a[i] - a[i - window_width] + out[i] = asum / count + + arr = np.arange(20, dtype=np.float64).reshape(2, 10) + print(arr) + print(move_mean(arr, 3)) + # magictoken.ex_moving_average.end + + def test_nogil(self): + with captured_stdout(): + # magictoken.ex_no_gil.begin + import math + import threading + from timeit import repeat + + import numpy as np + from numba import jit + + nthreads = 4 + size = 10**6 + + def func_np(a, b): + """ + Control function using Numpy. + """ + return np.exp(2.1 * a + 3.2 * b) + + @jit('void(double[:], double[:], double[:])', nopython=True, + nogil=True) + def inner_func_nb(result, a, b): + """ + Function under test. 
+ """ + for i in range(len(result)): + result[i] = math.exp(2.1 * a[i] + 3.2 * b[i]) + + def timefunc(correct, s, func, *args, **kwargs): + """ + Benchmark *func* and print out its runtime. + """ + print(s.ljust(20), end=" ") + # Make sure the function is compiled before the benchmark is + # started + res = func(*args, **kwargs) + if correct is not None: + assert np.allclose(res, correct), (res, correct) + # time it + print('{:>5.0f} ms'.format(min(repeat( + lambda: func(*args, **kwargs), number=5, repeat=2)) * 1000)) + return res + + def make_singlethread(inner_func): + """ + Run the given function inside a single thread. + """ + def func(*args): + length = len(args[0]) + result = np.empty(length, dtype=np.float64) + inner_func(result, *args) + return result + return func + + def make_multithread(inner_func, numthreads): + """ + Run the given function inside *numthreads* threads, splitting + its arguments into equal-sized chunks. + """ + def func_mt(*args): + length = len(args[0]) + result = np.empty(length, dtype=np.float64) + args = (result,) + args + chunklen = (length + numthreads - 1) // numthreads + # Create argument tuples for each input chunk + chunks = [[arg[i * chunklen:(i + 1) * chunklen] for arg in + args] for i in range(numthreads)] + # Spawn one thread per chunk + threads = [threading.Thread(target=inner_func, args=chunk) + for chunk in chunks] + for thread in threads: + thread.start() + for thread in threads: + thread.join() + return result + return func_mt + + func_nb = make_singlethread(inner_func_nb) + func_nb_mt = make_multithread(inner_func_nb, nthreads) + + a = np.random.rand(size) + b = np.random.rand(size) + + correct = timefunc(None, "numpy (1 thread)", func_np, a, b) + timefunc(correct, "numba (1 thread)", func_nb, a, b) + timefunc(correct, "numba (%d threads)" % nthreads, func_nb_mt, a, b) + # magictoken.ex_no_gil.end + + def test_vectorize_one_signature(self): + with captured_stdout(): + # magictoken.ex_vectorize_one_signature.begin + 
from numba import vectorize, float64 + + @vectorize([float64(float64, float64)]) + def f(x, y): + return x + y + # magictoken.ex_vectorize_one_signature.end + + def test_vectorize_multiple_signatures(self): + with captured_stdout(): + # magictoken.ex_vectorize_multiple_signatures.begin + from numba import vectorize, int32, int64, float32, float64 + import numpy as np + + @vectorize([int32(int32, int32), + int64(int64, int64), + float32(float32, float32), + float64(float64, float64)]) + def f(x, y): + return x + y + # magictoken.ex_vectorize_multiple_signatures.end + + # magictoken.ex_vectorize_return_call_one.begin + a = np.arange(6) + result = f(a, a) + # result == array([ 0, 2, 4, 6, 8, 10]) + # magictoken.ex_vectorize_return_call_one.end + + self.assertIsInstance(result, np.ndarray) + correct = np.array([0, 2, 4, 6, 8, 10]) + np.testing.assert_array_equal(result, correct) + + # magictoken.ex_vectorize_return_call_two.begin + a = np.linspace(0, 1, 6) + result = f(a, a) + # Now, result == array([0. , 0.4, 0.8, 1.2, 1.6, 2. ]) + # magictoken.ex_vectorize_return_call_two.end + + self.assertIsInstance(result, np.ndarray) + correct = np.array([0., 0.4, 0.8, 1.2, 1.6, 2. 
]) + np.testing.assert_allclose(result, correct) + + # magictoken.ex_vectorize_return_call_three.begin + a = np.arange(12).reshape(3, 4) + # a == array([[ 0, 1, 2, 3], + # [ 4, 5, 6, 7], + # [ 8, 9, 10, 11]]) + + result1 = f.reduce(a, axis=0) + # result1 == array([12, 15, 18, 21]) + + result2 = f.reduce(a, axis=1) + # result2 == array([ 6, 22, 38]) + + result3 = f.accumulate(a) + # result3 == array([[ 0, 1, 2, 3], + # [ 4, 6, 8, 10], + # [12, 15, 18, 21]]) + + result4 = f.accumulate(a, axis=1) + # result3 == array([[ 0, 1, 3, 6], + # [ 4, 9, 15, 22], + # [ 8, 17, 27, 38]]) + # magictoken.ex_vectorize_return_call_three.end + + self.assertIsInstance(result1, np.ndarray) + correct = np.array([12, 15, 18, 21]) + np.testing.assert_array_equal(result1, correct) + + self.assertIsInstance(result2, np.ndarray) + correct = np.array([6, 22, 38]) + np.testing.assert_array_equal(result2, correct) + + self.assertIsInstance(result3, np.ndarray) + correct = np.array([ + [0, 1, 2, 3], + [4, 6, 8, 10], + [12, 15, 18, 21] + ]) + np.testing.assert_array_equal(result3, correct) + + self.assertIsInstance(result4, np.ndarray) + correct = np.array([ + [0, 1, 3, 6], + [4, 9, 15, 22], + [8, 17, 27, 38] + ]) + np.testing.assert_array_equal(result4, correct) + + def test_guvectorize(self): + with captured_stdout(): + # magictoken.ex_guvectorize.begin + from numba import guvectorize, int64 + import numpy as np + + @guvectorize([(int64[:], int64, int64[:])], '(n),()->(n)') + def g(x, y, res): + for i in range(x.shape[0]): + res[i] = x[i] + y + # magictoken.ex_guvectorize.end + + # magictoken.ex_guvectorize_call_one.begin + a = np.arange(5) + result = g(a, 2) + # result == array([2, 3, 4, 5, 6]) + # magictoken.ex_guvectorize_call_one.end + + self.assertIsInstance(result, np.ndarray) + correct = np.array([2, 3, 4, 5, 6]) + np.testing.assert_array_equal(result, correct) + + # magictoken.ex_guvectorize_call_two.begin + a = np.arange(6).reshape(2, 3) + # a == array([[0, 1, 2], + # [3, 4, 5]]) + + 
result1 = g(a, 10) + # result1 == array([[10, 11, 12], + # [13, 14, 15]]) + + result2 = g(a, np.array([10, 20])) + g(a, np.array([10, 20])) + # result2 == array([[10, 11, 12], + # [23, 24, 25]]) + # magictoken.ex_guvectorize_call_two.end + + self.assertIsInstance(result1, np.ndarray) + correct = np.array([[10, 11, 12], [13, 14, 15]]) + np.testing.assert_array_equal(result1, correct) + + self.assertIsInstance(result2, np.ndarray) + correct = np.array([[10, 11, 12], [23, 24, 25]]) + np.testing.assert_array_equal(result2, correct) + + def test_guvectorize_scalar_return(self): + with captured_stdout(): + # magictoken.ex_guvectorize_scalar_return.begin + from numba import guvectorize, int64 + import numpy as np + + @guvectorize([(int64[:], int64, int64[:])], '(n),()->()') + def g(x, y, res): + acc = 0 + for i in range(x.shape[0]): + acc += x[i] + y + res[0] = acc + # magictoken.ex_guvectorize_scalar_return.end + + # magictoken.ex_guvectorize_scalar_return_call.begin + a = np.arange(5) + result = g(a, 2) + # At this point, result == 20. + # magictoken.ex_guvectorize_scalar_return_call.end + + self.assertIsInstance(result, np.integer) + self.assertEqual(result, 20) + + def test_guvectorize_jit(self): + with captured_stdout(): + # magictoken.gufunc_jit.begin + import numpy as np + + from numba import jit, guvectorize + + @guvectorize('(n)->(n)') + def copy(x, res): + for i in range(x.shape[0]): + res[i] = x[i] + + @jit(nopython=True) + def jit_fn(x, res): + copy(x, res) + # magictoken.gufunc_jit.end + + # magictoken.gufunc_jit_call.begin + x = np.arange(5, dtype='i4') + res = np.zeros_like(x) + jit_fn(x, res) + # At this point, res == np.array([0, 1, 2, 3, 4], 'i4'). 
+ # magictoken.gufunc_jit_call.end + self.assertPreciseEqual(x, res) + + def test_guvectorize_jit_fail(self): + with captured_stdout(): + # magictoken.gufunc_jit_fail.begin + import numpy as np + from numba import jit, guvectorize + + @guvectorize('(n)->(n)') + def copy(x, res): + for i in range(x.shape[0]): + res[i] = x[i] + + @jit(nopython=True) + def jit_fn(x, res): + copy(x, res) + + x = np.ones((1, 5)) + res = np.empty((5,)) + with self.assertRaises(ValueError) as raises: + jit_fn(x, res) + # magictoken.gufunc_jit_fail.end + self.assertIn('Loop and array shapes are incompatible', + str(raises.exception)) + + def test_guvectorize_overwrite(self): + with captured_stdout(): + # magictoken.ex_guvectorize_overwrite.begin + from numba import guvectorize, float64 + import numpy as np + + @guvectorize([(float64[:], float64[:])], '()->()') + def init_values(invals, outvals): + invals[0] = 6.5 + outvals[0] = 4.2 + # magictoken.ex_guvectorize_overwrite.end + + # magictoken.ex_guvectorize_overwrite_call_one.begin + invals = np.zeros(shape=(3, 3), dtype=np.float64) + # invals == array([[6.5, 6.5, 6.5], + # [6.5, 6.5, 6.5], + # [6.5, 6.5, 6.5]]) + + outvals = init_values(invals) + # outvals == array([[4.2, 4.2, 4.2], + # [4.2, 4.2, 4.2], + # [4.2, 4.2, 4.2]]) + # magictoken.ex_guvectorize_overwrite_call_one.end + + self.assertIsInstance(invals, np.ndarray) + correct = np.array([ + [6.5, 6.5, 6.5], + [6.5, 6.5, 6.5], + [6.5, 6.5, 6.5]]) + np.testing.assert_array_equal(invals, correct) + + self.assertIsInstance(outvals, np.ndarray) + correct = np.array([ + [4.2, 4.2, 4.2], + [4.2, 4.2, 4.2], + [4.2, 4.2, 4.2]]) + np.testing.assert_array_equal(outvals, correct) + + # magictoken.ex_guvectorize_overwrite_call_two.begin + invals = np.zeros(shape=(3, 3), dtype=np.float32) + # invals == array([[0., 0., 0.], + # [0., 0., 0.], + # [0., 0., 0.]], dtype=float32) + outvals = init_values(invals) + # outvals == array([[4.2, 4.2, 4.2], + # [4.2, 4.2, 4.2], + # [4.2, 4.2, 4.2]]) + 
print(invals) + # invals == array([[0., 0., 0.], + # [0., 0., 0.], + # [0., 0., 0.]], dtype=float32) + # magictoken.ex_guvectorize_overwrite_call_two.end + + self.assertIsInstance(invals, np.ndarray) + correct = np.array([ + [0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]], dtype=np.float32) + np.testing.assert_array_equal(invals, correct) + + self.assertIsInstance(outvals, np.ndarray) + correct = np.array([ + [4.2, 4.2, 4.2], + [4.2, 4.2, 4.2], + [4.2, 4.2, 4.2]]) + np.testing.assert_array_equal(outvals, correct) + + # magictoken.ex_guvectorize_overwrite_call_three.begin + @guvectorize( + [(float64[:], float64[:])], + '()->()', + writable_args=('invals',) + ) + def init_values(invals, outvals): + invals[0] = 6.5 + outvals[0] = 4.2 + + invals = np.zeros(shape=(3, 3), dtype=np.float32) + # invals == array([[0., 0., 0.], + # [0., 0., 0.], + # [0., 0., 0.]], dtype=float32) + outvals = init_values(invals) + # outvals == array([[4.2, 4.2, 4.2], + # [4.2, 4.2, 4.2], + # [4.2, 4.2, 4.2]]) + print(invals) + # invals == array([[6.5, 6.5, 6.5], + # [6.5, 6.5, 6.5], + # [6.5, 6.5, 6.5]], dtype=float32) + # magictoken.ex_guvectorize_overwrite_call_three.end + + self.assertIsInstance(invals, np.ndarray) + correct = np.array([ + [6.5, 6.5, 6.5], + [6.5, 6.5, 6.5], + [6.5, 6.5, 6.5]]) + np.testing.assert_array_equal(invals, correct) + + self.assertIsInstance(outvals, np.ndarray) + correct = np.array([ + [4.2, 4.2, 4.2], + [4.2, 4.2, 4.2], + [4.2, 4.2, 4.2]]) + np.testing.assert_array_equal(outvals, correct) + + def test_vectorize_dynamic(self): + with captured_stdout(): + # magictoken.ex_vectorize_dynamic.begin + from numba import vectorize + + @vectorize + def f(x, y): + return x * y + # magictoken.ex_vectorize_dynamic.end + + # magictoken.ex_vectorize_dynamic_call_one.begin + result = f(3,4) + # result == 12 + + print(f.types) + # ['ll->l'] + # magictoken.ex_vectorize_dynamic_call_one.end + + self.assertEqual(result, 12) + if IS_WIN32: + if numpy_version < (2, 0): + correct = 
['ll->q'] + else: + correct = ['qq->q'] + else: + correct = ['ll->l'] + self.assertEqual(f.types, correct) + + # magictoken.ex_vectorize_dynamic_call_two.begin + result = f(1.,2.) + # result == 2.0 + + print(f.types) + # ['ll->l', 'dd->d'] + # magictoken.ex_vectorize_dynamic_call_two.end + + self.assertEqual(result, 2.0) + if IS_WIN32: + if numpy_version < (2, 0): + correct = ['ll->q', 'dd->d'] + else: + correct = ['qq->q', 'dd->d'] + else: + correct = ['ll->l', 'dd->d'] + self.assertEqual(f.types, correct) + + # magictoken.ex_vectorize_dynamic_call_three.begin + result = f(1,2.) + # result == 2.0 + + print(f.types) + # ['ll->l', 'dd->d'] + # magictoken.ex_vectorize_dynamic_call_three.end + + self.assertEqual(result, 2.0) + if IS_WIN32: + if numpy_version < (2, 0): + correct = ['ll->q', 'dd->d'] + else: + correct = ['qq->q', 'dd->d'] + else: + correct = ['ll->l', 'dd->d'] + self.assertEqual(f.types, correct) + + # magictoken.ex_vectorize_dynamic_call_four.begin + @vectorize + def g(a, b): + return a / b + + print(g(2.,3.)) + # 0.66666666666666663 + + print(g(2,3)) + # 0.66666666666666663 + + print(g.types) + # ['dd->d'] + # magictoken.ex_vectorize_dynamic_call_four.end + + correct = ['dd->d'] + self.assertEqual(g.types, correct) + + def test_guvectorize_dynamic(self): + with captured_stdout(): + # magictoken.ex_guvectorize_dynamic.begin + from numba import guvectorize + import numpy as np + + @guvectorize('(n),()->(n)') + def g(x, y, res): + for i in range(x.shape[0]): + res[i] = x[i] + y + # magictoken.ex_guvectorize_dynamic.end + + # magictoken.ex_guvectorize_dynamic_call_one.begin + x = np.arange(5, dtype=np.int64) + y = 10 + res = np.zeros_like(x) + g(x, y, res) + # res == array([10, 11, 12, 13, 14]) + print(g.types) + # ['ll->l'] + # magictoken.ex_guvectorize_dynamic_call_one.end + + correct = np.array([10, 11, 12, 13, 14]) + np.testing.assert_array_equal(res, correct) + if IS_WIN32: + correct = ['qq->q'] + else: + correct = ['ll->l'] + 
self.assertEqual(g.types, correct) + + # magictoken.ex_guvectorize_dynamic_call_two.begin + x = np.arange(5, dtype=np.double) + y = 2.2 + res = np.zeros_like(x) + g(x, y, res) + # res == array([2.2, 3.2, 4.2, 5.2, 6.2]) + # magictoken.ex_guvectorize_dynamic_call_two.end + + # magictoken.ex_guvectorize_dynamic_call_three.begin + print(g.types) # shorthand for g.ufunc.types + # ['ll->l', 'dd->d'] + # magictoken.ex_guvectorize_dynamic_call_three.end + + if IS_WIN32: + correct = ['qq->q', 'dd->d'] + else: + correct = ['ll->l', 'dd->d'] + self.assertEqual(g.types, correct) + + # magictoken.ex_guvectorize_dynamic_call_four.begin + x = np.arange(5, dtype=np.int64) + y = 2 + res = np.zeros_like(x) + g(x, y, res) + print(res) + # res == array([2, 3, 4, 5, 6]) + # magictoken.ex_guvectorize_dynamic_call_four.end + + correct = np.array([2, 3, 4, 5, 6]) + np.testing.assert_array_equal(res, correct) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_interval_example.py b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_interval_example.py new file mode 100644 index 0000000000000000000000000000000000000000..78cf7935ef20a7b57452f7802b8069c3327f9eba --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_interval_example.py @@ -0,0 +1,242 @@ +""" +This test is used by `docs/source/extending/interval-example.rst`. + +The "magictoken" comments are used as markers for the beginning and ending of +example code. +""" +import unittest + + +class IntervalExampleTest(unittest.TestCase): + + def test_interval_class_usage(self): + # magictoken.interval_py_class.begin + class Interval(object): + """ + A half-open interval on the real number line. 
+ """ + def __init__(self, lo, hi): + self.lo = lo + self.hi = hi + + def __repr__(self): + return 'Interval(%f, %f)' % (self.lo, self.hi) + + @property + def width(self): + return self.hi - self.lo + # magictoken.interval_py_class.end + + # magictoken.interval_type_class.begin + from numba import types + + class IntervalType(types.Type): + def __init__(self): + super(IntervalType, self).__init__(name='Interval') + + interval_type = IntervalType() + # magictoken.interval_type_class.end + + # magictoken.interval_typeof_register.begin + from numba.extending import typeof_impl + + @typeof_impl.register(Interval) + def typeof_index(val, c): + return interval_type + # magictoken.interval_typeof_register.end + + # magictoken.numba_type_register.begin + from numba.extending import as_numba_type + + as_numba_type.register(Interval, interval_type) + # magictoken.numba_type_register.end + + # magictoken.numba_type_callable.begin + from numba.extending import type_callable + + @type_callable(Interval) + def type_interval(context): + def typer(lo, hi): + if isinstance(lo, types.Float) and isinstance(hi, types.Float): + return interval_type + return typer + # magictoken.numba_type_callable.end + + # magictoken.interval_model.begin + from numba.extending import models, register_model + + @register_model(IntervalType) + class IntervalModel(models.StructModel): + def __init__(self, dmm, fe_type): + members = [('lo', types.float64), + ('hi', types.float64),] + models.StructModel.__init__(self, dmm, fe_type, members) + # magictoken.interval_model.end + + # magictoken.interval_attribute_wrapper.begin + from numba.extending import make_attribute_wrapper + + make_attribute_wrapper(IntervalType, 'lo', 'lo') + make_attribute_wrapper(IntervalType, 'hi', 'hi') + # magictoken.interval_attribute_wrapper.end + + # magictoken.interval_overload_attribute.begin + from numba.extending import overload_attribute + + @overload_attribute(IntervalType, "width") + def get_width(interval): + def 
getter(interval): + return interval.hi - interval.lo + return getter + # magictoken.interval_overload_attribute.end + + # magictoken.interval_lower_builtin.begin + from numba.extending import lower_builtin + from numba.core import cgutils + + @lower_builtin(Interval, types.Float, types.Float) + def impl_interval(context, builder, sig, args): + typ = sig.return_type + lo, hi = args + interval = cgutils.create_struct_proxy(typ)(context, builder) + interval.lo = lo + interval.hi = hi + return interval._getvalue() + # magictoken.interval_lower_builtin.end + + # magictoken.interval_unbox.begin + from numba.extending import unbox, NativeValue + from contextlib import ExitStack + + @unbox(IntervalType) + def unbox_interval(typ, obj, c): + """ + Convert a Interval object to a native interval structure. + """ + is_error_ptr = cgutils.alloca_once_value(c.builder, cgutils.false_bit) + interval = cgutils.create_struct_proxy(typ)(c.context, c.builder) + + with ExitStack() as stack: + lo_obj = c.pyapi.object_getattr_string(obj, "lo") + with cgutils.early_exit_if_null(c.builder, stack, lo_obj): + c.builder.store(cgutils.true_bit, is_error_ptr) + lo_native = c.unbox(types.float64, lo_obj) + c.pyapi.decref(lo_obj) + with cgutils.early_exit_if(c.builder, stack, lo_native.is_error): + c.builder.store(cgutils.true_bit, is_error_ptr) + + hi_obj = c.pyapi.object_getattr_string(obj, "hi") + with cgutils.early_exit_if_null(c.builder, stack, hi_obj): + c.builder.store(cgutils.true_bit, is_error_ptr) + hi_native = c.unbox(types.float64, hi_obj) + c.pyapi.decref(hi_obj) + with cgutils.early_exit_if(c.builder, stack, hi_native.is_error): + c.builder.store(cgutils.true_bit, is_error_ptr) + + interval.lo = lo_native.value + interval.hi = hi_native.value + + return NativeValue(interval._getvalue(), is_error=c.builder.load(is_error_ptr)) + # magictoken.interval_unbox.end + + # magictoken.interval_box.begin + from numba.extending import box + + @box(IntervalType) + def box_interval(typ, val, c): + 
""" + Convert a native interval structure to an Interval object. + """ + ret_ptr = cgutils.alloca_once(c.builder, c.pyapi.pyobj) + fail_obj = c.pyapi.get_null_object() + + with ExitStack() as stack: + interval = cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val) + lo_obj = c.box(types.float64, interval.lo) + with cgutils.early_exit_if_null(c.builder, stack, lo_obj): + c.builder.store(fail_obj, ret_ptr) + + hi_obj = c.box(types.float64, interval.hi) + with cgutils.early_exit_if_null(c.builder, stack, hi_obj): + c.pyapi.decref(lo_obj) + c.builder.store(fail_obj, ret_ptr) + + class_obj = c.pyapi.unserialize(c.pyapi.serialize_object(Interval)) + with cgutils.early_exit_if_null(c.builder, stack, class_obj): + c.pyapi.decref(lo_obj) + c.pyapi.decref(hi_obj) + c.builder.store(fail_obj, ret_ptr) + + # NOTE: The result of this call is not checked as the clean up + # has to occur regardless of whether it is successful. If it + # fails `res` is set to NULL and a Python exception is set. 
+ res = c.pyapi.call_function_objargs(class_obj, (lo_obj, hi_obj)) + c.pyapi.decref(lo_obj) + c.pyapi.decref(hi_obj) + c.pyapi.decref(class_obj) + c.builder.store(res, ret_ptr) + + return c.builder.load(ret_ptr) + # magictoken.interval_box.end + + # magictoken.interval_usage.begin + from numba import njit + + @njit + def inside_interval(interval, x): + return interval.lo <= x < interval.hi + + @njit + def interval_width(interval): + return interval.width + + @njit + def sum_intervals(i, j): + return Interval(i.lo + j.lo, i.hi + j.hi) + # magictoken.interval_usage.end + + def check_equal_intervals(x, y): + self.assertIsInstance(x, Interval) + self.assertIsInstance(y, Interval) + self.assertEqual(x.lo, y.lo) + self.assertEqual(x.hi, y.hi) + + a = Interval(2, 3) + b = Interval(4, 5) + c = Interval(6, 8) + + # Test box-unbox + return_func = njit(lambda x: x) + check_equal_intervals(a, return_func(a)) + + # Test .width attribute + self.assertEqual(a.width, interval_width(a)) + + # Test exceptions + class NotAFloat: + def __float__(self): + raise RuntimeError("I am not a float") + + # TODO: This should produce a `RuntimeError`, but the `unbox` handler for `float` ignores + # the error raised by `__float__`, leading to a subsequent `TypeError` cause by passing + # `NULL` to `PyFloat_AsDouble`. + # This isn't the fault of the `Interval` extension that is being testing + # in this file. 
+ with self.assertRaises(TypeError): + interval_width(Interval(2, NotAFloat())) + + bad_interval = Interval(1, 2) + del bad_interval.hi + + with self.assertRaises(AttributeError): + interval_width(bad_interval) + + # Test .lo and .hi usage + self.assertFalse(inside_interval(a, 5)) + + # Test native Interval constructor + check_equal_intervals(c, sum_intervals(a, b)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_jitclass.py b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_jitclass.py new file mode 100644 index 0000000000000000000000000000000000000000..ca5faea423427b081cc49a9617e68f1bdb3936b5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_jitclass.py @@ -0,0 +1,97 @@ +# Contents in this file are referenced from the sphinx-generated docs. +# "magictoken" is used for markers as beginning and ending of example text. + +import unittest +from numba.tests.support import TestCase + + +class DocsJitclassUsageTest(TestCase): + + def test_ex_jitclass(self): + # magictoken.ex_jitclass.begin + import numpy as np + from numba import int32, float32 # import the types + from numba.experimental import jitclass + + spec = [ + ('value', int32), # a simple scalar field + ('array', float32[:]), # an array field + ] + + @jitclass(spec) + class Bag(object): + def __init__(self, value): + self.value = value + self.array = np.zeros(value, dtype=np.float32) + + @property + def size(self): + return self.array.size + + def increment(self, val): + for i in range(self.size): + self.array[i] += val + return self.array + + @staticmethod + def add(x, y): + return x + y + + n = 21 + mybag = Bag(n) + # magictoken.ex_jitclass.end + + self.assertTrue(isinstance(mybag, Bag)) + self.assertPreciseEqual(mybag.value, n) + np.testing.assert_allclose(mybag.array, np.zeros(n, dtype=np.float32)) + self.assertPreciseEqual(mybag.size, n) + 
np.testing.assert_allclose(mybag.increment(3), + 3 * np.ones(n, dtype=np.float32)) + np.testing.assert_allclose(mybag.increment(6), + 9 * np.ones(n, dtype=np.float32)) + self.assertPreciseEqual(mybag.add(1, 1), 2) + self.assertPreciseEqual(Bag.add(1, 2), 3) + + def test_ex_jitclass_type_hints(self): + # magictoken.ex_jitclass_type_hints.begin + from typing import List + from numba.experimental import jitclass + from numba.typed import List as NumbaList + + @jitclass + class Counter: + value: int + + def __init__(self): + self.value = 0 + + def get(self) -> int: + ret = self.value + self.value += 1 + return ret + + @jitclass + class ListLoopIterator: + counter: Counter + items: List[float] + + def __init__(self, items: List[float]): + self.items = items + self.counter = Counter() + + def get(self) -> float: + idx = self.counter.get() % len(self.items) + return self.items[idx] + + items = NumbaList([3.14, 2.718, 0.123, -4.]) + loop_itr = ListLoopIterator(items) + # magictoken.ex_jitclass_type_hints.end + + for idx in range(10): + self.assertEqual(loop_itr.counter.value, idx) + self.assertAlmostEqual(loop_itr.get(), items[idx % len(items)]) + self.assertEqual(loop_itr.counter.value, idx + 1) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_literal_container_usage.py b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_literal_container_usage.py new file mode 100644 index 0000000000000000000000000000000000000000..4872faa4c0aa689e1a83a1b570a4fbed0d540964 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_literal_container_usage.py @@ -0,0 +1,161 @@ +# Contents in this file are referenced from the sphinx-generated docs. +# "magictoken" is used for markers as beginning and ending of example text. 
+ +import unittest +from numba.tests.support import captured_stdout +from numba import typed + + +class DocsLiteralContainerUsageTest(unittest.TestCase): + + def test_ex_literal_dict_compile_time_consts(self): + with captured_stdout(): + # magictoken.test_ex_literal_dict_compile_time_consts.begin + import numpy as np + from numba import njit, types + from numba.extending import overload + + # overload this function + def specialize(x): + pass + + @overload(specialize) + def ol_specialize(x): + ld = x.literal_value + const_expr = [] + for k, v in ld.items(): + if isinstance(v, types.Literal): + lv = v.literal_value + if lv == 'cat': + const_expr.append("Meow!") + elif lv == 'dog': + const_expr.append("Woof!") + elif isinstance(lv, int): + const_expr.append(k.literal_value * lv) + else: # it's an array + const_expr.append("Array(dim={dim}".format(dim=v.ndim)) + const_strings = tuple(const_expr) + + def impl(x): + return const_strings + return impl + + @njit + def foo(): + pets_ints_and_array = {'a': 1, + 'b': 2, + 'c': 'cat', + 'd': 'dog', + 'e': np.ones(5,)} + return specialize(pets_ints_and_array) + + result = foo() + print(result) # ('a', 'bb', 'Meow!', 'Woof!', 'Array(dim=1') + # magictoken.test_ex_literal_dict_compile_time_consts.end + + self.assertEqual(result, ('a', 'bb', 'Meow!', 'Woof!', 'Array(dim=1')) + + def test_ex_initial_value_dict_compile_time_consts(self): + with captured_stdout(): + # magictoken.test_ex_initial_value_dict_compile_time_consts.begin + from numba import njit, literally + from numba.extending import overload + + # overload this function + def specialize(x): + pass + + @overload(specialize) + def ol_specialize(x): + iv = x.initial_value + if iv is None: + return lambda x: literally(x) # Force literal dispatch + assert iv == {'a': 1, 'b': 2, 'c': 3} # INITIAL VALUE + return lambda x: literally(x) + + @njit + def foo(): + d = {'a': 1, 'b': 2, 'c': 3} + d['c'] = 20 # no impact on .initial_value + d['d'] = 30 # no impact on .initial_value + 
return specialize(d) + + result = foo() + print(result) # {a: 1, b: 2, c: 20, d: 30} # NOT INITIAL VALUE! + # magictoken.test_ex_initial_value_dict_compile_time_consts.end + + expected = typed.Dict() + for k, v in {'a': 1, 'b': 2, 'c': 20, 'd': 30}.items(): + expected[k] = v + self.assertEqual(result, expected) + + def test_ex_literal_list(self): + with captured_stdout(): + # magictoken.test_ex_literal_list.begin + from numba import njit + from numba.extending import overload + + # overload this function + def specialize(x): + pass + + @overload(specialize) + def ol_specialize(x): + l = x.literal_value + const_expr = [] + for v in l: + const_expr.append(str(v)) + const_strings = tuple(const_expr) + + def impl(x): + return const_strings + return impl + + @njit + def foo(): + const_list = ['a', 10, 1j, ['another', 'list']] + return specialize(const_list) + + result = foo() + print(result) # ('Literal[str](a)', 'Literal[int](10)', 'complex128', 'list(unicode_type)') # noqa E501 + # magictoken.test_ex_literal_list.end + + expected = ('Literal[str](a)', 'Literal[int](10)', 'complex128', + "list(unicode_type)") + self.assertEqual(result, expected) + + def test_ex_initial_value_list_compile_time_consts(self): + with captured_stdout(): + # magictoken.test_ex_initial_value_list_compile_time_consts.begin + from numba import njit, literally + from numba.extending import overload + + # overload this function + def specialize(x): + pass + + @overload(specialize) + def ol_specialize(x): + iv = x.initial_value + if iv is None: + return lambda x: literally(x) # Force literal dispatch + assert iv == [1, 2, 3] # INITIAL VALUE + return lambda x: x + + @njit + def foo(): + l = [1, 2, 3] + l[2] = 20 # no impact on .initial_value + l.append(30) # no impact on .initial_value + return specialize(l) + + result = foo() + print(result) # [1, 2, 20, 30] # NOT INITIAL VALUE! 
+ # magictoken.test_ex_initial_value_list_compile_time_consts.end + + expected = [1, 2, 20, 30] + self.assertEqual(result, expected) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_literally_usage.py b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_literally_usage.py new file mode 100644 index 0000000000000000000000000000000000000000..8fe8b45d37d05e82cdaa47149f75395467e36655 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_literally_usage.py @@ -0,0 +1,59 @@ +# "magictoken" is used for markers as beginning and ending of example text. + +import unittest +from numba.tests.support import captured_stdout + + +class DocsLiterallyUsageTest(unittest.TestCase): + + def test_literally_usage(self): + with captured_stdout() as stdout: + # magictoken.ex_literally_usage.begin + import numba + + def power(x, n): + raise NotImplementedError + + @numba.extending.overload(power) + def ov_power(x, n): + if isinstance(n, numba.types.Literal): + # only if `n` is a literal + if n.literal_value == 2: + # special case: square + print("square") + return lambda x, n: x * x + elif n.literal_value == 3: + # special case: cubic + print("cubic") + return lambda x, n: x * x * x + else: + # If `n` is not literal, request literal dispatch + return lambda x, n: numba.literally(n) + + print("generic") + return lambda x, n: x ** n + + @numba.njit + def test_power(x, n): + return power(x, n) + + # should print "square" and "9" + print(test_power(3, 2)) + + # should print "cubic" and "27" + print(test_power(3, 3)) + + # should print "generic" and "81" + print(test_power(3, 4)) + + # magictoken.ex_literally_usage.end + assert test_power(3, 2) == 3 ** 2 + assert test_power(3, 3) == 3 ** 3 + assert test_power(3, 4) == 3 ** 4 + + self.assertEqual('square\n9\ncubic\n27\ngeneric\n81\n', + stdout.getvalue()) + + +if __name__ == '__main__': + unittest.main() diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_llvm_pass_timings.py b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_llvm_pass_timings.py new file mode 100644 index 0000000000000000000000000000000000000000..2f607c847b7bc13eeafa800f9e2151fa59e4e826 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_llvm_pass_timings.py @@ -0,0 +1,31 @@ +# "magictoken" is used for markers as beginning and ending of example text. + +import unittest +from numba.tests.support import captured_stdout, override_config + + +class DocsLLVMPassTimings(unittest.TestCase): + + def test_pass_timings(self): + with override_config('LLVM_PASS_TIMINGS', True): + with captured_stdout() as stdout: + # magictoken.ex_llvm_pass_timings.begin + import numba + + @numba.njit + def foo(n): + c = 0 + for i in range(n): + for j in range(i): + c += j + return c + + foo(10) + md = foo.get_metadata(foo.signatures[0]) + print(md['llvm_pass_timings']) + # magictoken.ex_llvm_pass_timings.end + self.assertIn("Finalize object", stdout.getvalue()) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_numpy_generators.py b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_numpy_generators.py new file mode 100644 index 0000000000000000000000000000000000000000..e10266d07e03f4a38a969c20da7fe923742f0086 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_numpy_generators.py @@ -0,0 +1,38 @@ +# "magictoken" is used for markers as beginning and ending of example text. 
+ +import unittest +import numpy as np +import numba + + +class NumpyGeneratorUsageTest(unittest.TestCase): + + def test_numpy_gen_usage(self): + # magictoken.npgen_usage.begin + x = np.random.default_rng(1) + y = np.random.default_rng(1) + + size = 10 + + @numba.njit + def do_stuff(gen): + return gen.random(size=int(size / 2)) + + original = x.random(size=size) + # [0.51182162 0.9504637 0.14415961 0.94864945 0.31183145 + # 0.42332645 0.82770259 0.40919914 0.54959369 0.02755911] + + numba_func_res = do_stuff(y) + # [0.51182162 0.9504637 0.14415961 0.94864945 0.31183145] + + after_numba = y.random(size=int(size / 2)) + # [0.42332645 0.82770259 0.40919914 0.54959369 0.02755911] + + # magictoken.npgen_usage.end + numba_res = np.concatenate((numba_func_res, after_numba)) + for _np_res, _nb_res in zip(original, numba_res): + self.assertEqual(_np_res, _nb_res) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_parallel_chunksize.py b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_parallel_chunksize.py new file mode 100644 index 0000000000000000000000000000000000000000..36e161a029bd3fa513ce118d551eba967de3ee5e --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_parallel_chunksize.py @@ -0,0 +1,122 @@ +# Contents in this file are referenced from the sphinx-generated docs. +# "magictoken" is used for markers as beginning and ending of example text. 
+ +import unittest +from numba.tests.support import captured_stdout, skip_parfors_unsupported +from numba import set_parallel_chunksize +from numba.tests.support import TestCase + + +@skip_parfors_unsupported +class ChunksizeExamplesTest(TestCase): + + _numba_parallel_test_ = False + + def setUp(self): + set_parallel_chunksize(0) + + def tearDown(self): + set_parallel_chunksize(0) + + def test_unbalanced_example(self): + with captured_stdout(): + # magictoken.ex_unbalanced.begin + from numba import (njit, + prange, + ) + import numpy as np + + @njit(parallel=True) + def func1(): + n = 100 + vals = np.empty(n) + # The work in each iteration of the following prange + # loop is proportional to its index. + for i in prange(n): + cur = i + 1 + for j in range(i): + if cur % 2 == 0: + cur //= 2 + else: + cur = cur * 3 + 1 + vals[i] = cur + return vals + + result = func1() + # magictoken.ex_unbalanced.end + self.assertPreciseEqual(result, func1.py_func()) + + def test_chunksize_manual(self): + with captured_stdout(): + # magictoken.ex_chunksize_manual.begin + from numba import (njit, + prange, + set_parallel_chunksize, + get_parallel_chunksize, + ) + + @njit(parallel=True) + def func1(n): + acc = 0 + print(get_parallel_chunksize()) # Will print 4. + for i in prange(n): + print(get_parallel_chunksize()) # Will print 0. + acc += i + print(get_parallel_chunksize()) # Will print 4. + return acc + + @njit(parallel=True) + def func2(n): + acc = 0 + # This version gets the previous chunksize explicitly. + old_chunksize = get_parallel_chunksize() + set_parallel_chunksize(8) + for i in prange(n): + acc += i + set_parallel_chunksize(old_chunksize) + return acc + + # This version saves the previous chunksize as returned + # by set_parallel_chunksize. 
+ old_chunksize = set_parallel_chunksize(4) + result1 = func1(12) + result2 = func2(12) + result3 = func1(12) + set_parallel_chunksize(old_chunksize) + # magictoken.ex_chunksize_manual.end + self.assertPreciseEqual(result1, func1.py_func(12)) + self.assertPreciseEqual(result2, func2.py_func(12)) + self.assertPreciseEqual(result3, func1.py_func(12)) + + def test_chunksize_with(self): + with captured_stdout(): + # magictoken.ex_chunksize_with.begin + from numba import njit, prange, parallel_chunksize + + @njit(parallel=True) + def func1(n): + acc = 0 + for i in prange(n): + acc += i + return acc + + @njit(parallel=True) + def func2(n): + acc = 0 + with parallel_chunksize(8): + for i in prange(n): + acc += i + return acc + + with parallel_chunksize(4): + result1 = func1(12) + result2 = func2(12) + result3 = func1(12) + # magictoken.ex_chunksize_with.end + self.assertPreciseEqual(result1, func1.py_func(12)) + self.assertPreciseEqual(result2, func2.py_func(12)) + self.assertPreciseEqual(result3, func1.py_func(12)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_rec_array.py b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_rec_array.py new file mode 100644 index 0000000000000000000000000000000000000000..74f7d9dc3c8828a1f6a2471f1dcba7f4c495ab48 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_rec_array.py @@ -0,0 +1,46 @@ +import unittest + + +class TestExample(unittest.TestCase): + + def test_documentation_example1(self): + # magictoken.ex_rec_arr_const_index.begin + import numpy as np + from numba import njit + + arr = np.array([(1, 2)], dtype=[('a1', 'f8'), ('a2', 'f8')]) + fields_gl = ('a1', 'a2') + + @njit + def get_field_sum(rec): + fields_lc = ('a1', 'a2') + field_name1 = fields_lc[0] + field_name2 = fields_gl[1] + return rec[field_name1] + rec[field_name2] + + get_field_sum(arr[0]) # returns 3 + # magictoken.ex_rec_arr_const_index.end 
+ self.assertEqual(get_field_sum(arr[0]), 3) + + def test_documentation_example2(self): + # magictoken.ex_rec_arr_lit_unroll_index.begin + import numpy as np + from numba import njit, literal_unroll + + arr = np.array([(1, 2)], dtype=[('a1', 'f8'), ('a2', 'f8')]) + fields_gl = ('a1', 'a2') + + @njit + def get_field_sum(rec): + out = 0 + for f in literal_unroll(fields_gl): + out += rec[f] + return out + + get_field_sum(arr[0]) # returns 3 + # magictoken.ex_rec_arr_lit_unroll_index.end + self.assertEqual(get_field_sum(arr[0]), 3) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_structref_usage.py b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_structref_usage.py new file mode 100644 index 0000000000000000000000000000000000000000..9634a9b0e3688b8ac8dc4fe7b36904fab3ec00d6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_structref_usage.py @@ -0,0 +1,149 @@ +# "magictoken" is used for markers as beginning and ending of example text. + +import unittest + +# magictoken.ex_structref_type_definition.begin +import numpy as np + +from numba import njit +from numba.core import types +from numba.experimental import structref + +from numba.tests.support import skip_unless_scipy + + +# Define a StructRef. +# `structref.register` associates the type with the default data model. +# This will also install getters and setters to the fields of +# the StructRef. +@structref.register +class MyStructType(types.StructRef): + def preprocess_fields(self, fields): + # This method is called by the type constructor for additional + # preprocessing on the fields. + # Here, we don't want the struct to take Literal types. + return tuple((name, types.unliteral(typ)) for name, typ in fields) + + +# Define a Python type that can be use as a proxy to the StructRef +# allocated inside Numba. 
Users can construct the StructRef via +# the constructor for this type in python code and jit-code. +class MyStruct(structref.StructRefProxy): + def __new__(cls, name, vector): + # Overriding the __new__ method is optional, doing so + # allows Python code to use keyword arguments, + # or add other customized behavior. + # The default __new__ takes `*args`. + # IMPORTANT: Users should not override __init__. + return structref.StructRefProxy.__new__(cls, name, vector) + + # By default, the proxy type does not reflect the attributes or + # methods to the Python side. It is up to users to define + # these. (This may be automated in the future.) + + @property + def name(self): + # To access a field, we can define a function that simply + # return the field in jit-code. + # The definition of MyStruct_get_name is shown later. + return MyStruct_get_name(self) + + @property + def vector(self): + # The definition of MyStruct_get_vector is shown later. + return MyStruct_get_vector(self) + + +@njit +def MyStruct_get_name(self): + # In jit-code, the StructRef's attribute is exposed via + # structref.register + return self.name + + +@njit +def MyStruct_get_vector(self): + return self.vector + + +# This associates the proxy with MyStructType for the given set of +# fields. Notice how we are not constraining the type of each field. +# Field types remain generic. +structref.define_proxy(MyStruct, MyStructType, ["name", "vector"]) +# magictoken.ex_structref_type_definition.end + + +@skip_unless_scipy +class TestStructRefUsage(unittest.TestCase): + def test_type_definition(self): + np.random.seed(0) + # Redirect print + buf = [] + + def print(*args): + buf.append(args) + + # magictoken.ex_structref_type_definition_test.begin + # Let's test our new StructRef. 
+ + # Define one in Python + alice = MyStruct("Alice", vector=np.random.random(3)) + + # Define one in jit-code + @njit + def make_bob(): + bob = MyStruct("unnamed", vector=np.zeros(3)) + # Mutate the attributes + bob.name = "Bob" + bob.vector = np.random.random(3) + return bob + + bob = make_bob() + + # Out: Alice: [0.5488135 0.71518937 0.60276338] + print(f"{alice.name}: {alice.vector}") + # Out: Bob: [0.88325739 0.73527629 0.87746707] + print(f"{bob.name}: {bob.vector}") + + # Define a jit function to operate on the structs. + @njit + def distance(a, b): + return np.linalg.norm(a.vector - b.vector) + + # Out: 0.4332647200356598 + print(distance(alice, bob)) + # magictoken.ex_structref_type_definition_test.end + + self.assertEqual(len(buf), 3) + + def test_overload_method(self): + # magictoken.ex_structref_method.begin + from numba.core.extending import overload_method + from numba.core.errors import TypingError + + # Use @overload_method to add a method for + # MyStructType.distance(other) + # where *other* is an instance of MyStructType. 
+ @overload_method(MyStructType, "distance") + def ol_distance(self, other): + # Guard that *other* is an instance of MyStructType + if not isinstance(other, MyStructType): + raise TypingError( + f"*other* must be a {MyStructType}; got {other}" + ) + + def impl(self, other): + return np.linalg.norm(self.vector - other.vector) + + return impl + + # Test + @njit + def test(): + alice = MyStruct("Alice", vector=np.random.random(3)) + bob = MyStruct("Bob", vector=np.random.random(3)) + # Use the method + return alice.distance(bob) + # magictoken.ex_structref_method.end + + self.assertIsInstance(test(), float) diff --git a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_typed_dict_usage.py b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_typed_dict_usage.py new file mode 100644 index 0000000000000000000000000000000000000000..93880fc92f9e41ade41d0676f84abe74b33ee388 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_typed_dict_usage.py @@ -0,0 +1,111 @@ +# Contents in this file are referenced from the sphinx-generated docs. +# "magictoken" is used for markers as beginning and ending of example text. + +import unittest +from numba.tests.support import captured_stdout + + +class DocsTypedDictUsageTest(unittest.TestCase): + + def test_ex_typed_dict_from_cpython(self): + with captured_stdout(): + # magictoken.ex_typed_dict_from_cpython.begin + import numpy as np + from numba import njit + from numba.core import types + from numba.typed import Dict + + # The Dict.empty() constructs a typed dictionary. + # The key and value typed must be explicitly declared. + d = Dict.empty( + key_type=types.unicode_type, + value_type=types.float64[:], + ) + + # The typed-dict can be used from the interpreter. 
+ d['posx'] = np.asarray([1, 0.5, 2], dtype='f8') + d['posy'] = np.asarray([1.5, 3.5, 2], dtype='f8') + d['velx'] = np.asarray([0.5, 0, 0.7], dtype='f8') + d['vely'] = np.asarray([0.2, -0.2, 0.1], dtype='f8') + + # Here's a function that expects a typed-dict as the argument + @njit + def move(d): + # inplace operations on the arrays + d['posx'] += d['velx'] + d['posy'] += d['vely'] + + print('posx: ', d['posx']) # Out: posx: [1. 0.5 2. ] + print('posy: ', d['posy']) # Out: posy: [1.5 3.5 2. ] + + # Call move(d) to inplace update the arrays in the typed-dict. + move(d) + + print('posx: ', d['posx']) # Out: posx: [1.5 0.5 2.7] + print('posy: ', d['posy']) # Out: posy: [1.7 3.3 2.1] + # magictoken.ex_typed_dict_from_cpython.end + + # Test + np.testing.assert_array_equal(d['posx'], [1.5, 0.5, 2.7]) + np.testing.assert_array_equal(d['posy'], [1.7, 3.3, 2.1]) + + def test_ex_typed_dict_njit(self): + with captured_stdout(): + # magictoken.ex_typed_dict_njit.begin + import numpy as np + from numba import njit + from numba.core import types + from numba.typed import Dict + + # Make array type. Type-expression is not supported in jit + # functions. + float_array = types.float64[:] + + @njit + def foo(): + # Make dictionary + d = Dict.empty( + key_type=types.unicode_type, + value_type=float_array, + ) + # Fill the dictionary + d["posx"] = np.arange(3).astype(np.float64) + d["posy"] = np.arange(3, 6).astype(np.float64) + return d + + d = foo() + # Print the dictionary + print(d) # Out: {posx: [0. 1. 2.], posy: [3. 4. 
5.]} + # magictoken.ex_typed_dict_njit.end + np.testing.assert_array_equal(d['posx'], [0, 1, 2]) + np.testing.assert_array_equal(d['posy'], [3, 4, 5]) + + def test_ex_inferred_dict_njit(self): + with captured_stdout(): + # magictoken.ex_inferred_dict_njit.begin + from numba import njit + import numpy as np + + @njit + def foo(): + d = dict() + k = {1: np.arange(1), 2: np.arange(2)} + # The following tells the compiler what the key type and the + # value + # type are for `d`. + d[3] = np.arange(3) + d[5] = np.arange(5) + return d, k + + d, k = foo() + print(d) # {3: [0 1 2], 5: [0 1 2 3 4]} + print(k) # {1: [0], 2: [0 1]} + # magictoken.ex_inferred_dict_njit.end + np.testing.assert_array_equal(d[3], [0, 1, 2]) + np.testing.assert_array_equal(d[5], [0, 1, 2, 3, 4]) + np.testing.assert_array_equal(k[1], [0]) + np.testing.assert_array_equal(k[2], [0, 1]) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_typed_list_usage.py b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_typed_list_usage.py new file mode 100644 index 0000000000000000000000000000000000000000..1d804625ecc69912af75a908bb1bc6432918b009 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/doc_examples/test_typed_list_usage.py @@ -0,0 +1,95 @@ +# Contents in this file are referenced from the sphinx-generated docs. +# "magictoken" is used for markers as beginning and ending of example text. 
+ +import unittest +from numba.tests.support import captured_stdout + + +class DocsTypedListUsageTest(unittest.TestCase): + + def test_ex_inferred_list_jit(self): + with captured_stdout(): + + # magictoken.ex_inferred_list_jit.begin + from numba import njit + from numba.typed import List + + @njit + def foo(): + # Instantiate a typed-list + l = List() + # Append a value to it, this will set the type to int32/int64 + # (depending on platform) + l.append(42) + # The usual list operations, getitem, pop and length are + # supported + print(l[0]) # 42 + l[0] = 23 + print(l[0]) # 23 + print(len(l)) # 1 + l.pop() + print(len(l)) # 0 + return l + + foo() + + # magictoken.ex_inferred_list_jit.end + + def test_ex_inferred_list(self): + with captured_stdout(): + # magictoken.ex_inferred_list.begin + from numba import njit + from numba.typed import List + + @njit + def foo(mylist): + for i in range(10, 20): + mylist.append(i) + return mylist + + # Instantiate a typed-list, outside of a jit context + l = List() + # Append a value to it, this will set the type to int32/int64 + # (depending on platform) + l.append(42) + # The usual list operations, getitem, pop and length are supported + print(l[0]) # 42 + l[0] = 23 + print(l[0]) # 23 + print(len(l)) # 1 + l.pop() + print(len(l)) # 0 + + # And you can use the typed-list as an argument for a jit compiled + # function + l = foo(l) + print(len(l)) # 10 + + # You can also directly construct a typed-list from an existing + # Python list + py_list = [2, 3, 5] + numba_list = List(py_list) + print(len(numba_list)) # 3 + + # magictoken.ex_inferred_list.end + + def test_ex_nested_list(self): + with captured_stdout(): + # magictoken.ex_nested_list.begin + from numba.typed import List + + # typed-lists can be nested in typed-lists + mylist = List() + for i in range(10): + l = List() + for i in range(10): + l.append(i) + mylist.append(l) + # mylist is now a list of 10 lists, each containing 10 integers + print(mylist) + + # 
magictoken.ex_nested_list.end + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/doctest_usecase.py b/venv/lib/python3.10/site-packages/numba/tests/doctest_usecase.py new file mode 100644 index 0000000000000000000000000000000000000000..1761ba08a47ff67b7285b081659e0e474e81b0a2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/doctest_usecase.py @@ -0,0 +1,31 @@ +""" +Test that all docstrings are the same: + +>>> len({f.__doc__ for f in (a, b, c, d)}) +1 +""" +from numba import guvectorize, int64, njit, vectorize + + +def a(): + """>>> x = 1""" + return 1 + + +@njit +def b(): + """>>> x = 1""" + return 1 + + +@guvectorize([(int64[:], int64, int64[:])], "(n),()->(n)") +def c(x, y, res): + """>>> x = 1""" + for i in range(x.shape[0]): + res[i] = x[i] + y + + +@vectorize([int64(int64, int64)]) +def d(x, y): + """>>> x = 1""" + return x + y diff --git a/venv/lib/python3.10/site-packages/numba/tests/dummy_module.py b/venv/lib/python3.10/site-packages/numba/tests/dummy_module.py new file mode 100644 index 0000000000000000000000000000000000000000..4152f3ff502021c9ed80bf147ca7bfe89a1f11ab --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/dummy_module.py @@ -0,0 +1,4 @@ +'''Dummy module''' + +def function(): + '''Do nothing''' diff --git a/venv/lib/python3.10/site-packages/numba/tests/enum_usecases.py b/venv/lib/python3.10/site-packages/numba/tests/enum_usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..995dbd917afb0abb2e48185ecb14adef692e2489 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/enum_usecases.py @@ -0,0 +1,55 @@ +from enum import Enum, IntEnum + + +class Color(Enum): + red = 1 + green = 2 + blue = 3 + + +class Shake(Enum): + vanilla = 7 + chocolate = 4 + cookies = 9 + # Same as Color.blue + mint = 3 + + +class Planet(Enum): + MERCURY = (3.303e+23, 2.4397e6) + VENUS = (4.869e+24, 6.0518e6) + EARTH = (5.976e+24, 6.37814e6) + MARS 
= (6.421e+23, 3.3972e6) + JUPITER = (1.9e+27, 7.1492e7) + SATURN = (5.688e+26, 6.0268e7) + URANUS = (8.686e+25, 2.5559e7) + NEPTUNE = (1.024e+26, 2.4746e7) + + +class HeterogeneousEnum(Enum): + red = 1.0 + green = 2.0 + blue = 3j + + +class Shape(IntEnum): + # Same as Color.green + circle = 2 + # Same as RequestError.internal_error + square = 500 + + +class RequestError(IntEnum): + dummy = 2 + not_found = 404 + internal_error = 500 + +class IntEnumWithNegatives(IntEnum): + # Used for testing of hash, need to make sure -1 -> -2 to comply with CPy + one = 1 + two = 2 + too = 2 + three = 3 + negone = -1 + negtwo = -2 + negthree = -3 diff --git a/venv/lib/python3.10/site-packages/numba/tests/error_usecases.py b/venv/lib/python3.10/site-packages/numba/tests/error_usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..c504a11c7fde9486a7aef5f2e38346e33750cbcf --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/error_usecases.py @@ -0,0 +1,6 @@ +import numba as nb + + +@nb.jit(nopython=True, parallel=True) +def foo(): + pass diff --git a/venv/lib/python3.10/site-packages/numba/tests/errorhandling_usecases.py b/venv/lib/python3.10/site-packages/numba/tests/errorhandling_usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..24e9c3fbc47f1a5722a9d9680fbf4694f0efdbba --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/errorhandling_usecases.py @@ -0,0 +1,14 @@ +from numba import typed, int64 + +# used in TestMiscErrorHandling::test_handling_of_write_to_*_global +_global_list = [1, 2, 3, 4] + +_global_dict = typed.Dict.empty(int64, int64) + + +def global_reflected_write(): + _global_list[0] = 10 + + +def global_dict_write(): + _global_dict[0] = 10 diff --git a/venv/lib/python3.10/site-packages/numba/tests/gdb/__init__.py b/venv/lib/python3.10/site-packages/numba/tests/gdb/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b2958441fe7a098eeb66fe183b62bd4ff0edbbe2 --- 
/dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/gdb/__init__.py @@ -0,0 +1,10 @@ +from os.path import dirname +import unittest +from unittest.suite import TestSuite + +from numba.testing import load_testsuite + +def load_tests(loader, tests, pattern): + suite = TestSuite() + suite.addTests(load_testsuite(loader, dirname(__file__))) + return suite diff --git a/venv/lib/python3.10/site-packages/numba/tests/gdb/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/gdb/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0be505da91fb66dd657616591a2c2cd1684c16cb Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/gdb/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/gdb/__pycache__/test_array_arg.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/gdb/__pycache__/test_array_arg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d74a03bf061b5741773c354cea543aaefc00d9f Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/gdb/__pycache__/test_array_arg.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/gdb/__pycache__/test_basic.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/gdb/__pycache__/test_basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8fa39b9e7e1f20361e73798cbd2dacf6c75c8db Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/gdb/__pycache__/test_basic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/gdb/__pycache__/test_break_on_symbol.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/gdb/__pycache__/test_break_on_symbol.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb7e942d3f525e337db1f481c19f2231bfce0799 Binary files /dev/null 
and b/venv/lib/python3.10/site-packages/numba/tests/gdb/__pycache__/test_break_on_symbol.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/gdb/__pycache__/test_break_on_symbol_version.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/gdb/__pycache__/test_break_on_symbol_version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15ffcb932da1e68f19c211619e84cc28fb066154 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/gdb/__pycache__/test_break_on_symbol_version.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/gdb/__pycache__/test_conditional_breakpoint.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/gdb/__pycache__/test_conditional_breakpoint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4f5e9fa0654b4aabf4a8fe011a7e839a56f390d Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/gdb/__pycache__/test_conditional_breakpoint.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/gdb/__pycache__/test_pretty_print.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/gdb/__pycache__/test_pretty_print.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b30568be7bfb5e0a84210880c1a215bf254dfc14 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/gdb/__pycache__/test_pretty_print.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/gdb/test_array_arg.py b/venv/lib/python3.10/site-packages/numba/tests/gdb/test_array_arg.py new file mode 100644 index 0000000000000000000000000000000000000000..cb48dae2c536a7b1e2568244bf2058ce934238f0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/gdb/test_array_arg.py @@ -0,0 +1,51 @@ +# NOTE: This test is sensitive to line numbers as it checks breakpoints +from numba import njit, types +import 
numpy as np +from numba.tests.gdb_support import GdbMIDriver +from numba.tests.support import TestCase, needs_subprocess +import unittest + + +@needs_subprocess +class Test(TestCase): + + def test(self): + @njit(debug=True) + def foo(x): + z = np.ones_like(x) # break here + return x, z + + tmp = np.ones(5) + foo(tmp) + + driver = GdbMIDriver(__file__) + driver.set_breakpoint(line=15) + driver.run() + driver.check_hit_breakpoint(1) + driver.stack_list_arguments(2) + llvm_intp = f"i{types.intp.bitwidth}" + expect = ( + '[frame={level="0",args=[{name="x",type="array(float64, 1d, C) ' + f'({{i8*, i8*, {llvm_intp}, {llvm_intp}, double*, ' + f'[1 x {llvm_intp}], [1 x {llvm_intp}]}})"}}]}}]' + ) + driver.assert_output(expect) + driver.stack_list_variables(1) + # 'z' should be zero-init + expect = ('{name="z",value="{meminfo = 0x0, parent = 0x0, nitems = 0, ' + 'itemsize = 0, data = 0x0, shape = {0}, strides = {0}}"}') + driver.assert_output(expect) + driver.set_breakpoint(line=16) + driver.cont() + driver.check_hit_breakpoint(2) + driver.stack_list_variables(1) + # 'z' should be populated + expect = (r'^.*\{name="z",value="\{meminfo = 0x[0-9a-f]+ .*, ' + r'parent = 0x0, nitems = 5, itemsize = 8, ' + r'data = 0x[0-9a-f]+, shape = \{5\}, strides = \{8\}\}.*$') + driver.assert_regex_output(expect) + driver.quit() + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/gdb/test_basic.py b/venv/lib/python3.10/site-packages/numba/tests/gdb/test_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..2a28d6f7468221098ac56e41cb221f704b27a0be --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/gdb/test_basic.py @@ -0,0 +1,39 @@ +# NOTE: This test is sensitive to line numbers as it checks breakpoints +from numba import njit, types +from numba.tests.gdb_support import GdbMIDriver +from numba.tests.support import TestCase, needs_subprocess +import unittest + + +@needs_subprocess +class 
Test(TestCase): + + def test(self): + @njit(debug=True) + def foo(x): + z = 7 + x # break here + return x, z + + foo(120) + + sz = types.intp.bitwidth + driver = GdbMIDriver(__file__) + driver.set_breakpoint(line=14) + driver.run() + driver.check_hit_breakpoint(1) + driver.stack_list_arguments(2) + expect = ('[frame={level="0",args=[{name="x",type="int%s",' + 'value="120"}]}]' % sz) + driver.assert_output(expect) + driver.stack_list_variables(1) + expect = '[{name="x",arg="1",value="120"},{name="z",value="0"}]' + driver.assert_output(expect) + driver.next() + driver.stack_list_variables(1) + expect = '[{name="x",arg="1",value="120"},{name="z",value="127"}]' + driver.assert_output(expect) + driver.quit() + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/gdb/test_break_on_symbol.py b/venv/lib/python3.10/site-packages/numba/tests/gdb/test_break_on_symbol.py new file mode 100644 index 0000000000000000000000000000000000000000..7743aafc3bebc87db5257452c34ada5b1cf5b682 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/gdb/test_break_on_symbol.py @@ -0,0 +1,34 @@ +# NOTE: This test is sensitive to line numbers as it checks breakpoints +from numba import njit, types +from numba.tests.gdb_support import GdbMIDriver +from numba.tests.support import TestCase, needs_subprocess +import unittest + + +@njit(debug=True) +def foo(x): + z = 7 + x + return x, z + + +@needs_subprocess +class Test(TestCase): + + def test(self): + foo(120) + sz = types.intp.bitwidth + driver = GdbMIDriver(__file__) + driver.set_breakpoint(symbol="__main__::foo") + driver.run() # will hit cpython symbol match + driver.check_hit_breakpoint(number=1) + driver.cont() # will hit njit symbol match + driver.check_hit_breakpoint(number=1, line=10) # Ensure line number + driver.stack_list_arguments(2) + expect = ('[frame={level="0",args=[{name="x",type="int%s",' + 'value="120"}]}]' % sz) + driver.assert_output(expect) + driver.quit() + + 
+if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/gdb/test_break_on_symbol_version.py b/venv/lib/python3.10/site-packages/numba/tests/gdb/test_break_on_symbol_version.py new file mode 100644 index 0000000000000000000000000000000000000000..72f09b19ab11f11b3d6bfb68cefec554a6e9f29e --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/gdb/test_break_on_symbol_version.py @@ -0,0 +1,65 @@ +# NOTE: This test is sensitive to line numbers as it checks breakpoints +from numba import njit +from numba.tests.gdb_support import GdbMIDriver +from numba.tests.support import TestCase, needs_subprocess +import unittest + + +def foo_factory(n): + @njit(debug=True) + def foo(x): + z = 7 + n + return x, z + + return foo + + +foo1, foo2, foo3 = [foo_factory(x) for x in range(3)] + + +@njit(debug=True) +def call_foo(): + a = foo1(10) + b = foo2(20) + c = foo3(30) + return a, b, c + + +@needs_subprocess +class Test(TestCase): + + def test(self): + call_foo() + driver = GdbMIDriver(__file__) + # A specific foo, the first one, it has uid=2 + vsym = "__main__::foo_factory::_3clocals_3e::foo[abi:v2]" + driver.set_breakpoint(symbol=vsym) + driver.run() + driver.check_hit_breakpoint(number=1) + driver.assert_regex_output(r'^.*foo\[abi:v2\].*line="11"') + driver.stack_list_arguments(2) + expect = ('[frame={level="0",args=[{name="x",type="Literal[int](10)",' + 'value="10"}]}]') + driver.assert_output(expect) + # Now break on any foo + driver.set_breakpoint(symbol="foo") + driver.cont() + driver.check_hit_breakpoint(number=2) + driver.assert_regex_output(r'^.*foo\[abi:v3\].*line="11"') + driver.stack_list_arguments(2) + expect = ('[frame={level="0",args=[{name="x",type="Literal[int](20)",' + 'value="20"}]}]') + driver.assert_output(expect) + # and again, hit the third foo + driver.cont() + driver.check_hit_breakpoint(number=2) + driver.assert_regex_output(r'^.*foo\[abi:v4\].*line="11"') + driver.stack_list_arguments(2) + expect = 
('[frame={level="0",args=[{name="x",type="Literal[int](30)",' + 'value="30"}]}]') + driver.assert_output(expect) + driver.quit() + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/gdb/test_conditional_breakpoint.py b/venv/lib/python3.10/site-packages/numba/tests/gdb/test_conditional_breakpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..867a53eb16b82cb0fbd5e03a8e833afd3640bfcf --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/gdb/test_conditional_breakpoint.py @@ -0,0 +1,45 @@ +# NOTE: This test is sensitive to line numbers as it checks breakpoints +from numba import njit +from numba.tests.gdb_support import GdbMIDriver +from numba.tests.support import TestCase, needs_subprocess +import unittest + + +@needs_subprocess +class Test(TestCase): + + def test(self): + + @njit(debug=True) + def foo(x, y): + c = x + y # break-here + return c + + @njit(debug=True) + def call_foo(a): + acc = 0 + for i in range(10): + acc += foo(i, a) + return acc + + call_foo(10) + + driver = GdbMIDriver(__file__) + driver.set_breakpoint(line=15, condition='x == 4') + driver.run() + driver.check_hit_breakpoint(1) + driver.stack_list_arguments(1) + expect = ('[frame={level="0",args=[{name="x",value="4"},' + '{name="y",value="10"}]}]') + driver.assert_output(expect) + driver.set_breakpoint(line=22, condition='i == 8') + driver.cont() + driver.check_hit_breakpoint(2) + driver.stack_list_variables(1) + # i should be 8 + driver.assert_output('{name="i",value="8"}') + driver.quit() + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/gdb/test_pretty_print.py b/venv/lib/python3.10/site-packages/numba/tests/gdb/test_pretty_print.py new file mode 100644 index 0000000000000000000000000000000000000000..b0be5dbe8e4fcb78e2c1edafc679a4ebe3b1bbee --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/gdb/test_pretty_print.py @@ -0,0 +1,69 
@@ +# NOTE: This test is sensitive to line numbers as it checks breakpoints +from numba import njit +import numpy as np +from numba.tests.gdb_support import GdbMIDriver, needs_gdb_py3 +from numba.tests.support import TestCase, needs_subprocess +from numba.misc.numba_gdbinfo import collect_gdbinfo +import unittest +import re + + +@needs_gdb_py3 +@needs_subprocess +class Test(TestCase): + + def test(self): + rdt_a = np.dtype([("x", np.int16), ("y", np.float64)], align=True) + + @njit(debug=True) + def foo(): + a = 1.234 + b = (1, 2, 3) + c = ('a', b, 4) + d = np.arange(5.) + e = np.array([[1, 3j], [2, 4j]]) + f = "Some string" + " L-Padded string".lstrip() + g = 11 + 22j + h = np.arange(24).reshape((4, 6))[::2, ::3] + i = np.zeros(2, dtype=rdt_a) + return a, b, c, d, e, f, g, h, i + + foo() + + extension = collect_gdbinfo().extension_loc + driver = GdbMIDriver(__file__, init_cmds=['-x', extension], debug=False) + driver.set_breakpoint(line=29) + driver.run() + driver.check_hit_breakpoint(1) + + # Ideally the function would be run to get the string repr of locals + # but not everything appears in DWARF e.g. string literals. Further, + # str on NumPy arrays seems to vary a bit in output. Therefore a custom + # match is used. + + driver.stack_list_variables(1) + output = driver._captured.after.decode('UTF-8') + done_str = output.splitlines()[0] + pat = r'^\^done,variables=\[\{(.*)\}\]$' + lcls_strs = re.match(pat, done_str).groups()[0].split('},{') + lcls = {k: v for k, v in [re.match(r'name="(.*)",value="(.*)"', + x).groups() for x in lcls_strs]} + expected = dict() + expected['a'] = r'1\.234' + expected['b'] = r'\(1, 2, 3\)' + expected['c'] = r'\(0x0, \(1, 2, 3\), 4\)' + expected['d'] = r'\\n\[0. 1. 2. 3. 
4.\]' + expected['e'] = r'\\n\[\[1.\+0.j 0.\+3.j\]\\n \[2.\+0.j 0.\+4.j\]\]' + expected['f'] = "'Some stringL-Padded string'" + expected['g'] = r"11\+22j" + expected['h'] = r'\\n\[\[ 0 3\]\\n \[12 15\]\]' + expected['i'] = r'\\n\[\(0, 0.\) \(0, 0.\)\]' + + for k, v in expected.items(): + self.assertRegex(lcls[k], v) + + driver.quit() + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/gdb_support.py b/venv/lib/python3.10/site-packages/numba/tests/gdb_support.py new file mode 100644 index 0000000000000000000000000000000000000000..7a40bea0388a1468becd36474efede4f800bd790 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/gdb_support.py @@ -0,0 +1,197 @@ +"""Helpers for running gdb related testing""" +import os +import re +import sys +import unittest +from numba.core import config +from numba.misc.gdb_hook import _confirm_gdb +from numba.misc.numba_gdbinfo import collect_gdbinfo + +# check if gdb is present and working +try: + _confirm_gdb(need_ptrace_attach=False) # The driver launches as `gdb EXE`. + _HAVE_GDB = True + _gdb_info = collect_gdbinfo() + _GDB_HAS_PY3 = _gdb_info.py_ver.startswith('3') +except Exception: + _HAVE_GDB = False + _GDB_HAS_PY3 = False + +_msg = "functioning gdb with correct ptrace permissions is required" +needs_gdb = unittest.skipUnless(_HAVE_GDB, _msg) + +_msg = "gdb with python 3 support needed" +needs_gdb_py3 = unittest.skipUnless(_GDB_HAS_PY3, _msg) + + +try: + import pexpect + _HAVE_PEXPECT = True +except ImportError: + _HAVE_PEXPECT = False + + +_msg = "pexpect module needed for test" +skip_unless_pexpect = unittest.skipUnless(_HAVE_PEXPECT, _msg) + + +class GdbMIDriver(object): + """ + Driver class for the GDB machine interface: + https://sourceware.org/gdb/onlinedocs/gdb/GDB_002fMI.html + """ + def __init__(self, file_name, debug=False, timeout=120, init_cmds=None): + if not _HAVE_PEXPECT: + msg = ("This driver requires the pexpect module. 
This can be " + "obtained via:\n\n$ conda install pexpect") + raise RuntimeError(msg) + if not _HAVE_GDB: + msg = ("This driver requires a gdb binary. This can be " + "obtained via the system package manager.") + raise RuntimeError(msg) + self._gdb_binary = config.GDB_BINARY + self._python = sys.executable + self._debug = debug + self._file_name = file_name + self._timeout = timeout + self._init_cmds = init_cmds + self._drive() + + def _drive(self): + """This function sets up the caputured gdb instance""" + assert os.path.isfile(self._file_name) + cmd = [self._gdb_binary, '--interpreter', 'mi'] + if self._init_cmds is not None: + cmd += list(self._init_cmds) + cmd += ['--args', self._python, self._file_name] + self._captured = pexpect.spawn(' '.join(cmd)) + if self._debug: + self._captured.logfile = sys.stdout.buffer + + def supports_python(self): + """Returns True if the underlying gdb implementation has python support + False otherwise""" + return "python" in self.list_features() + + def supports_numpy(self): + """Returns True if the underlying gdb implementation has NumPy support + (and by extension Python support) False otherwise""" + if not self.supports_python(): + return False + # Some gdb's have python 2! + cmd = ('python from __future__ import print_function;' + 'import numpy; print(numpy)') + self.interpreter_exec('console', cmd) + return "module \'numpy\' from" in self._captured.before.decode() + + def _captured_expect(self, expect): + try: + self._captured.expect(expect, timeout=self._timeout) + except pexpect.exceptions.TIMEOUT as e: + msg = f"Expected value did not arrive: {expect}." 
+ raise ValueError(msg) from e + + def assert_output(self, expected): + """Asserts that the current output string contains the expected.""" + output = self._captured.after + decoded = output.decode('utf-8') + assert expected in decoded, f'decoded={decoded}\nexpected={expected})' + + def assert_regex_output(self, expected): + """Asserts that the current output string contains the expected + regex.""" + output = self._captured.after + decoded = output.decode('utf-8') + done_str = decoded.splitlines()[0] + found = re.match(expected, done_str) + assert found, f'decoded={decoded}\nexpected={expected})' + + def _run_command(self, command, expect=''): + self._captured.sendline(command) + self._captured_expect(expect) + + def run(self): + """gdb command ~= 'run'""" + self._run_command('-exec-run', expect=r'\^running.*\r\n') + + def cont(self): + """gdb command ~= 'continue'""" + self._run_command('-exec-continue', expect=r'\^running.*\r\n') + + def quit(self): + """gdb command ~= 'quit'""" + self._run_command('-gdb-exit', expect=r'-gdb-exit') + self._captured.terminate() + + def next(self): + """gdb command ~= 'next'""" + self._run_command('-exec-next', expect=r'\*stopped,.*\r\n') + + def step(self): + """gdb command ~= 'step'""" + self._run_command('-exec-step', expect=r'\*stopped,.*\r\n') + + def set_breakpoint(self, line=None, symbol=None, condition=None): + """gdb command ~= 'break'""" + if line is not None and symbol is not None: + raise ValueError("Can only supply one of line or symbol") + bp = '-break-insert ' + if condition is not None: + bp += f'-c "{condition}" ' + if line is not None: + assert isinstance(line, int) + bp += f'-f {self._file_name}:{line} ' + if symbol is not None: + assert isinstance(symbol, str) + bp += f'-f {symbol} ' + self._run_command(bp, expect=r'\^done') + + def check_hit_breakpoint(self, number=None, line=None): + """Checks that a breakpoint has been hit""" + self._captured_expect(r'\*stopped,.*\r\n') + 
self.assert_output('*stopped,reason="breakpoint-hit",') + if number is not None: + assert isinstance(number, int) + self.assert_output(f'bkptno="{number}"') + if line is not None: + assert isinstance(line, int) + self.assert_output(f'line="{line}"') + + def stack_list_arguments(self, print_values=1, low_frame=0, high_frame=0): + """gdb command ~= 'info args'""" + for x in (print_values, low_frame, high_frame): + assert isinstance(x, int) and x in (0, 1, 2) + cmd = f'-stack-list-arguments {print_values} {low_frame} {high_frame}' + self._run_command(cmd, expect=r'\^done,.*\r\n') + + def stack_list_variables(self, print_values=1): + """gdb command ~= 'info locals'""" + assert isinstance(print_values, int) and print_values in (0, 1, 2) + cmd = f'-stack-list-variables {print_values}' + self._run_command(cmd, expect=r'\^done,.*\r\n') + + def interpreter_exec(self, interpreter=None, command=None): + """gdb command ~= 'interpreter-exec'""" + if interpreter is None: + raise ValueError("interpreter cannot be None") + if command is None: + raise ValueError("command cannot be None") + cmd = f'-interpreter-exec {interpreter} "{command}"' + self._run_command(cmd, expect=r'\^(done|error).*\r\n') # NOTE no `,` + + def _list_features_raw(self): + cmd = '-list-features' + self._run_command(cmd, expect=r'\^done,.*\r\n') + + def list_features(self): + """No equivalent gdb command? Returns a list of supported gdb + features. 
+ """ + self._list_features_raw() + output = self._captured.after + decoded = output.decode('utf-8') + m = re.match('.*features=\\[(.*)\\].*', decoded) + assert m is not None, "No match found for features string" + g = m.groups() + assert len(g) == 1, "Invalid number of match groups found" + return g[0].replace('"', '').split(',') diff --git a/venv/lib/python3.10/site-packages/numba/tests/inlining_usecases.py b/venv/lib/python3.10/site-packages/numba/tests/inlining_usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..663b7d3c4798a2add0689a3d52a3c82e034c81e4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/inlining_usecases.py @@ -0,0 +1,69 @@ +""" Test cases for inlining IR from another module """ +from numba import jit, njit +from numba.core import types +from numba.core.extending import overload + +_GLOBAL1 = 100 + + +@njit(inline='always') +def bar(): + return _GLOBAL1 + 10 + + +def baz_factory(a): + b = 17 + a + + @njit(inline='always') + def baz(): + return _GLOBAL1 + a - b + return baz + + +def baz(): + return _GLOBAL1 + 10 + + +@overload(baz, inline='always') +def baz_ol(): + def impl(): + return _GLOBAL1 + 10 + return impl + + +def bop_factory(a): + b = 17 + a + + def bop(): + return _GLOBAL1 + a - b + + @overload(bop, inline='always') + def baz(): + def impl(): + return _GLOBAL1 + a - b + return impl + + return bop + + +@jit((types.int32,), nopython=True) +def inner(a): + return a + 1 + + +@jit((types.int32,), nopython=True) +def more(a): + return inner(inner(a)) + + +def outer_simple(a): + return inner(a) * 2 + + +def outer_multiple(a): + return inner(a) * more(a) + + +@njit +def __dummy__(): + return diff --git a/venv/lib/python3.10/site-packages/numba/tests/matmul_usecase.py b/venv/lib/python3.10/site-packages/numba/tests/matmul_usecase.py new file mode 100644 index 0000000000000000000000000000000000000000..84af4ef308e67c07190325e22fc1ca01b3c9d874 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/numba/tests/matmul_usecase.py @@ -0,0 +1,24 @@ +"""Use cases for testing matmul (@) +""" +def matmul_usecase(x, y): + return x @ y + +def imatmul_usecase(x, y): + x @= y + return x + +class DumbMatrix(object): + + def __init__(self, value): + self.value = value + + def __matmul__(self, other): + if isinstance(other, DumbMatrix): + return DumbMatrix(self.value * other.value) + return NotImplemented + + def __imatmul__(self, other): + if isinstance(other, DumbMatrix): + self.value *= other.value + return self + return NotImplemented diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__init__.py b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b2958441fe7a098eeb66fe183b62bd4ff0edbbe2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__init__.py @@ -0,0 +1,10 @@ +from os.path import dirname +import unittest +from unittest.suite import TestSuite + +from numba.testing import load_testsuite + +def load_tests(loader, tests, pattern): + suite = TestSuite() + suite.addTests(load_testsuite(loader, dirname(__file__))) + return suite diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e22221f663f4433260d52d942b3f97823e2e00ba Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/cache_usecases.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/cache_usecases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c0063cbc8884db2598a97dd564c7dbdbfdb9963 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/cache_usecases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_caching.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_caching.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0e0605f33b20498c31e4e12e97ab78a12d3cc3c Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_caching.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_dufunc.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_dufunc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c49421ec98b74fb44dca98f7d1bd247ba2765bae Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_dufunc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_errors.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_errors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ada9caf372c6ac4c895372304a4aab8039c9107e Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_errors.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_gufunc.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_gufunc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9b7f063914c29802286abbd39002297eaa2553c Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_gufunc.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_parallel_env_variable.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_parallel_env_variable.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d01b35faf155c340046acf6ec0b95c6928214571 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_parallel_env_variable.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_parallel_low_work.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_parallel_low_work.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab213645342a5218c33e3cea87451112caea010c Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_parallel_low_work.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_parallel_ufunc_issues.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_parallel_ufunc_issues.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19d1fbeec3d4378f243d0a34a1f79c076981a32e Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_parallel_ufunc_issues.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_ufunc.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_ufunc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b0839b21a9793d5d430e0cf874ccc4991753646 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_ufunc.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_ufuncbuilding.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_ufuncbuilding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d701849d00cc2dd0105c7dfcf5f74ddae366435 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_ufuncbuilding.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_update_inplace.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_update_inplace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4aa84424b35a546bf91d73001bdff90c15bf6b5f Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_update_inplace.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_vectorize_decor.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_vectorize_decor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..239fa3d3d7a8c48fb7c8f9097d4be04498d22424 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/test_vectorize_decor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/ufuncbuilding_usecases.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/ufuncbuilding_usecases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef41d947348c07221e11ec424ffde4dc4aae1966 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/__pycache__/ufuncbuilding_usecases.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/cache_usecases.py 
b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/cache_usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..4d250756d404b1e87115e4a191768925e5c2bc21 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/cache_usecases.py @@ -0,0 +1,76 @@ +import numba as nb + + +# +# UFunc +# + +def direct_ufunc_cache_usecase(**kwargs): + @nb.vectorize(["intp(intp)", "float64(float64)"], cache=True, **kwargs) + def ufunc(inp): + return inp * 2 + + return ufunc + + +def indirect_ufunc_cache_usecase(**kwargs): + @nb.njit(cache=True) + def indirect_ufunc_core(inp): + return inp * 3 + + @nb.vectorize(["intp(intp)", "float64(float64)", "complex64(complex64)"], + **kwargs) + def ufunc(inp): + return indirect_ufunc_core(inp) + + return ufunc + + +# +# DUFunc +# + +def direct_dufunc_cache_usecase(**kwargs): + @nb.vectorize(cache=True, **kwargs) + def ufunc(inp): + return inp * 2 + + return ufunc + + +def indirect_dufunc_cache_usecase(**kwargs): + @nb.njit(cache=True) + def indirect_ufunc_core(inp): + return inp * 3 + + @nb.vectorize(**kwargs) + def ufunc(inp): + return indirect_ufunc_core(inp) + + return ufunc + + +# +# GUFunc +# + +def direct_gufunc_cache_usecase(**kwargs): + @nb.guvectorize(["(intp, intp[:])", "(float64, float64[:])"], + "()->()", cache=True, **kwargs) + def gufunc(inp, out): + out[0] = inp * 2 + + return gufunc + + +def indirect_gufunc_cache_usecase(**kwargs): + @nb.njit(cache=True) + def core(x): + return x * 3 + + @nb.guvectorize(["(intp, intp[:])", "(float64, float64[:])", + "(complex64, complex64[:])"], "()->()", **kwargs) + def gufunc(inp, out): + out[0] = core(inp) + + return gufunc diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_caching.py b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_caching.py new file mode 100644 index 0000000000000000000000000000000000000000..cfb47f113211721bdeea9ea2327f811833706e9b --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_caching.py @@ -0,0 +1,228 @@ +import sys +import os.path +import re +import subprocess + +import numpy as np + +from numba.tests.support import capture_cache_log +from numba.tests.test_caching import BaseCacheTest +from numba.core import config +import unittest + + +class UfuncCacheTest(BaseCacheTest): + """ + Since the cache stats is not exposed by ufunc, we test by looking at the + cache debug log. + """ + _numba_parallel_test_ = False + + here = os.path.dirname(__file__) + usecases_file = os.path.join(here, "cache_usecases.py") + modname = "ufunc_caching_test_fodder" + + regex_data_saved = re.compile(r'\[cache\] data saved to') + regex_index_saved = re.compile(r'\[cache\] index saved to') + + regex_data_loaded = re.compile(r'\[cache\] data loaded from') + regex_index_loaded = re.compile(r'\[cache\] index loaded from') + + def check_cache_saved(self, cachelog, count): + """ + Check number of cache-save were issued + """ + data_saved = self.regex_data_saved.findall(cachelog) + index_saved = self.regex_index_saved.findall(cachelog) + self.assertEqual(len(data_saved), count) + self.assertEqual(len(index_saved), count) + + def check_cache_loaded(self, cachelog, count): + """ + Check number of cache-load were issued + """ + data_loaded = self.regex_data_loaded.findall(cachelog) + index_loaded = self.regex_index_loaded.findall(cachelog) + self.assertEqual(len(data_loaded), count) + self.assertEqual(len(index_loaded), count) + + def check_ufunc_cache(self, usecase_name, n_overloads, **kwargs): + """ + Check number of cache load/save. + There should be one per overloaded version. 
+ """ + mod = self.import_module() + usecase = getattr(mod, usecase_name) + # New cache entry saved + with capture_cache_log() as out: + new_ufunc = usecase(**kwargs) + cachelog = out.getvalue() + self.check_cache_saved(cachelog, count=n_overloads) + + # Use cached version + with capture_cache_log() as out: + cached_ufunc = usecase(**kwargs) + cachelog = out.getvalue() + self.check_cache_loaded(cachelog, count=n_overloads) + + return new_ufunc, cached_ufunc + + +class TestUfuncCacheTest(UfuncCacheTest): + + def test_direct_ufunc_cache(self, **kwargs): + new_ufunc, cached_ufunc = self.check_ufunc_cache( + "direct_ufunc_cache_usecase", n_overloads=2, **kwargs) + # Test the cached and original versions + inp = np.random.random(10).astype(np.float64) + np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp)) + inp = np.arange(10, dtype=np.intp) + np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp)) + + def test_direct_ufunc_cache_objmode(self): + self.test_direct_ufunc_cache(forceobj=True) + + def test_direct_ufunc_cache_parallel(self): + self.test_direct_ufunc_cache(target='parallel') + + def test_indirect_ufunc_cache(self, **kwargs): + new_ufunc, cached_ufunc = self.check_ufunc_cache( + "indirect_ufunc_cache_usecase", n_overloads=3, **kwargs) + # Test the cached and original versions + inp = np.random.random(10).astype(np.float64) + np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp)) + inp = np.arange(10, dtype=np.intp) + np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp)) + + def test_indirect_ufunc_cache_parallel(self): + self.test_indirect_ufunc_cache(target='parallel') + + +class TestDUfuncCacheTest(UfuncCacheTest): + # Note: DUFunc doesn't support parallel target yet + + def check_dufunc_usecase(self, usecase_name): + mod = self.import_module() + usecase = getattr(mod, usecase_name) + # Create dufunc + with capture_cache_log() as out: + ufunc = usecase() + self.check_cache_saved(out.getvalue(), count=0) + # Compile & cache + with 
capture_cache_log() as out: + ufunc(np.arange(10)) + self.check_cache_saved(out.getvalue(), count=1) + self.check_cache_loaded(out.getvalue(), count=0) + # Use cached + with capture_cache_log() as out: + ufunc = usecase() + ufunc(np.arange(10)) + self.check_cache_loaded(out.getvalue(), count=1) + + def test_direct_dufunc_cache(self): + # We don't test for objmode because DUfunc don't support it. + self.check_dufunc_usecase('direct_dufunc_cache_usecase') + + def test_indirect_dufunc_cache(self): + self.check_dufunc_usecase('indirect_dufunc_cache_usecase') + + +def _fix_raw_path(rstr): + if config.IS_WIN32: + rstr = rstr.replace(r'/', r'\\\\') + return rstr + + +class TestGUfuncCacheTest(UfuncCacheTest): + + def test_filename_prefix(self): + mod = self.import_module() + usecase = getattr(mod, "direct_gufunc_cache_usecase") + with capture_cache_log() as out: + usecase() + cachelog = out.getvalue() + # find number filename with "guf-" prefix + fmt1 = _fix_raw_path(r'/__pycache__/guf-{}') + prefixed = re.findall(fmt1.format(self.modname), cachelog) + fmt2 = _fix_raw_path(r'/__pycache__/{}') + normal = re.findall(fmt2.format(self.modname), cachelog) + # expecting 2 overloads + self.assertGreater(len(normal), 2) + # expecting equal number of wrappers and overloads cache entries + self.assertEqual(len(normal), len(prefixed)) + + def test_direct_gufunc_cache(self, **kwargs): + # 2 cache entry for the 2 overloads + # and 2 cache entry for the gufunc wrapper + new_ufunc, cached_ufunc = self.check_ufunc_cache( + "direct_gufunc_cache_usecase", n_overloads=2 + 2, **kwargs) + # Test the cached and original versions + inp = np.random.random(10).astype(np.float64) + np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp)) + inp = np.arange(10, dtype=np.intp) + np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp)) + + def test_direct_gufunc_cache_objmode(self): + self.test_direct_gufunc_cache(forceobj=True) + + def test_direct_gufunc_cache_parallel(self): + 
self.test_direct_gufunc_cache(target='parallel') + + def test_indirect_gufunc_cache(self, **kwargs): + # 3 cache entry for the 3 overloads + # and no cache entry for the gufunc wrapper + new_ufunc, cached_ufunc = self.check_ufunc_cache( + "indirect_gufunc_cache_usecase", n_overloads=3, **kwargs) + # Test the cached and original versions + inp = np.random.random(10).astype(np.float64) + np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp)) + inp = np.arange(10, dtype=np.intp) + np.testing.assert_equal(new_ufunc(inp), cached_ufunc(inp)) + + def test_indirect_gufunc_cache_parallel(self, **kwargs): + self.test_indirect_gufunc_cache(target='parallel') + + +class TestCacheSpecificIssue(UfuncCacheTest): + + def run_in_separate_process(self, runcode): + # Based on the same name util function in test_dispatcher but modified + # to allow user to define what to run. + code = """if 1: + import sys + + sys.path.insert(0, %(tempdir)r) + mod = __import__(%(modname)r) + mod.%(runcode)s + """ % dict(tempdir=self.tempdir, modname=self.modname, + runcode=runcode) + + popen = subprocess.Popen([sys.executable, "-c", code], + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = popen.communicate() + if popen.returncode != 0: + raise AssertionError("process failed with code %s: stderr follows" + "\n%s\n" % (popen.returncode, err.decode())) + + # + # The following test issue #2198 that loading cached (g)ufunc first + # bypasses some target context initialization. 
+ # + + def test_first_load_cached_ufunc(self): + # ensure function is cached + self.run_in_separate_process('direct_ufunc_cache_usecase()') + # use the cached function + # this will fail if the target context is not init'ed + self.run_in_separate_process('direct_ufunc_cache_usecase()') + + def test_first_load_cached_gufunc(self): + # ensure function is cached + self.run_in_separate_process('direct_gufunc_cache_usecase()') + # use the cached function + # this will fail out if the target context is not init'ed + self.run_in_separate_process('direct_gufunc_cache_usecase()') + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_dufunc.py b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_dufunc.py new file mode 100644 index 0000000000000000000000000000000000000000..3902e291d1365a06216ab8071a3ccdd86c3105a9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_dufunc.py @@ -0,0 +1,931 @@ +import itertools +import pickle +import textwrap +import warnings + +import numpy as np + +from numba import njit, vectorize +from numba.tests.support import MemoryLeakMixin, TestCase +from numba.core.errors import (TypingError, NumbaNotImplementedError, + NumbaExperimentalFeatureWarning) +import unittest +from numba.np.ufunc import dufunc +from numba.np.numpy_support import from_dtype + + +def pyuadd(a0, a1): + return a0 + a1 + + +def pysub(a0, a1): + return a0 - a1 + + +def pymult(a0, a1): + return a0 * a1 + + +def pydiv(a0, a1): + return a0 // a1 + + +def pymin(a0, a1): + return a0 if a0 < a1 else a1 + + +class TestDUFunc(MemoryLeakMixin, unittest.TestCase): + + def nopython_dufunc(self, pyfunc): + return dufunc.DUFunc(pyfunc, targetoptions=dict(nopython=True)) + + def test_frozen(self): + duadd = self.nopython_dufunc(pyuadd) + self.assertFalse(duadd._frozen) + duadd._frozen = True + self.assertTrue(duadd._frozen) + with self.assertRaises(ValueError): + duadd._frozen = False + 
with self.assertRaises(TypeError): + duadd(np.linspace(0,1,10), np.linspace(1,2,10)) + + def test_scalar(self): + duadd = self.nopython_dufunc(pyuadd) + self.assertEqual(pyuadd(1,2), duadd(1,2)) + + def test_npm_call(self): + duadd = self.nopython_dufunc(pyuadd) + + @njit + def npmadd(a0, a1, o0): + duadd(a0, a1, o0) + X = np.linspace(0,1.9,20) + X0 = X[:10] + X1 = X[10:] + out0 = np.zeros(10) + npmadd(X0, X1, out0) + np.testing.assert_array_equal(X0 + X1, out0) + Y0 = X0.reshape((2,5)) + Y1 = X1.reshape((2,5)) + out1 = np.zeros((2,5)) + npmadd(Y0, Y1, out1) + np.testing.assert_array_equal(Y0 + Y1, out1) + Y2 = X1[:5] + out2 = np.zeros((2,5)) + npmadd(Y0, Y2, out2) + np.testing.assert_array_equal(Y0 + Y2, out2) + + def test_npm_call_implicit_output(self): + duadd = self.nopython_dufunc(pyuadd) + + @njit + def npmadd(a0, a1): + return duadd(a0, a1) + X = np.linspace(0,1.9,20) + X0 = X[:10] + X1 = X[10:] + out0 = npmadd(X0, X1) + np.testing.assert_array_equal(X0 + X1, out0) + Y0 = X0.reshape((2,5)) + Y1 = X1.reshape((2,5)) + out1 = npmadd(Y0, Y1) + np.testing.assert_array_equal(Y0 + Y1, out1) + Y2 = X1[:5] + out2 = npmadd(Y0, Y2) + np.testing.assert_array_equal(Y0 + Y2, out2) + out3 = npmadd(1.,2.) + self.assertEqual(out3, 3.) 
+ + def test_ufunc_props(self): + duadd = self.nopython_dufunc(pyuadd) + self.assertEqual(duadd.nin, 2) + self.assertEqual(duadd.nout, 1) + self.assertEqual(duadd.nargs, duadd.nin + duadd.nout) + self.assertEqual(duadd.ntypes, 0) + self.assertEqual(duadd.types, []) + self.assertEqual(duadd.identity, None) + duadd(1, 2) + self.assertEqual(duadd.ntypes, 1) + self.assertEqual(duadd.ntypes, len(duadd.types)) + self.assertIsNone(duadd.signature) + + def test_ufunc_props_jit(self): + duadd = self.nopython_dufunc(pyuadd) + duadd(1, 2) # initialize types attribute + + attributes = {'nin': duadd.nin, + 'nout': duadd.nout, + 'nargs': duadd.nargs, + #'ntypes': duadd.ntypes, + #'types': duadd.types, + 'identity': duadd.identity, + 'signature': duadd.signature} + + def get_attr_fn(attr): + fn = f''' + def impl(): + return duadd.{attr} + ''' + l = {} + exec(textwrap.dedent(fn), {'duadd': duadd}, l) + return l['impl'] + + for attr, val in attributes.items(): + cfunc = njit(get_attr_fn(attr)) + self.assertEqual(val, cfunc(), + f'Attribute differs from original: {attr}') + + # We don't expose [n]types attributes as they are dynamic attributes + # and can change as the user calls the ufunc + # cfunc = njit(get_attr_fn('ntypes')) + # self.assertEqual(cfunc(), 1) + # duadd(1.1, 2.2) + # self.assertEqual(cfunc(), 2) + + +class TestDUFuncAt(TestCase): + def _compare_output(self, fn, ufunc, a, *args): + expected = a.copy() + got = a.copy() + ufunc.at(expected, *args) + fn(got, *args) + self.assertPreciseEqual(expected, got) + + def _generate_jit(self, ufunc): + if ufunc.nin == 2: + vec = vectorize()(lambda a, b: ufunc(a, b)) + else: + vec = vectorize()(lambda a: ufunc(a)) + + @njit + def fn(*args): + return vec.at(*args) + return fn + + def test_numpy_ufunc_at_basic(self): + # tests taken from: https://github.com/numpy/numpy/blob/27d8c43eb958b4ecee59b4d66908750759a9afc2/numpy/core/tests/test_ufunc.py#L1974-L2003 # noqa: E501 + # NumPy also test this function with a Rational array dtype. 
We skip + # this test as Numba doesn't support Rational + a = np.arange(10, dtype=int) + + add_at = self._generate_jit(np.add) + negative_at = self._generate_jit(np.negative) + + negative_vec = vectorize()(lambda a: np.negative(a)) + + @njit + def negative_jit_2(a, indices, b): + return negative_vec.at(a, indices, b) + + # basic testing + self._compare_output(add_at, np.add, a, [2, 5, 2], 1) + + # missing second operand + err_msg = 'second operand needed for ufunc' + with self.assertRaisesRegex(TypingError, err_msg): + add_at(a.copy(), [2, 5, 3], None) + + self._compare_output(negative_at, np.negative, a.copy(), [2, 5, 3]) + + b = np.array([100, 100, 100]) + self._compare_output(add_at, np.add, a.copy(), [2, 5, 2], b) + + # extraneous second operand + err_msg = 'second operand provided when ufunc is unary' + with self.assertRaisesRegex(TypingError, err_msg): + negative_jit_2(a.copy(), [2, 5, 3], [1, 2, 3]) + + with self.assertRaises(TypingError): + add_at(a.copy(), [2, 5, 3], [[1, 2], 1]) + + def test_ufunc_at_inner_loop(self): + typecodes = np.typecodes['Complex'] + ufuncs = (np.add, np.subtract, np.multiply) + for typecode in typecodes: + + try: + from_dtype(np.dtype(typecode)) + except NumbaNotImplementedError: + continue + + for ufunc in ufuncs: + a = np.ones(10, dtype=typecode) + indx = np.concatenate([np.ones(6, dtype=np.intp), + np.full(18, 4, dtype=np.intp)]) + value = a.dtype.type(1j) + ufunc_at = self._generate_jit(ufunc) + ufunc_at(a, indx, value) + expected = np.ones_like(a) + if ufunc is np.multiply: + expected[1] = expected[4] = -1 + else: + expected[1] += 6 * (value if ufunc is np.add else -value) + expected[4] += 18 * (value if ufunc is np.add else -value) + + self.assertPreciseEqual(a, expected) + + def test_ufunc_at_ellipsis(self): + # Make sure the indexed loop check does not choke on iters + # with subspaces + arr = np.zeros(5, dtype=int) + add_at = self._generate_jit(np.add) + self._compare_output(add_at, np.add, arr, slice(None), + np.ones(5, 
dtype=int)) + + def test_ufunc_at_negative(self): + arr = np.ones(5, dtype=np.int32) + indx = np.arange(5) + at = self._generate_jit(np.negative) + at(arr, indx) + assert np.all(arr == [-1, -1, -1, -1, -1]) + + def test_ufunc_at_large(self): + # NumPy issue gh-23457 + indices = np.zeros(8195, dtype=np.int16) + b = np.zeros(8195, dtype=float) + b[0] = 10 + b[1] = 5 + b[8192:] = 100 + a = np.zeros(1, dtype=float) + add_at = self._generate_jit(np.add) + add_at(a, indices, b) + assert a[0] == b.sum() + + def test_cast_index_fastpath(self): + arr = np.zeros(10) + values = np.ones(100000) + add_at = self._generate_jit(np.add) + # index must be cast, which may be buffered in chunks: + index = np.zeros(len(values), dtype=np.uint8) + add_at(arr, index, values) + assert arr[0] == len(values) + + def test_ufunc_at_scalar_value_fastpath(self): + values = (np.ones(1), np.ones(()), np.float64(1.), 1.) + for value in values: + arr = np.zeros(1000) + # index must be cast, which may be buffered in chunks: + index = np.repeat(np.arange(1000), 2) + add_at = self._generate_jit(np.add) + add_at(arr, index, value) + np.testing.assert_array_equal(arr, np.full_like(arr, 2 * value)) + + def test_ufunc_at_multiD(self): + a = np.arange(9).reshape(3, 3) + b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) + add_at = self._generate_jit(np.add) + add_at(a, (slice(None), np.asarray([1, 2, 1])), b) + self.assertPreciseEqual(a, np.array( + [[0, 201, 102], [3, 404, 205], [6, 607, 308]])) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + add_at(a, (slice(None), slice(None), np.asarray([1, 2, 1])), b) + self.assertPreciseEqual(a, np.array( + [[[0, 401, 202], + [3, 404, 205], + [6, 407, 208]], + + [[9, 410, 211], + [12, 413, 214], + [15, 416, 217]], + + [[18, 419, 220], + [21, 422, 223], + [24, 425, 226]]])) + + a = np.arange(9).reshape(3, 3) + b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) + add_at(a, (np.asarray([1, 2, 1]), slice(None)), b) + 
self.assertPreciseEqual(a, np.asarray( + [[0, 1, 2], [403, 404, 405], [206, 207, 208]])) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + add_at(a, (slice(None), np.asarray([1, 2, 1]), slice(None)), b) + self.assertPreciseEqual(a, np.asarray( + [[[0, 1, 2], + [203, 404, 605], + [106, 207, 308]], + + [[9, 10, 11], + [212, 413, 614], + [115, 216, 317]], + + [[18, 19, 20], + [221, 422, 623], + [124, 225, 326]]])) + + a = np.arange(9).reshape(3, 3) + b = np.array([100, 200, 300]) + add_at(a, (0, np.asarray([1, 2, 1])), b) + self.assertPreciseEqual(a, np.asarray( + [[0, 401, 202], [3, 4, 5], [6, 7, 8]])) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + add_at(a, (np.asarray([1, 2, 1]), 0, slice(None)), b) + self.assertPreciseEqual(a, np.asarray( + [[[0, 1, 2], + [3, 4, 5], + [6, 7, 8]], + + [[209, 410, 611], + [12, 13, 14], + [15, 16, 17]], + + [[118, 219, 320], + [21, 22, 23], + [24, 25, 26]]])) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + add_at = self._generate_jit(np.add) + add_at(a, (slice(None), slice(None), slice(None)), b) + self.assertPreciseEqual(a, np.asarray( + [[[100, 201, 302], + [103, 204, 305], + [106, 207, 308]], + + [[109, 210, 311], + [112, 213, 314], + [115, 216, 317]], + + [[118, 219, 320], + [121, 222, 323], + [124, 225, 326]]])) + + def test_ufunc_at_0D(self): + a = np.array(0) + add_at = self._generate_jit(np.add) + add_at(a, (), 1) + self.assertPreciseEqual(a, np.array(1)) + + with self.assertRaises(TypingError): + add_at(a, 0, 1) + + b = np.arange(3) + add_at(b, 0, 1) + self.assertPreciseEqual(b, np.array([1, 1, 2])) + + # NumPy checks for IndexError but we can't call a jit function with an + # empty list as Numba raises "can't compute fingerprint of empty list" + with self.assertRaises(ValueError): + add_at(a, [], 1) + + def test_ufunc_at_dtypes(self): + # Test mixed dtypes + a = np.arange(10) + power_at = self._generate_jit(np.power) + power_at(a, [1, 2, 3, 2], 3.5) + 
self.assertPreciseEqual(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9])) + + def test_ufunc_at_boolean(self): + # Test boolean indexing and boolean ufuncs + a = np.arange(10) + index = a % 2 == 0 + equal_at = self._generate_jit(np.equal) + # boolean indexing not supported + equal_at(a, index, [0, 2, 4, 6, 8]) + self.assertPreciseEqual(a, np.array([1, 1, 1, 3, 1, 5, 1, 7, 1, 9])) + + def test_ufunc_at_boolean2(self): + # Test unary operator + a = np.arange(10, dtype='u4') + invert_at = self._generate_jit(np.invert) + invert_at(a, [2, 5, 2]) + self.assertPreciseEqual(a, np.array([0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, + 7, 8, 9], dtype=np.uint32)) + + def test_ufunc_at_advanced(self): + # Test empty subspace + orig = np.arange(4) + a = orig[:, None][:, 0:0] + add_at = self._generate_jit(np.add) + add_at(a, [0, 1], 3) + self.assertPreciseEqual(orig, np.arange(4)) + + @unittest.expectedFailure + def test_ufunc_at_advanced_2(self): + # Test with swapped byte order + index = np.array([1, 2, 1], np.dtype('i').newbyteorder()) + values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder()) + add_at = self._generate_jit(np.add) + add_at(values, index, 3) + self.assertPreciseEqual(values, [1, 8, 6, 4]) + + def test_ufunc_at_advanced_3(self): + # Test exception thrown + values = np.array(['a', 1], dtype=object) + add_at = self._generate_jit(np.add) + with self.assertRaises(TypingError): + add_at(values, [0, 1], 1) + self.assertPreciseEqual(values, np.array(['a', 1], dtype=object)) + + def test_ufunc_at_advanced_4(self): + # Test multiple output ufuncs raise error, NumPy gh-5665 + modf_at = self._generate_jit(np.modf) + # NumPy raises ValueError as modf returns multiple outputs + with self.assertRaises(TypingError): + modf_at(np.arange(10), [1]) + + def test_ufunc_at_advanced_5(self): + # Test maximum + maximum_at = self._generate_jit(np.maximum) + a = np.array([1, 2, 3]) + maximum_at(a, [0], 0) + self.assertPreciseEqual(a, np.array([1, 2, 3])) + + def 
test_ufunc_at_negative_indexes(self): + dtypes = np.typecodes['AllInteger'] + np.typecodes['Float'] + ufuncs = (np.add, np.subtract, np.divide, np.minimum, np.maximum) + + for dtype in dtypes: + + if dtype in ('e',): # skip float16 as we don't have an impl. for it + continue + + try: + from_dtype(np.dtype(dtype)) + except NumbaNotImplementedError: + continue + + for ufunc in ufuncs: + a = np.arange(0, 10).astype(dtype) + indxs = np.array([-1, 1, -1, 2]).astype(np.intp) + vals = np.array([1, 5, 2, 10], dtype=a.dtype) + + expected = a.copy() + for i, v in zip(indxs, vals): + expected[i] = ufunc(expected[i], v) + + ufunc_at = self._generate_jit(ufunc) + ufunc_at(a, indxs, vals) + np.testing.assert_array_equal(a, expected) + assert np.all(indxs == [-1, 1, -1, 2]) + + @unittest.expectedFailure + def test_ufunc_at_not_none_signature(self): + # Test ufuncs with non-trivial signature raise a TypeError + a = np.ones((2, 2, 2)) + b = np.ones((1, 2, 2)) + # matmul is a gufunc, thus, this will fail atm + matmul_at = self._generate_jit(np.matmul) + err_msg = 'does not support ufunc with non-trivial signature' + with self.assertRaisesRegex(TypingError, err_msg): + matmul_at(a, [0], b) + + # a = np.array([[[1, 2], [3, 4]]]) + # assert_raises(TypeError, np.linalg._umath_linalg.det.at, a, [0]) + + def test_ufunc_at_no_loop_for_op(self): + # str dtype does not have a ufunc loop for np.add + arr = np.ones(10, dtype=str) + add_at = self._generate_jit(np.add) + # NumPy raises `np.core._exceptions._UFuncNoLoopError` + with self.assertRaises(TypingError): + add_at(arr, [0, 1], [0, 1]) + + def test_ufunc_at_output_casting(self): + arr = np.array([-1]) + equal_at = self._generate_jit(np.equal) + equal_at(arr, [0], [0]) + assert arr[0] == 0 + + def test_ufunc_at_broadcast_failure(self): + arr = np.arange(5) + add_at = self._generate_jit(np.add) + + # NumPy raises ValueError('array is not broadcastable to correct shape') + msg = 'operands could not be broadcast together with remapped shapes' 
+ with self.assertRaisesRegex(ValueError, msg): + add_at(arr, [0, 1], [1, 2, 3]) + + def test_ufunc_at_dynamic(self): + arr = np.arange(5) + + @vectorize + def inc(x): + return x + 1 + + self.assertEqual(len(inc.types), 0) + + # trying to call inc.at should trigger compilation + inc.at(arr, [1, 3]) + + self.assertGreater(len(inc.types), 0) + + def test_ufunc_at_experimental_warning(self): + arr = np.arange(5) + add_at = self._generate_jit(np.add) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always', NumbaExperimentalFeatureWarning) + + add_at(arr, [0, 3], 10) + + self.assertGreater(len(w), 0) + self.assertIn('ufunc.at feature is experimental', str(w[0].message)) + + +class TestDUFuncReduceNumPyTests(TestCase): + # Tests taken from + # https://github.com/numpy/numpy/blob/51ee17b6bd4ccec60a5483ee8bff94ad0c0e8585/numpy/_core/tests/test_ufunc.py # noqa: E501 + + def _generate_jit(self, ufunc, identity=None): + if ufunc.nin == 2: + vec = vectorize(identity=identity)(lambda a, b: ufunc(a, b)) + else: + vec = vectorize(identity=identity)(lambda a: ufunc(a)) + + @njit + def fn(array, axis=0, initial=None): + return vec.reduce(array, axis=axis, initial=initial) + return fn + + @unittest.expectedFailure + def test_numpy_scalar_reduction(self): + # scalar reduction is not supported + power_reduce = self._generate_jit(np.power) + expected = np.power.reduce(3) + got = power_reduce(3) + self.assertPreciseEqual(expected, got) + + def check_identityless_reduction(self, a): + def compare_output(a, b): + # We don't use self.assertPreciseEqual as the dtype differs + # between the value from the reduction and the hardcoded output + np.testing.assert_equal(a, b) + # test taken from: + # https://github.com/numpy/numpy/blob/51ee17b6bd4ccec60a5483ee8bff94ad0c0e8585/numpy/_core/tests/test_ufunc.py#L1591 # noqa: E501 + + minimum_reduce = self._generate_jit(np.minimum, identity='reorderable') + + # np.minimum.reduce is an identityless reduction + + # Verify 
that it sees the zero at various positions + a[...] = 1 + a[1, 0, 0] = 0 + compare_output(minimum_reduce(a, axis=None), 0) + compare_output(minimum_reduce(a, axis=(0, 1)), [0, 1, 1, 1]) + compare_output(minimum_reduce(a, axis=(0, 2)), [0, 1, 1]) + compare_output(minimum_reduce(a, axis=(1, 2)), [1, 0]) + compare_output(minimum_reduce(a, axis=0), + [[0, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) + compare_output(minimum_reduce(a, axis=1), + [[1, 1, 1, 1], [0, 1, 1, 1]]) + compare_output(minimum_reduce(a, axis=2), + [[1, 1, 1], [0, 1, 1]]) + compare_output(minimum_reduce(a, axis=()), a) + + a[...] = 1 + a[0, 1, 0] = 0 + compare_output(minimum_reduce(a, axis=None), 0) + compare_output(minimum_reduce(a, axis=(0, 1)), [0, 1, 1, 1]) + compare_output(minimum_reduce(a, axis=(0, 2)), [1, 0, 1]) + compare_output(minimum_reduce(a, axis=(1, 2)), [0, 1]) + compare_output(minimum_reduce(a, axis=0), + [[1, 1, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]]) + compare_output(minimum_reduce(a, axis=1), + [[0, 1, 1, 1], [1, 1, 1, 1]]) + compare_output(minimum_reduce(a, axis=2), + [[1, 0, 1], [1, 1, 1]]) + compare_output(minimum_reduce(a, axis=()), a) + + a[...] 
= 1 + a[0, 0, 1] = 0 + compare_output(minimum_reduce(a, axis=None), 0) + compare_output(minimum_reduce(a, axis=(0, 1)), [1, 0, 1, 1]) + compare_output(minimum_reduce(a, axis=(0, 2)), [0, 1, 1]) + compare_output(minimum_reduce(a, axis=(1, 2)), [0, 1]) + compare_output(minimum_reduce(a, axis=0), + [[1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) + compare_output(minimum_reduce(a, axis=1), + [[1, 0, 1, 1], [1, 1, 1, 1]]) + compare_output(minimum_reduce(a, axis=2), + [[0, 1, 1], [1, 1, 1]]) + compare_output(minimum_reduce(a, axis=()), a) + + def test_numpy_identityless_reduction_corder(self): + a = np.empty((2, 3, 4), order='C') + self.check_identityless_reduction(a) + + def test_numpy_identityless_reduction_forder(self): + a = np.empty((2, 3, 4), order='F') + self.check_identityless_reduction(a) + + def test_numpy_identityless_reduction_otherorder(self): + a = np.empty((2, 4, 3), order='C').swapaxes(1, 2) + self.check_identityless_reduction(a) + + def test_numpy_identityless_reduction_noncontig(self): + a = np.empty((3, 5, 4), order='C').swapaxes(1, 2) + a = a[1:, 1:, 1:] + self.check_identityless_reduction(a) + + def test_numpy_identityless_reduction_noncontig_unaligned(self): + a = np.empty((3 * 4 * 5 * 8 + 1,), dtype='i1') + a = a[1:].view(dtype='f8') + a.shape = (3, 4, 5) + a = a[1:, 1:, 1:] + self.check_identityless_reduction(a) + + def test_numpy_initial_reduction(self): + # np.minimum.reduce is an identityless reduction + add_reduce = self._generate_jit(np.add) + min_reduce = self._generate_jit(np.minimum) + max_reduce = self._generate_jit(np.maximum) + + # For cases like np.maximum(np.abs(...), initial=0) + # More generally, a supremum over non-negative numbers. + self.assertPreciseEqual(max_reduce(np.asarray([]), initial=0), 0.0) + + # For cases like reduction of an empty array over the reals. 
+ self.assertPreciseEqual(min_reduce(np.asarray([]), initial=np.inf), + np.inf) + self.assertPreciseEqual(max_reduce(np.asarray([]), initial=-np.inf), + -np.inf) + + # Random tests + self.assertPreciseEqual(min_reduce(np.asarray([5]), initial=4), 4) + self.assertPreciseEqual(max_reduce(np.asarray([4]), initial=5), 5) + self.assertPreciseEqual(max_reduce(np.asarray([5]), initial=4), 5) + self.assertPreciseEqual(min_reduce(np.asarray([4]), initial=5), 4) + + # Check initial=None raises ValueError for both types of ufunc + # reductions + msg = 'zero-size array to reduction operation' + for func in (add_reduce, min_reduce): + with self.assertRaisesRegex(ValueError, msg): + func(np.asarray([]), initial=None) + + def test_numpy_empty_reduction_and_identity(self): + arr = np.zeros((0, 5)) + true_divide_reduce = self._generate_jit(np.true_divide) + + # OK, since the reduction itself is *not* empty, the result is + expected = np.true_divide.reduce(arr, axis=1) + got = true_divide_reduce(arr, axis=1) + self.assertPreciseEqual(expected, got) + self.assertPreciseEqual(got.shape, (0,)) + + # Not OK, the reduction itself is empty and we have no identity + msg = 'zero-size array to reduction operation' + with self.assertRaisesRegex(ValueError, msg): + true_divide_reduce(arr, axis=0) + + # Test that an empty reduction fails also if the result is empty + arr = np.zeros((0, 0, 5)) + with self.assertRaisesRegex(ValueError, msg): + true_divide_reduce(arr, axis=1) + + # Division reduction makes sense with `initial=1` (empty or not): + expected = np.true_divide.reduce(arr, axis=1, initial=1) + got = true_divide_reduce(arr, axis=1, initial=1) + self.assertPreciseEqual(expected, got) + + def test_identityless_reduction_nonreorderable(self): + a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]]) + + divide_reduce = self._generate_jit(np.divide) + res = divide_reduce(a, axis=0) + self.assertPreciseEqual(res, np.asarray([8.0, 4.0, 8.0])) + + res = divide_reduce(a, axis=1) + 
self.assertPreciseEqual(res, np.asarray([2.0, 8.0])) + + res = divide_reduce(a, axis=()) + self.assertPreciseEqual(res, a) + + # will not raise as per Numba issue #9283 + # assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1)) + + def test_reduce_zero_axis(self): + # If we have a n x m array and do a reduction with axis=1, then we are + # doing n reductions, and each reduction takes an m-element array. For + # a reduction operation without an identity, then: + # n > 0, m > 0: fine + # n = 0, m > 0: fine, doing 0 reductions of m-element arrays + # n > 0, m = 0: can't reduce a 0-element array, ValueError + # n = 0, m = 0: can't reduce a 0-element array, ValueError (for + # consistency with the above case) + # This test doesn't actually look at return values, it just checks to + # make sure that error we get an error in exactly those cases where we + # expect one, and assumes the calculations themselves are done + # correctly. + + def ok(f, *args, **kwargs): + f(*args, **kwargs) + + def err(f, *args, **kwargs): + with self.assertRaises(ValueError): + f(*args, **kwargs) + + def t(expect, func, n, m): + expect(func, np.zeros((n, m)), axis=1) + expect(func, np.zeros((m, n)), axis=0) + expect(func, np.zeros((n // 2, n // 2, m)), axis=2) + expect(func, np.zeros((n // 2, m, n // 2)), axis=1) + expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2)) + expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2)) + expect(func, np.zeros((m // 3, m // 3, m // 3, + n // 2, n // 2)), axis=(0, 1, 2)) + # Check what happens if the inner (resp. 
outer) dimensions are a + # mix of zero and non-zero: + expect(func, np.zeros((10, m, n)), axis=(0, 1)) + expect(func, np.zeros((10, n, m)), axis=(0, 2)) + expect(func, np.zeros((m, 10, n)), axis=0) + expect(func, np.zeros((10, m, n)), axis=1) + expect(func, np.zeros((10, n, m)), axis=2) + + # np.maximum is just an arbitrary ufunc with no reduction identity + maximum_reduce = self._generate_jit(np.maximum, identity='reorderable') + self.assertEqual(np.maximum.identity, None) + t(ok, maximum_reduce, 30, 30) + t(ok, maximum_reduce, 0, 30) + t(err, maximum_reduce, 30, 0) + t(err, maximum_reduce, 0, 0) + err(maximum_reduce, []) + maximum_reduce(np.zeros((0, 0)), axis=()) + + # all of the combinations are fine for a reduction that has an + # identity + add_reduce = self._generate_jit(np.add, identity=0) + t(ok, add_reduce, 30, 30) + t(ok, add_reduce, 0, 30) + t(ok, add_reduce, 30, 0) + t(ok, add_reduce, 0, 0) + add_reduce(np.array([], dtype=np.int64)) + add_reduce(np.zeros((0, 0)), axis=()) + + +class TestDUFuncReduce(TestCase): + def _check_reduce(self, ufunc, dtype=None, initial=None): + + @njit + def foo(a, axis, dtype, initial): + return ufunc.reduce(a, + axis=axis, + dtype=dtype, + initial=initial) + + inputs = [ + np.arange(5), + np.arange(4).reshape(2, 2), + np.arange(40).reshape(5, 4, 2), + ] + for array in inputs: + for axis in range(array.ndim): + expected = foo.py_func(array, axis, dtype, initial) + got = foo(array, axis, dtype, initial) + self.assertPreciseEqual(expected, got) + + def _check_reduce_axis(self, ufunc, dtype, initial=None): + + @njit + def foo(a, axis): + return ufunc.reduce(a, axis=axis, initial=initial) + + def _check(*args): + try: + expected = foo.py_func(array, axis) + except ValueError as e: + self.assertEqual(e.args[0], exc_msg) + with self.assertRaisesRegex(TypingError, exc_msg): + got = foo(array, axis) + else: + got = foo(array, axis) + self.assertPreciseEqual(expected, got) + + exc_msg = (f"reduction operation '{ufunc.__name__}' is 
not " + "reorderable, so at most one axis may be specified") + inputs = [ + np.arange(40, dtype=dtype).reshape(5, 4, 2), + np.arange(10, dtype=dtype), + ] + for array in inputs: + for i in range(1, array.ndim + 1): + for axis in itertools.combinations(range(array.ndim), r=i): + _check(array, axis) + + # corner cases: Reduce over axis=() and axis=None + for axis in ((), None): + _check(array, axis) + + def test_add_reduce(self): + duadd = vectorize('int64(int64, int64)', identity=0)(pyuadd) + self._check_reduce(duadd) + self._check_reduce_axis(duadd, dtype=np.int64) + + def test_mul_reduce(self): + dumul = vectorize('int64(int64, int64)', identity=1)(pymult) + self._check_reduce(dumul) + + def test_non_associative_reduce(self): + dusub = vectorize('int64(int64, int64)', identity=None)(pysub) + dudiv = vectorize('int64(int64, int64)', identity=None)(pydiv) + self._check_reduce(dusub) + self._check_reduce_axis(dusub, dtype=np.int64) + self._check_reduce(dudiv) + self._check_reduce_axis(dudiv, dtype=np.int64) + + def test_reduce_dtype(self): + duadd = vectorize('float64(float64, int64)', identity=0)(pyuadd) + self._check_reduce(duadd, dtype=np.float64) + + def test_min_reduce(self): + dumin = vectorize('int64(int64, int64)', identity='reorderable')(pymin) + self._check_reduce(dumin, initial=10) + self._check_reduce_axis(dumin, dtype=np.int64) + + def test_add_reduce_initial(self): + # Initial should be used as a start + duadd = vectorize('int64(int64, int64)', identity=0)(pyuadd) + self._check_reduce(duadd, dtype=np.int64, initial=100) + + def test_add_reduce_no_initial_or_identity(self): + # don't provide an initial or identity value + duadd = vectorize('int64(int64, int64)')(pyuadd) + self._check_reduce(duadd, dtype=np.int64) + + def test_invalid_input(self): + duadd = vectorize('float64(float64, int64)', identity=0)(pyuadd) + + @njit + def foo(a): + return duadd.reduce(a) + + exc_msg = 'The first argument "array" must be array-like' + with 
self.assertRaisesRegex(TypingError, exc_msg): + foo('a') + + def test_dufunc_negative_axis(self): + duadd = vectorize('int64(int64, int64)', identity=0)(pyuadd) + + @njit + def foo(a, axis): + return duadd.reduce(a, axis=axis) + + a = np.arange(40).reshape(5, 4, 2) + cases = (0, -1, (0, -1), (-1, -2), (1, -1), -3) + for axis in cases: + expected = duadd.reduce(a, axis) + got = foo(a, axis) + self.assertPreciseEqual(expected, got) + + def test_dufunc_invalid_axis(self): + duadd = vectorize('int64(int64, int64)', identity=0)(pyuadd) + + @njit + def foo(a, axis): + return duadd.reduce(a, axis=axis) + + a = np.arange(40).reshape(5, 4, 2) + cases = ((0, 0), (0, 1, 0), (0, -3), (-1, -1), (-1, 2)) + for axis in cases: + msg = "duplicate value in 'axis'" + with self.assertRaisesRegex(ValueError, msg): + foo(a, axis) + + cases = (-4, 3, (0, -4),) + for axis in cases: + with self.assertRaisesRegex(ValueError, "Invalid axis"): + foo(a, axis) + + +class TestDUFuncPickling(MemoryLeakMixin, unittest.TestCase): + def check(self, ident, result_type): + buf = pickle.dumps(ident) + rebuilt = pickle.loads(buf) + + # Check reconstructed dufunc + r = rebuilt(123) + self.assertEqual(123, r) + self.assertIsInstance(r, result_type) + + # Try to use reconstructed dufunc in @jit + @njit + def foo(x): + return rebuilt(x) + + r = foo(321) + self.assertEqual(321, r) + self.assertIsInstance(r, result_type) + + def test_unrestricted(self): + @vectorize + def ident(x1): + return x1 + + self.check(ident, result_type=(int, np.integer)) + + def test_restricted(self): + @vectorize(["float64(float64)"]) + def ident(x1): + return x1 + + self.check(ident, result_type=float) + + +if __name__ == "__main__": + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_errors.py b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_errors.py new file mode 100644 index 0000000000000000000000000000000000000000..948655d22b44fc76faf4ed31ad57dd5b6b59a574 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_errors.py @@ -0,0 +1,174 @@ +import contextlib +import sys + +import numpy as np + +from numba import vectorize, guvectorize + +from numba.tests.support import (TestCase, CheckWarningsMixin, + skip_macos_fenv_errors) +import unittest + + +def sqrt(val): + if val < 0.0: + raise ValueError('Value must be positive') + return val ** 0.5 + + +def gufunc_foo(inp, n, out): + for i in range(inp.shape[0]): + if inp[i] < 0: + raise ValueError('Value must be positive') + out[i] = inp[i] * n[0] + +def truediv(a, b): + return a / b + +def floordiv(a, b): + return a // b + +def remainder(a, b): + return a % b + +def power(a, b): + return a ** b + + +class TestExceptions(TestCase): + """ + Test raising exceptions inside ufuncs. + """ + + def check_ufunc_raise(self, **vectorize_args): + f = vectorize(['float64(float64)'], **vectorize_args)(sqrt) + arr = np.array([1, 4, -2, 9, -1, 16], dtype=np.float64) + out = np.zeros_like(arr) + with self.assertRaises(ValueError) as cm: + f(arr, out) + self.assertIn('Value must be positive', str(cm.exception)) + # All values were computed except for the ones giving an error + self.assertEqual(list(out), [1, 2, 0, 3, 0, 4]) + + def test_ufunc_raise(self): + self.check_ufunc_raise(nopython=True) + + def test_ufunc_raise_objmode(self): + self.check_ufunc_raise(forceobj=True) + + def check_gufunc_raise(self, **vectorize_args): + f = guvectorize(['int32[:], int32[:], int32[:]'], '(n),()->(n)', + **vectorize_args)(gufunc_foo) + arr = np.array([1, 2, -3, 4], dtype=np.int32) + out = np.zeros_like(arr) + with self.assertRaises(ValueError) as cm: + f(arr, 2, out) + # The gufunc bailed out after the error + self.assertEqual(list(out), [2, 4, 0, 0]) + + def test_gufunc_raise(self): + self.check_gufunc_raise(nopython=True) + + def test_gufunc_raise_objmode(self): + self.check_gufunc_raise(forceobj=True) + +class TestFloatingPointExceptions(TestCase, CheckWarningsMixin): + """ + Test floating-point 
exceptions inside ufuncs. + + Note the warnings emitted by Numpy reflect IEEE-754 semantics. + """ + + def check_truediv_real(self, dtype): + """ + Test 1 / 0 and 0 / 0. + """ + f = vectorize(nopython=True)(truediv) + a = np.array([5., 6., 0., 8.], dtype=dtype) + b = np.array([1., 0., 0., 4.], dtype=dtype) + expected = np.array([5., float('inf'), float('nan'), 2.]) + with self.check_warnings(["divide by zero encountered", + "invalid value encountered"]): + res = f(a, b) + self.assertPreciseEqual(res, expected) + + def test_truediv_float(self): + self.check_truediv_real(np.float64) + + def test_truediv_integer(self): + self.check_truediv_real(np.int32) + + def check_divmod_float(self, pyfunc, values, messages): + """ + Test 1 // 0 and 0 // 0. + """ + f = vectorize(nopython=True)(pyfunc) + a = np.array([5., 6., 0., 9.]) + b = np.array([1., 0., 0., 4.]) + expected = np.array(values) + with self.check_warnings(messages): + res = f(a, b) + self.assertPreciseEqual(res, expected) + + def test_floordiv_float(self): + self.check_divmod_float(floordiv, + [5.0, float('inf'), float('nan'), 2.0], + ["divide by zero encountered", + "invalid value encountered"]) + + @skip_macos_fenv_errors + def test_remainder_float(self): + self.check_divmod_float(remainder, + [0.0, float('nan'), float('nan'), 1.0], + ["invalid value encountered"]) + + def check_divmod_int(self, pyfunc, values): + """ + Test 1 % 0 and 0 % 0. + """ + f = vectorize(nopython=True)(pyfunc) + a = np.array([5, 6, 0, 9]) + b = np.array([1, 0, 0, 4]) + expected = np.array(values) + # No warnings raised because LLVM makes it difficult + with self.check_warnings([]): + res = f(a, b) + self.assertPreciseEqual(res, expected) + + def test_floordiv_int(self): + self.check_divmod_int(floordiv, [5, 0, 0, 2]) + + def test_remainder_int(self): + self.check_divmod_int(remainder, [0, 0, 0, 1]) + + def test_power_float(self): + """ + Test 0 ** -1 and 2 ** . 
+ """ + f = vectorize(nopython=True)(power) + a = np.array([5., 0., 2., 8.]) + b = np.array([1., -1., 1e20, 4.]) + expected = np.array([5., float('inf'), float('inf'), 4096.]) + with self.check_warnings(["divide by zero encountered", + "overflow encountered"]): + res = f(a, b) + self.assertPreciseEqual(res, expected) + + def test_power_integer(self): + """ + Test 0 ** -1. + Note 2 ** returns an undefined value (depending + on the algorithm). + """ + dtype = np.int64 + f = vectorize(["int64(int64, int64)"], nopython=True)(power) + a = np.array([5, 0, 6], dtype=dtype) + b = np.array([1, -1, 2], dtype=dtype) + expected = np.array([5, -2**63, 36], dtype=dtype) + with self.check_warnings([]): + res = f(a, b) + self.assertPreciseEqual(res, expected) + + +if __name__ == "__main__": + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_gufunc.py b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_gufunc.py new file mode 100644 index 0000000000000000000000000000000000000000..eeb3343ffb17bbbf5d3146a272813ef6a3cd07f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_gufunc.py @@ -0,0 +1,849 @@ +import unittest +import pickle + +import numpy as np + +from numba import void, float32, float64, int32, int64, jit, guvectorize +from numba.core.errors import TypingError +from numba.np.ufunc import GUVectorize +from numba.tests.support import tag, TestCase + + +def matmulcore(A, B, C): + """docstring for matmulcore""" + m, n = A.shape + n, p = B.shape + for i in range(m): + for j in range(p): + C[i, j] = 0 + for k in range(n): + C[i, j] += A[i, k] * B[k, j] + + +def axpy(a, x, y, out): + out[0] = a * x + y + + +class TestGUFunc(TestCase): + target = 'cpu' + + def check_matmul_gufunc(self, gufunc): + matrix_ct = 1001 + A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2, 4) + B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4, 5) + + C = gufunc(A, B) + Gold = 
np.matmul(A, B) + + np.testing.assert_allclose(C, Gold, rtol=1e-5, atol=1e-8) + + def test_gufunc(self): + gufunc = GUVectorize(matmulcore, '(m,n),(n,p)->(m,p)', + target=self.target) + gufunc.add((float32[:, :], float32[:, :], float32[:, :])) + gufunc = gufunc.build_ufunc() + + self.check_matmul_gufunc(gufunc) + + def test_guvectorize_decor(self): + gufunc = guvectorize([void(float32[:,:], float32[:,:], float32[:,:])], + '(m,n),(n,p)->(m,p)', + target=self.target)(matmulcore) + + self.check_matmul_gufunc(gufunc) + + def test_ufunc_like(self): + # Test problem that the stride of "scalar" gufunc argument not properly + # handled when the actual argument is an array, + # causing the same value (first value) being repeated. + gufunc = GUVectorize(axpy, '(), (), () -> ()', target=self.target) + gufunc.add('(intp, intp, intp, intp[:])') + gufunc = gufunc.build_ufunc() + + x = np.arange(10, dtype=np.intp) + out = gufunc(x, x, x) + + np.testing.assert_equal(out, x * x + x) + + def test_axis(self): + # issue https://github.com/numba/numba/issues/6773 + @guvectorize(["f8[:],f8[:]"], "(n)->(n)") + def my_cumsum(x, res): + acc = 0 + for i in range(x.shape[0]): + acc += x[i] + res[i] = acc + + x = np.ones((20, 30)) + # Check regular call + y = my_cumsum(x, axis=0) + expected = np.cumsum(x, axis=0) + np.testing.assert_equal(y, expected) + # Check "out" kw + out_kw = np.zeros_like(y) + my_cumsum(x, out=out_kw, axis=0) + np.testing.assert_equal(out_kw, expected) + + def test_docstring(self): + @guvectorize([(int64[:], int64, int64[:])], '(n),()->(n)') + def gufunc(x, y, res): + "docstring for gufunc" + for i in range(x.shape[0]): + res[i] = x[i] + y + + self.assertEqual("numba.tests.npyufunc.test_gufunc", gufunc.__module__) + self.assertEqual("gufunc", gufunc.__name__) + self.assertEqual("TestGUFunc.test_docstring..gufunc", gufunc.__qualname__) + self.assertEqual("docstring for gufunc", gufunc.__doc__) + + +class TestMultipleOutputs(TestCase): + target = 'cpu' + + def 
test_multiple_outputs_same_type_passed_in(self): + @guvectorize('(x)->(x),(x)', + target=self.target) + def copy(A, B, C): + for i in range(B.size): + B[i] = A[i] + C[i] = A[i] + + A = np.arange(10, dtype=np.float32) + 1 + B = np.zeros_like(A) + C = np.zeros_like(A) + copy(A, B, C) + np.testing.assert_allclose(A, B) + np.testing.assert_allclose(A, C) + + def test_multiple_outputs_distinct_values(self): + + @guvectorize('(x)->(x),(x)', + target=self.target) + def copy_and_double(A, B, C): + for i in range(B.size): + B[i] = A[i] + C[i] = A[i] * 2 + + A = np.arange(10, dtype=np.float32) + 1 + B = np.zeros_like(A) + C = np.zeros_like(A) + copy_and_double(A, B, C) + np.testing.assert_allclose(A, B) + np.testing.assert_allclose(A * 2, C) + + def test_multiple_output_dtypes(self): + + @guvectorize('(x)->(x),(x)', + target=self.target) + def copy_and_multiply(A, B, C): + for i in range(B.size): + B[i] = A[i] + C[i] = A[i] * 1.5 + + A = np.arange(10, dtype=np.int32) + 1 + B = np.zeros_like(A) + C = np.zeros_like(A, dtype=np.float64) + copy_and_multiply(A, B, C) + np.testing.assert_allclose(A, B) + np.testing.assert_allclose(A * np.float64(1.5), C) + + def test_incorrect_number_of_pos_args(self): + @guvectorize('(m),(m)->(m),(m)', target=self.target) + def f(x, y, z, w): + pass + + arr = np.arange(5, dtype=np.int32) + + # Inputs only, too few + msg = "Too few arguments for function 'f'" + with self.assertRaises(TypeError) as te: + f(arr) + self.assertIn(msg, str(te.exception)) + + # Inputs and outputs, too many + with self.assertRaises(TypeError) as te: + f(arr, arr, arr, arr, arr) + self.assertIn(msg, str(te.exception)) + + +class TestGUFuncParallel(TestGUFunc): + _numba_parallel_test_ = False + target = 'parallel' + + +class TestDynamicGUFunc(TestCase): + target = 'cpu' + + def test_dynamic_matmul(self): + + def check_matmul_gufunc(gufunc, A, B, C): + Gold = np.matmul(A, B) + gufunc(A, B, C) + np.testing.assert_allclose(C, Gold, rtol=1e-5, atol=1e-8) + + gufunc = 
GUVectorize(matmulcore, '(m,n),(n,p)->(m,p)', + target=self.target, is_dynamic=True) + matrix_ct = 10 + Ai64 = np.arange(matrix_ct * 2 * 4, dtype=np.int64).reshape(matrix_ct, 2, 4) + Bi64 = np.arange(matrix_ct * 4 * 5, dtype=np.int64).reshape(matrix_ct, 4, 5) + Ci64 = np.arange(matrix_ct * 2 * 5, dtype=np.int64).reshape(matrix_ct, 2, 5) + check_matmul_gufunc(gufunc, Ai64, Bi64, Ci64) + + A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2, 4) + B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4, 5) + C = np.arange(matrix_ct * 2 * 5, dtype=np.float32).reshape(matrix_ct, 2, 5) + check_matmul_gufunc(gufunc, A, B, C) # trigger compilation + + self.assertEqual(len(gufunc.types), 2) # ensure two versions of gufunc + + + def test_dynamic_ufunc_like(self): + + def check_ufunc_output(gufunc, x): + out = np.zeros(10, dtype=x.dtype) + out_kw = np.zeros(10, dtype=x.dtype) + gufunc(x, x, x, out) + gufunc(x, x, x, out=out_kw) + golden = x * x + x + np.testing.assert_equal(out, golden) + np.testing.assert_equal(out_kw, golden) + + # Test problem that the stride of "scalar" gufunc argument not properly + # handled when the actual argument is an array, + # causing the same value (first value) being repeated. + gufunc = GUVectorize(axpy, '(), (), () -> ()', target=self.target, + is_dynamic=True) + x = np.arange(10, dtype=np.intp) + check_ufunc_output(gufunc, x) + + + def test_dynamic_scalar_output(self): + """ + Note that scalar output is a 0-dimension array that acts as + a pointer to the output location. + """ + + @guvectorize('(n)->()', target=self.target, nopython=True) + def sum_row(inp, out): + tmp = 0. + for i in range(inp.shape[0]): + tmp += inp[i] + out[()] = tmp + + # inp is (10000, 3) + # out is (10000) + # The outer (leftmost) dimension must match or numpy broadcasting is performed. 
+ + self.assertTrue(sum_row.is_dynamic) + inp = np.arange(30000, dtype=np.int32).reshape(10000, 3) + out = np.zeros(10000, dtype=np.int32) + sum_row(inp, out) + + # verify result + for i in range(inp.shape[0]): + self.assertEqual(out[i], inp[i].sum()) + + msg = "Too few arguments for function 'sum_row'." + with self.assertRaisesRegex(TypeError, msg): + sum_row(inp) + + def test_axis(self): + # issue https://github.com/numba/numba/issues/6773 + @guvectorize("(n)->(n)") + def my_cumsum(x, res): + acc = 0 + for i in range(x.shape[0]): + acc += x[i] + res[i] = acc + + x = np.ones((20, 30)) + expected = np.cumsum(x, axis=0) + # Check regular call + y = np.zeros_like(expected) + my_cumsum(x, y, axis=0) + np.testing.assert_equal(y, expected) + # Check "out" kw + out_kw = np.zeros_like(y) + my_cumsum(x, out=out_kw, axis=0) + np.testing.assert_equal(out_kw, expected) + + def test_gufunc_attributes(self): + @guvectorize("(n)->(n)") + def gufunc(x, res): + acc = 0 + for i in range(x.shape[0]): + acc += x[i] + res[i] = acc + + # ensure gufunc exports attributes + attrs = ['signature', 'accumulate', 'at', 'outer', 'reduce', 'reduceat'] + for attr in attrs: + contains = hasattr(gufunc, attr) + self.assertTrue(contains, 'dynamic gufunc not exporting "%s"' % (attr,)) + + a = np.array([1, 2, 3, 4]) + res = np.array([0, 0, 0, 0]) + gufunc(a, res) # trigger compilation + self.assertPreciseEqual(res, np.array([1, 3, 6, 10])) + + # other attributes are not callable from a gufunc with signature + # see: https://github.com/numba/numba/issues/2794 + # note: this is a limitation in NumPy source code! 
+ self.assertEqual(gufunc.signature, "(n)->(n)") + + with self.assertRaises(RuntimeError) as raises: + gufunc.accumulate(a) + self.assertEqual(str(raises.exception), "Reduction not defined on ufunc with signature") + + with self.assertRaises(RuntimeError) as raises: + gufunc.reduce(a) + self.assertEqual(str(raises.exception), "Reduction not defined on ufunc with signature") + + with self.assertRaises(RuntimeError) as raises: + gufunc.reduceat(a, [0, 2]) + self.assertEqual(str(raises.exception), "Reduction not defined on ufunc with signature") + + with self.assertRaises(TypeError) as raises: + gufunc.outer(a, a) + self.assertEqual(str(raises.exception), "method outer is not allowed in ufunc with non-trivial signature") + + def test_gufunc_attributes2(self): + @guvectorize('(),()->()') + def add(x, y, res): + res[0] = x + y + + # add signature "(),() -> ()" is evaluated to None + self.assertIsNone(add.signature) + + a = np.array([1, 2, 3, 4]) + b = np.array([4, 3, 2, 1]) + res = np.array([0, 0, 0, 0]) + add(a, b, res) # trigger compilation + self.assertPreciseEqual(res, np.array([5, 5, 5, 5])) + + # now test other attributes + self.assertIsNone(add.signature) + self.assertEqual(add.reduce(a), 10) + self.assertPreciseEqual(add.accumulate(a), np.array([1, 3, 6, 10])) + self.assertPreciseEqual(add.outer([0, 1], [1, 2]), np.array([[1, 2], [2, 3]])) + self.assertPreciseEqual(add.reduceat(a, [0, 2]), np.array([3, 7])) + + x = np.array([1, 2, 3, 4]) + y = np.array([1, 2]) + add.at(x, [0, 1], y) + self.assertPreciseEqual(x, np.array([2, 4, 3, 4])) + + +class TestGUVectorizeScalar(TestCase): + """ + Nothing keeps user from out-of-bound memory access + """ + target = 'cpu' + + def test_scalar_output(self): + """ + Note that scalar output is a 0-dimension array that acts as + a pointer to the output location. + """ + + @guvectorize(['void(int32[:], int32[:])'], '(n)->()', + target=self.target, nopython=True) + def sum_row(inp, out): + tmp = 0. 
+ for i in range(inp.shape[0]): + tmp += inp[i] + out[()] = tmp + + # inp is (10000, 3) + # out is (10000) + # The outer (leftmost) dimension must match or numpy broadcasting is performed. + + inp = np.arange(30000, dtype=np.int32).reshape(10000, 3) + out = sum_row(inp) + + # verify result + for i in range(inp.shape[0]): + self.assertEqual(out[i], inp[i].sum()) + + def test_scalar_input(self): + + @guvectorize(['int32[:], int32[:], int32[:]'], '(n),()->(n)', + target=self.target, nopython=True) + def foo(inp, n, out): + for i in range(inp.shape[0]): + out[i] = inp[i] * n[0] + + inp = np.arange(3 * 10, dtype=np.int32).reshape(10, 3) + # out = np.empty_like(inp) + out = foo(inp, 2) + + # verify result + self.assertPreciseEqual(inp * 2, out) + + def test_scalar_input_core_type(self): + def pyfunc(inp, n, out): + for i in range(inp.size): + out[i] = n * (inp[i] + 1) + + my_gufunc = guvectorize(['int32[:], int32, int32[:]'], + '(n),()->(n)', + target=self.target)(pyfunc) + + # test single core loop execution + arr = np.arange(10).astype(np.int32) + got = my_gufunc(arr, 2) + + expected = np.zeros_like(got) + pyfunc(arr, 2, expected) + + np.testing.assert_equal(got, expected) + + # test multiple core loop execution + arr = np.arange(20).astype(np.int32).reshape(10, 2) + got = my_gufunc(arr, 2) + + expected = np.zeros_like(got) + for ax in range(expected.shape[0]): + pyfunc(arr[ax], 2, expected[ax]) + + np.testing.assert_equal(got, expected) + + def test_scalar_input_core_type_error(self): + with self.assertRaises(TypeError) as raises: + @guvectorize(['int32[:], int32, int32[:]'], '(n),(n)->(n)', + target=self.target) + def pyfunc(a, b, c): + pass + self.assertEqual("scalar type int32 given for non scalar argument #2", + str(raises.exception)) + + def test_ndim_mismatch(self): + with self.assertRaises(TypeError) as raises: + @guvectorize(['int32[:], int32[:]'], '(m,n)->(n)', + target=self.target) + def pyfunc(a, b): + pass + self.assertEqual("type and shape signature 
mismatch for arg #1", + str(raises.exception)) + + +class TestGUVectorizeScalarParallel(TestGUVectorizeScalar): + _numba_parallel_test_ = False + target = 'parallel' + + +class TestGUVectorizePickling(TestCase): + def test_pickle_gufunc_non_dyanmic(self): + """Non-dynamic gufunc. + """ + @guvectorize(["f8,f8[:]"], "()->()") + def double(x, out): + out[:] = x * 2 + + # pickle + ser = pickle.dumps(double) + cloned = pickle.loads(ser) + + # attributes carried over + self.assertEqual(cloned._frozen, double._frozen) + self.assertEqual(cloned.identity, double.identity) + self.assertEqual(cloned.is_dynamic, double.is_dynamic) + self.assertEqual(cloned.gufunc_builder._sigs, + double.gufunc_builder._sigs) + # expected value of attributes + self.assertTrue(cloned._frozen) + + cloned.disable_compile() + self.assertTrue(cloned._frozen) + + # scalar version + self.assertPreciseEqual(double(0.5), cloned(0.5)) + # array version + arr = np.arange(10) + self.assertPreciseEqual(double(arr), cloned(arr)) + + def test_pickle_gufunc_dyanmic_null_init(self): + """Dynamic gufunc w/o prepopulating before pickling. 
+ """ + @guvectorize("()->()", identity=1) + def double(x, out): + out[:] = x * 2 + + # pickle + ser = pickle.dumps(double) + cloned = pickle.loads(ser) + + # attributes carried over + self.assertEqual(cloned._frozen, double._frozen) + self.assertEqual(cloned.identity, double.identity) + self.assertEqual(cloned.is_dynamic, double.is_dynamic) + self.assertEqual(cloned.gufunc_builder._sigs, + double.gufunc_builder._sigs) + # expected value of attributes + self.assertFalse(cloned._frozen) + + # scalar version + expect = np.zeros(1) + got = np.zeros(1) + double(0.5, out=expect) + cloned(0.5, out=got) + self.assertPreciseEqual(expect, got) + # array version + arr = np.arange(10) + expect = np.zeros_like(arr) + got = np.zeros_like(arr) + double(arr, out=expect) + cloned(arr, out=got) + self.assertPreciseEqual(expect, got) + + def test_pickle_gufunc_dynamic_initialized(self): + """Dynamic gufunc prepopulated before pickling. + + Once unpickled, we disable compilation to verify that the gufunc + compilation state is carried over. 
+ """ + @guvectorize("()->()", identity=1) + def double(x, out): + out[:] = x * 2 + + # prepopulate scalar + expect = np.zeros(1) + got = np.zeros(1) + double(0.5, out=expect) + # prepopulate array + arr = np.arange(10) + expect = np.zeros_like(arr) + got = np.zeros_like(arr) + double(arr, out=expect) + + # pickle + ser = pickle.dumps(double) + cloned = pickle.loads(ser) + + # attributes carried over + self.assertEqual(cloned._frozen, double._frozen) + self.assertEqual(cloned.identity, double.identity) + self.assertEqual(cloned.is_dynamic, double.is_dynamic) + self.assertEqual(cloned.gufunc_builder._sigs, + double.gufunc_builder._sigs) + # expected value of attributes + self.assertFalse(cloned._frozen) + + # disable compilation + cloned.disable_compile() + self.assertTrue(cloned._frozen) + # scalar version + expect = np.zeros(1) + got = np.zeros(1) + double(0.5, out=expect) + cloned(0.5, out=got) + self.assertPreciseEqual(expect, got) + # array version + expect = np.zeros_like(arr) + got = np.zeros_like(arr) + double(arr, out=expect) + cloned(arr, out=got) + self.assertPreciseEqual(expect, got) + + +class TestGUVectorizeJit(TestCase): + target = 'cpu' + + def check_add_gufunc(self, gufunc): + @jit(nopython=True) + def jit_add(x, y, res): + gufunc(x, y, res) + + x = np.arange(40, dtype='i8').reshape(4, 2, 5) + y = np.int32(100) + res = np.zeros_like(x) + jit_add(x, y, res) + self.assertPreciseEqual(res, x + y) + + def test_add_static(self): + @guvectorize('int64[:], int64, int64[:]', '(n),()->(n)', + target=self.target) + def add(x, y, res): + for i in range(x.shape[0]): + res[i] = x[i] + y + + self.check_add_gufunc(add) + + def test_add_static_cast_args(self): + # cast the second argument from i32 -> i64 + @guvectorize('int64[:], int64, int64[:]', '(n),()->(n)', + target=self.target) + def add(x, y, res): + for i in range(x.shape[0]): + res[i] = x[i] + y + + self.check_add_gufunc(add) + + def test_add_dynamic(self): + @guvectorize('(n),()->(n)', target=self.target) 
+ def add(x, y, res): + for i in range(x.shape[0]): + res[i] = x[i] + y + + self.check_add_gufunc(add) + + @unittest.expectedFailure + def test_object_mode(self): + @guvectorize('(n),()->(n)', target=self.target, forceobj=True) + def add(x, y, res): + for i in range(x.shape[0]): + res[i] = x[i] + y + + self.check_add_gufunc(add) + + def check_matmul(self, jit_func): + matrix_ct = 1001 + A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2, 4) + B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4, 5) + C = np.arange(matrix_ct * 2 * 5, dtype=np.float32).reshape(matrix_ct, 2, 5) + + jit_func(A, B, C) + Gold = np.matmul(A, B) + + np.testing.assert_allclose(C, Gold, rtol=1e-5, atol=1e-8) + + def test_njit_matmul_call(self): + + gufunc = guvectorize('(m,n),(n,p)->(m,p)', + target=self.target)(matmulcore) + + @jit(nopython=True) + def matmul_jit(A, B, C): + return gufunc(A, B, C) + + self.check_matmul(matmul_jit) + + def test_axpy(self): + gufunc = GUVectorize(axpy, '(),(),() -> ()', target=self.target, + is_dynamic=True) + + @jit(nopython=True) + def axpy_jit(a, x, y, out): + gufunc(a, x, y, out) + + x = np.arange(10, dtype=np.intp) + out = np.zeros_like(x) + axpy_jit(x, x, x, out) + self.assertPreciseEqual(out, x * x + x) + + def test_output_scalar(self): + + @guvectorize('(n),(m) -> ()') + def gufunc(x, y, res): + res[0] = x.sum() + y.sum() + + @jit(nopython=True) + def jit_func(x, y, res): + gufunc(x, y, res) + + x = np.arange(40, dtype='i8').reshape(4, 10) + y = np.arange(20, dtype='i8') + res = np.zeros(4, dtype='i8') + jit_func(x, y, res) + expected = np.zeros_like(res) + gufunc(x, y, expected) + self.assertPreciseEqual(res, expected) + + def test_input_scalar(self): + + @guvectorize('() -> ()') + def gufunc(x, res): + res[0] = x + 100 + + @jit(nopython=True) + def jit_func(x, res): + gufunc(x, res) + + x = np.arange(40, dtype='i8').reshape(5, 2, 4) + res = np.zeros_like(x) + jit_func(x, res) + expected = np.zeros_like(res) + 
gufunc(x, expected) + self.assertPreciseEqual(res, expected) + + def test_gufunc_ndim_mismatch(self): + signature = "(n, m), (n, n, n) -> (m), (n, n)" + @guvectorize(signature) + def bar(x, y, res, out): + res[0] = 123 + out[0] = 456 + + @jit(nopython=True) + def foo(x, y, res, out): + bar(x, y, res, out) + + N, M = 2, 3 + x = np.arange(N**2).reshape(N, N) + y = np.arange(N**3).reshape(N, N, N) + res = np.arange(M) + out = np.arange(N**2).reshape(N, N) + + # calling with a 1d array should result in an error + with self.assertRaises(TypingError) as raises: + x_ = np.arange(N * N) + foo(x_, y, res, out) + msg = ('bar: Input operand 0 does not have enough dimensions (has ' + f'1, gufunc core with signature {signature} requires 2)') + self.assertIn(msg, str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + y_ = np.arange(N * N).reshape(N, N) + foo(x, y_, res, out) + msg = ('bar: Input operand 1 does not have enough dimensions (has ' + f'2, gufunc core with signature {signature} requires 3)') + self.assertIn(msg, str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + res_ = np.array(3) + foo(x, y, res_, out) + msg = ('bar: Output operand 0 does not have enough dimensions (has ' + f'0, gufunc core with signature {signature} requires 1)') + self.assertIn(msg, str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + out_ = np.arange(N) + foo(x, y, res, out_) + msg = ('bar: Output operand 1 does not have enough dimensions (has ' + f'1, gufunc core with signature {signature} requires 2)') + self.assertIn(msg, str(raises.exception)) + + def test_mismatch_inner_dimensions(self): + @guvectorize('(n),(n) -> ()') + def bar(x, y, res): + res[0] = 123 + + @jit(nopython=True) + def foo(x, y, res): + bar(x, y, res) + + N = 2 + M = 3 + x = np.empty((5, 3, N)) + y = np.empty((M,)) + res = np.zeros((5, 3)) + + # ensure that NumPy raises an exception + with self.assertRaises(ValueError) as np_raises: + bar(x, y, res) + msg = 
('Input operand 1 has a mismatch in its core dimension 0, with ' + 'gufunc signature (n),(n) -> () (size 3 is different from 2)') + self.assertIn(msg, str(np_raises.exception)) + + with self.assertRaises(ValueError) as raises: + foo(x, y, res) + msg = ('Operand has a mismatch in one of its core dimensions') + self.assertIn(msg, str(raises.exception)) + + def test_mismatch_inner_dimensions_input_output(self): + @guvectorize('(n),(m) -> (n)') + def bar(x, y, res): + res[0] = 123 + + @jit(nopython=True) + def foo(x, y, res): + bar(x, y, res) + + N = 2 + M = 3 + x = np.empty((5, 3, N)) + y = np.empty((M,)) + res = np.zeros((5, 3)) + + # ensure that NumPy raises an exception + with self.assertRaises(ValueError) as np_raises: + bar(x, y, res) + msg = ('Output operand 0 has a mismatch in its core dimension 0, with ' + 'gufunc signature (n),(m) -> (n) (size 3 is different from 2)') + self.assertIn(msg, str(np_raises.exception)) + + with self.assertRaises(ValueError) as raises: + foo(x, y, res) + msg = ('Operand has a mismatch in one of its core dimensions') + self.assertIn(msg, str(raises.exception)) + + def test_mismatch_inner_dimensions_output(self): + @guvectorize('(n),(m) -> (m),(m)') + def bar(x, y, res, out): + res[0] = 123 + out[0] = 456 + + @jit(nopython=True) + def foo(x, y, res, out): + bar(x, y, res, out) + + N = 2 + M = 3 + x = np.empty((N,)) + y = np.empty((M,)) + res = np.zeros((N,)) + out = np.zeros((M,)) + + # ensure that NumPy raises an exception + with self.assertRaises(ValueError) as np_raises: + bar(x, y, res, out) + msg = ('Output operand 0 has a mismatch in its core dimension 0, with ' + 'gufunc signature (n),(m) -> (m),(m) (size 2 is different from 3)') + self.assertIn(msg, str(np_raises.exception)) + + with self.assertRaises(ValueError) as raises: + foo(x, y, res, out) + msg = ('Operand has a mismatch in one of its core dimensions') + self.assertIn(msg, str(raises.exception)) + + def test_mismatch_loop_shape(self): + @guvectorize('(n),(n) -> ()') + 
def bar(x, y, res): + res[0] = 123 + + @jit(nopython=True) + def foo(x, y, res): + bar(x, y, res) + + N = 2 + x = np.empty((1, 5, 3, N,)) + y = np.empty((5, 3, N,)) + res = np.zeros((5, 3)) + + with self.assertRaises(ValueError) as raises: + foo(x, y, res) + msg = ('Loop and array shapes are incompatible') + self.assertIn(msg, str(raises.exception)) + + def test_mismatch_loop_shape_2(self): + @guvectorize('(n),(n) -> (), (n)') + def gufunc(x, y, res, out): + res[0] = x.sum() + for i in range(x.shape[0]): + out[i] += x[i] + y.sum() + + @jit + def jit_func(x, y, res, out): + gufunc(x, y, res, out) + + N = 2 + + x = np.arange(4*N).reshape((4, N)) + y = np.arange(N) + res = np.empty((3,)) + out = np.zeros((3, N)) + + # ensure that NumPy raises an exception + with self.assertRaises(ValueError) as np_raises: + gufunc(x, y, res, out) + msg = ('operands could not be broadcast together with remapped shapes ' + '[original->remapped]: (4,2)->(4,newaxis) (2,)->() ' + '(3,)->(3,newaxis) (3,2)->(3,2) and requested shape (2)') + self.assertIn(msg, str(np_raises.exception)) + + with self.assertRaises(ValueError) as raises: + jit_func(x, y, res, out) + msg = ('Loop and array shapes are incompatible') + self.assertIn(msg, str(raises.exception)) + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_parallel_env_variable.py b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_parallel_env_variable.py new file mode 100644 index 0000000000000000000000000000000000000000..7d11692ad34250e1056121e570d4c18d86f93183 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_parallel_env_variable.py @@ -0,0 +1,38 @@ +from numba.np.ufunc.parallel import get_thread_count +from os import environ as env +from numba.core import config +import unittest + + +class TestParallelEnvVariable(unittest.TestCase): + """ + Tests environment variables related to the underlying "parallel" + functions for npyufuncs. 
+ """ + + _numba_parallel_test_ = False + + def test_num_threads_variable(self): + """ + Tests the NUMBA_NUM_THREADS env variable behaves as expected. + """ + key = 'NUMBA_NUM_THREADS' + current = str(getattr(env, key, config.NUMBA_NUM_THREADS)) + threads = "3154" + env[key] = threads + try: + config.reload_config() + except RuntimeError as e: + # This test should fail if threads have already been launched + self.assertIn("Cannot set NUMBA_NUM_THREADS", e.args[0]) + else: + self.assertEqual(threads, str(get_thread_count())) + self.assertEqual(threads, str(config.NUMBA_NUM_THREADS)) + finally: + # reset the env variable/set to default. Should not fail even if + # threads are launched because the value is the same. + env[key] = current + config.reload_config() + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_parallel_low_work.py b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_parallel_low_work.py new file mode 100644 index 0000000000000000000000000000000000000000..cab4d42749f643ca1b38f74f3f6ef400661c5023 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_parallel_low_work.py @@ -0,0 +1,44 @@ +""" +There was a deadlock problem when work count is smaller than number of threads. 
+""" + +import numpy as np + +from numba import float32, float64, int32, uint32 +from numba.np.ufunc import Vectorize +import unittest + + +def vector_add(a, b): + return a + b + + +class TestParallelLowWorkCount(unittest.TestCase): + + _numba_parallel_test_ = False + + def test_low_workcount(self): + # build parallel native code ufunc + pv = Vectorize(vector_add, target='parallel') + for ty in (int32, uint32, float32, float64): + pv.add(ty(ty, ty)) + para_ufunc = pv.build_ufunc() + + # build python ufunc + np_ufunc = np.vectorize(vector_add) + + # test it out + def test(ty): + data = np.arange(1).astype(ty) # just one item + result = para_ufunc(data, data) + gold = np_ufunc(data, data) + np.testing.assert_allclose(gold, result) + + test(np.double) + test(np.float32) + test(np.int32) + test(np.uint32) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_parallel_ufunc_issues.py b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_parallel_ufunc_issues.py new file mode 100644 index 0000000000000000000000000000000000000000..2237122291960d7e437875bc9d55b96bed4a1d33 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_parallel_ufunc_issues.py @@ -0,0 +1,128 @@ +import time +import ctypes + +import numpy as np + +from numba.tests.support import captured_stdout +from numba import vectorize, guvectorize +import unittest + + +class TestParUfuncIssues(unittest.TestCase): + + _numba_parallel_test_ = False + + def test_thread_response(self): + """ + Related to #89. + This does not test #89 but tests the fix for it. + We want to make sure the worker threads can be used multiple times + and with different time gap between each execution. 
+ """ + + @vectorize('float64(float64, float64)', target='parallel') + def fnv(a, b): + return a + b + + sleep_time = 1 # 1 second + while sleep_time > 0.00001: # 10us + time.sleep(sleep_time) + a = b = np.arange(10**5) + np.testing.assert_equal(a + b, fnv(a, b)) + # Reduce sleep time + sleep_time /= 2 + + def test_gil_reacquire_deadlock(self): + """ + Testing issue #1998 due to GIL reacquiring + """ + # make a ctypes callback that requires the GIL + proto = ctypes.CFUNCTYPE(None, ctypes.c_int32) + characters = 'abcdefghij' + + def bar(x): + print(characters[x]) + + cbar = proto(bar) + + # our unit under test + @vectorize(['int32(int32)'], target='parallel', nopython=True) + def foo(x): + print(x % 10) # this reacquires the GIL + cbar(x % 10) # this reacquires the GIL + return x * 2 + + # Numpy ufunc has a heuristic to determine whether to release the GIL + # during execution. Small input size (10) seems to not release the GIL. + # Large input size (1000) seems to release the GIL. + for nelem in [1, 10, 100, 1000]: + # inputs + a = np.arange(nelem, dtype=np.int32) + acopy = a.copy() + # run and capture stdout + with captured_stdout() as buf: + got = foo(a) + stdout = buf.getvalue() + buf.close() + # process outputs from print + got_output = sorted(map(lambda x: x.strip(), stdout.splitlines())) + # build expected output + expected_output = [str(x % 10) for x in range(nelem)] + expected_output += [characters[x % 10] for x in range(nelem)] + expected_output = sorted(expected_output) + # verify + self.assertEqual(got_output, expected_output) + np.testing.assert_equal(got, 2 * acopy) + + + +class TestParGUfuncIssues(unittest.TestCase): + + _numba_parallel_test_ = False + + def test_gil_reacquire_deadlock(self): + """ + Testing similar issue to #1998 due to GIL reacquiring for Gufunc + """ + # make a ctypes callback that requires the GIL + proto = ctypes.CFUNCTYPE(None, ctypes.c_int32) + characters = 'abcdefghij' + + def bar(x): + print(characters[x]) + + cbar = 
proto(bar) + + # our unit under test + @guvectorize(['(int32, int32[:])'], "()->()", + target='parallel', nopython=True) + def foo(x, out): + print(x % 10) # this reacquires the GIL + cbar(x % 10) # this reacquires the GIL + out[0] = x * 2 + + # Numpy ufunc has a heuristic to determine whether to release the GIL + # during execution. Small input size (10) seems to not release the GIL. + # Large input size (1000) seems to release the GIL. + for nelem in [1, 10, 100, 1000]: + # inputs + a = np.arange(nelem, dtype=np.int32) + acopy = a.copy() + # run and capture stdout + with captured_stdout() as buf: + got = foo(a) + stdout = buf.getvalue() + buf.close() + # process outputs from print + got_output = sorted(map(lambda x: x.strip(), stdout.splitlines())) + # build expected output + expected_output = [str(x % 10) for x in range(nelem)] + expected_output += [characters[x % 10] for x in range(nelem)] + expected_output = sorted(expected_output) + # verify + self.assertEqual(got_output, expected_output) + np.testing.assert_equal(got, 2 * acopy) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_ufunc.py b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_ufunc.py new file mode 100644 index 0000000000000000000000000000000000000000..16020d8ebc4f8f7c2ecc9ff492d490ae7caa90cd --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_ufunc.py @@ -0,0 +1,173 @@ +import numpy as np + +from numba import float32, jit, njit +from numba.np.ufunc import Vectorize +from numba.core.errors import TypingError +from numba.tests.support import TestCase +import unittest + + +dtype = np.float32 +a = np.arange(80, dtype=dtype).reshape(8, 10) +b = a.copy() +c = a.copy(order='F') +d = np.arange(16 * 20, dtype=dtype).reshape(16, 20)[::2, ::2] + + +def add(a, b): + return a + b + + +def add_multiple_args(a, b, c, d): + return a + b + c + d + + +def gufunc_add(a, b): + result = 0.0 + for i in 
range(a.shape[0]): + result += a[i] * b[i] + + return result + + +def ufunc_reduce(ufunc, arg): + for i in range(arg.ndim): + arg = ufunc.reduce(arg) + return arg + + +vectorizers = [ + Vectorize, + # ParallelVectorize, + # StreamVectorize, + # CudaVectorize, + # GUFuncVectorize, +] + + +class TestUFuncs(TestCase): + + def _test_ufunc_attributes(self, cls, a, b, *args): + "Test ufunc attributes" + vectorizer = cls(add, *args) + vectorizer.add(float32(float32, float32)) + ufunc = vectorizer.build_ufunc() + + info = (cls, a.ndim) + self.assertPreciseEqual(ufunc(a, b), a + b, msg=info) + self.assertPreciseEqual(ufunc_reduce(ufunc, a), np.sum(a), msg=info) + self.assertPreciseEqual(ufunc.accumulate(a), np.add.accumulate(a), + msg=info) + self.assertPreciseEqual(ufunc.outer(a, b), np.add.outer(a, b), msg=info) + + def _test_broadcasting(self, cls, a, b, c, d): + "Test multiple args" + vectorizer = cls(add_multiple_args) + vectorizer.add(float32(float32, float32, float32, float32)) + ufunc = vectorizer.build_ufunc() + + info = (cls, a.shape) + self.assertPreciseEqual(ufunc(a, b, c, d), a + b + c + d, msg=info) + + def test_ufunc_attributes(self): + for v in vectorizers: # 1D + self._test_ufunc_attributes(v, a[0], b[0]) + for v in vectorizers: # 2D + self._test_ufunc_attributes(v, a, b) + for v in vectorizers: # 3D + self._test_ufunc_attributes(v, a[:, np.newaxis, :], + b[np.newaxis, :, :]) + + def test_broadcasting(self): + for v in vectorizers: # 1D + self._test_broadcasting(v, a[0], b[0], c[0], d[0]) + for v in vectorizers: # 2D + self._test_broadcasting(v, a, b, c, d) + for v in vectorizers: # 3D + self._test_broadcasting(v, a[:, np.newaxis, :], b[np.newaxis, :, :], + c[:, np.newaxis, :], d[np.newaxis, :, :]) + + def test_implicit_broadcasting(self): + for v in vectorizers: + vectorizer = v(add) + vectorizer.add(float32(float32, float32)) + ufunc = vectorizer.build_ufunc() + + broadcasting_b = b[np.newaxis, :, np.newaxis, np.newaxis, :] + 
self.assertPreciseEqual(ufunc(a, broadcasting_b), + a + broadcasting_b) + + def test_ufunc_exception_on_write_to_readonly(self): + z = np.ones(10) + z.flags.writeable = False # flip write bit + + tests = [] + expect = "ufunc 'sin' called with an explicit output that is read-only" + tests.append((jit(nopython=True), TypingError, expect)) + tests.append((jit(forceobj=True), ValueError, + "output array is read-only")) + + for dec, exc, msg in tests: + def test(x): + a = np.ones(x.shape, x.dtype) # do not copy RO attribute from x + np.sin(a, x) + + with self.assertRaises(exc) as raises: + dec(test)(z) + + self.assertIn(msg, str(raises.exception)) + + def test_optional_type_handling(self): + # Tests ufunc compilation with Optional type + + @njit + def inner(x, y): + if y > 2: + z = None + else: + z = np.ones(4) + return np.add(x, z) + + # This causes `z` to be np.ones(4) at runtime, success + self.assertPreciseEqual(inner(np.arange(4), 1), + np.arange(1, 5).astype(np.float64)) + + with self.assertRaises(TypeError) as raises: + # This causes `z` to be None at runtime, TypeError raised on the + # type cast of the Optional. 
class TestUFuncsMisc(TestCase):
    # Test for miscellaneous ufunc issues

    def _check_unary_on_small_ints(self, np_func):
        """Compile a wrapper of *np_func* with ``@njit`` and compare the
        compiled result against the interpreted one for small integer
        scalar inputs.

        Extracted helper: test_exp2 and test_log2 previously duplicated
        this body verbatim.
        """
        @njit
        def foo(x):
            return np_func(x)

        for ty in (np.int8, np.uint16):
            x = ty(2)
            expected = foo.py_func(x)
            got = foo(x)
            self.assertPreciseEqual(expected, got)

    def test_exp2(self):
        # See issue #8898, and TargetLibraryInfo based fix in #9336
        self._check_unary_on_small_ints(np.exp2)

    def test_log2(self):
        # See issue #8898, and TargetLibraryInfo based fix in #9336
        self._check_unary_on_small_ints(np.log2)
np.arange(12, dtype='int32') + check(a) + # Non-contiguous dimension + a = a[::2] + check(a) + a = a.reshape((2, 3)) + check(a) + + # Metadata + self.assertEqual(ufunc.__name__, "add") + self.assertIn("An addition", ufunc.__doc__) + + def test_ufunc_struct(self): + from numba.tests.npyufunc.ufuncbuilding_usecases import add + ufb = UFuncBuilder(add) + cres = ufb.add("complex64(complex64, complex64)") + self.assertFalse(cres.objectmode) + ufunc = ufb.build_ufunc() + + def check(a): + b = ufunc(a, a) + self.assertPreciseEqual(a + a, b) + self.assertEqual(b.dtype, a.dtype) + + a = np.arange(12, dtype='complex64') + 1j + check(a) + # Non-contiguous dimension + a = a[::2] + check(a) + a = a.reshape((2, 3)) + check(a) + + def test_ufunc_forceobj(self): + from numba.tests.npyufunc.ufuncbuilding_usecases import add + ufb = UFuncBuilder(add, targetoptions={'forceobj': True}) + cres = ufb.add("int32(int32, int32)") + self.assertTrue(cres.objectmode) + ufunc = ufb.build_ufunc() + + a = np.arange(10, dtype='int32') + b = ufunc(a, a) + self.assertPreciseEqual(a + a, b) + + def test_nested_call(self): + """ + Check nested call to an implicitly-typed ufunc. + """ + from numba.tests.npyufunc.ufuncbuilding_usecases import outer + builder = UFuncBuilder(outer, + targetoptions={'nopython': True}) + builder.add("(int64, int64)") + ufunc = builder.build_ufunc() + self.assertEqual(ufunc(-1, 3), 2) + + def test_nested_call_explicit(self): + """ + Check nested call to an explicitly-typed ufunc. 
+ """ + from numba.tests.npyufunc.ufuncbuilding_usecases import outer_explicit + builder = UFuncBuilder(outer_explicit, + targetoptions={'nopython': True}) + builder.add("(int64, int64)") + ufunc = builder.build_ufunc() + self.assertEqual(ufunc(-1, 3), 2) + + +class TestUfuncBuildingJitDisabled(TestUfuncBuilding): + + def setUp(self): + self.old_disable_jit = config.DISABLE_JIT + config.DISABLE_JIT = False + + def tearDown(self): + config.DISABLE_JIT = self.old_disable_jit + + +class TestGUfuncBuilding(TestCase): + + def test_basic_gufunc(self): + from numba.tests.npyufunc.ufuncbuilding_usecases import guadd + gufb = GUFuncBuilder(guadd, "(x, y),(x, y)->(x, y)") + cres = gufb.add("void(int32[:,:], int32[:,:], int32[:,:])") + self.assertFalse(cres.objectmode) + ufunc = gufb.build_ufunc() + + a = np.arange(10, dtype="int32").reshape(2, 5) + b = ufunc(a, a) + + self.assertPreciseEqual(a + a, b) + self.assertEqual(b.dtype, np.dtype('int32')) + + # Metadata + self.assertEqual(ufunc.__name__, "guadd") + self.assertIn("A generalized addition", ufunc.__doc__) + + def test_gufunc_struct(self): + from numba.tests.npyufunc.ufuncbuilding_usecases import guadd + gufb = GUFuncBuilder(guadd, "(x, y),(x, y)->(x, y)") + cres = gufb.add("void(complex64[:,:], complex64[:,:], complex64[:,:])") + self.assertFalse(cres.objectmode) + ufunc = gufb.build_ufunc() + + a = np.arange(10, dtype="complex64").reshape(2, 5) + 1j + b = ufunc(a, a) + + self.assertPreciseEqual(a + a, b) + + def test_gufunc_struct_forceobj(self): + from numba.tests.npyufunc.ufuncbuilding_usecases import guadd + gufb = GUFuncBuilder(guadd, "(x, y),(x, y)->(x, y)", + targetoptions=dict(forceobj=True)) + cres = gufb.add("void(complex64[:,:], complex64[:,:], complex64[:," + ":])") + self.assertTrue(cres.objectmode) + ufunc = gufb.build_ufunc() + + a = np.arange(10, dtype="complex64").reshape(2, 5) + 1j + b = ufunc(a, a) + + self.assertPreciseEqual(a + a, b) + + +class TestGUfuncBuildingJitDisabled(TestGUfuncBuilding): + + 
def setUp(self): + self.old_disable_jit = config.DISABLE_JIT + config.DISABLE_JIT = False + + def tearDown(self): + config.DISABLE_JIT = self.old_disable_jit + + +class TestVectorizeDecor(TestCase): + + _supported_identities = [0, 1, None, "reorderable"] + + def test_vectorize(self): + from numba.tests.npyufunc.ufuncbuilding_usecases import add + ufunc = vectorize(['int32(int32, int32)'])(add) + a = np.arange(10, dtype='int32') + b = ufunc(a, a) + self.assertPreciseEqual(a + a, b) + + def test_vectorize_objmode(self): + from numba.tests.npyufunc.ufuncbuilding_usecases import add + ufunc = vectorize(['int32(int32, int32)'], forceobj=True)(add) + a = np.arange(10, dtype='int32') + b = ufunc(a, a) + self.assertPreciseEqual(a + a, b) + + def test_vectorize_bool_return(self): + from numba.tests.npyufunc.ufuncbuilding_usecases import equals + ufunc = vectorize(['bool_(int32, int32)'])(equals) + a = np.arange(10, dtype='int32') + r = ufunc(a,a) + self.assertPreciseEqual(r, np.ones(r.shape, dtype=np.bool_)) + + def test_vectorize_identity(self): + from numba.tests.npyufunc.ufuncbuilding_usecases import add + sig = 'int32(int32, int32)' + for identity in self._supported_identities: + ufunc = vectorize([sig], identity=identity)(add) + expected = None if identity == 'reorderable' else identity + self.assertEqual(ufunc.identity, expected) + # Default value is None + ufunc = vectorize([sig])(add) + self.assertIs(ufunc.identity, None) + # Invalid values + with self.assertRaises(ValueError): + vectorize([sig], identity='none')(add) + with self.assertRaises(ValueError): + vectorize([sig], identity=2)(add) + + def test_vectorize_no_args(self): + from numba.tests.npyufunc.ufuncbuilding_usecases import add + a = np.linspace(0,1,10) + b = np.linspace(1,2,10) + ufunc = vectorize(add) + self.assertPreciseEqual(ufunc(a,b), a + b) + ufunc2 = vectorize(add) + c = np.empty(10) + ufunc2(a, b, c) + self.assertPreciseEqual(c, a + b) + + def test_vectorize_only_kws(self): + from 
numba.tests.npyufunc.ufuncbuilding_usecases import mul + a = np.linspace(0,1,10) + b = np.linspace(1,2,10) + ufunc = vectorize(identity=PyUFunc_One, nopython=True)(mul) + self.assertPreciseEqual(ufunc(a,b), a * b) + + def test_vectorize_output_kwarg(self): + """ + Passing the output array as a keyword argument (issue #1867). + """ + def check(ufunc): + a = np.arange(10, 16, dtype='int32') + out = np.zeros_like(a) + got = ufunc(a, a, out=out) + self.assertIs(got, out) + self.assertPreciseEqual(out, a + a) + with self.assertRaises(TypeError): + ufunc(a, a, zzz=out) + + # With explicit sigs + from numba.tests.npyufunc.ufuncbuilding_usecases import add + ufunc = vectorize(['int32(int32, int32)'], nopython=True)(add) + check(ufunc) + # With implicit sig + ufunc = vectorize(nopython=True)(add) + check(ufunc) # compiling + check(ufunc) # after compiling + + def test_guvectorize(self): + from numba.tests.npyufunc.ufuncbuilding_usecases import guadd + ufunc = guvectorize(['(int32[:,:], int32[:,:], int32[:,:])'], + "(x,y),(x,y)->(x,y)")(guadd) + a = np.arange(10, dtype='int32').reshape(2, 5) + b = ufunc(a, a) + self.assertPreciseEqual(a + a, b) + + def test_guvectorize_no_output(self): + from numba.tests.npyufunc.ufuncbuilding_usecases import guadd + ufunc = guvectorize(['(int32[:,:], int32[:,:], int32[:,:])'], + "(x,y),(x,y),(x,y)")(guadd) + a = np.arange(10, dtype='int32').reshape(2, 5) + out = np.zeros_like(a) + ufunc(a, a, out) + self.assertPreciseEqual(a + a, out) + + def test_guvectorize_objectmode(self): + from numba.tests.npyufunc.ufuncbuilding_usecases import guadd_obj + ufunc = guvectorize(['(int32[:,:], int32[:,:], int32[:,:])'], + "(x,y),(x,y)->(x,y)", forceobj=True)(guadd_obj) + a = np.arange(10, dtype='int32').reshape(2, 5) + b = ufunc(a, a) + self.assertPreciseEqual(a + a, b) + + def test_guvectorize_scalar_objectmode(self): + """ + Test passing of scalars to object mode gufuncs. 
+ """ + from numba.tests.npyufunc.ufuncbuilding_usecases import guadd_scalar_obj + ufunc = guvectorize(['(int32[:,:], int32, int32[:,:])'], + "(x,y),()->(x,y)", forceobj=True)(guadd_scalar_obj) + a = np.arange(10, dtype='int32').reshape(2, 5) + b = ufunc(a, 3) + self.assertPreciseEqual(a + 3, b) + + def test_guvectorize_error_in_objectmode(self): + from numba.tests.npyufunc.ufuncbuilding_usecases import guerror, \ + MyException + ufunc = guvectorize(['(int32[:,:], int32[:,:], int32[:,:])'], + "(x,y),(x,y)->(x,y)", forceobj=True)(guerror) + a = np.arange(10, dtype='int32').reshape(2, 5) + with self.assertRaises(MyException): + ufunc(a, a) + + def test_guvectorize_identity(self): + from numba.tests.npyufunc.ufuncbuilding_usecases import add, guadd + args = (['(int32[:,:], int32[:,:], int32[:,:])'], "(x,y),(x,y)->(x,y)") + for identity in self._supported_identities: + ufunc = guvectorize(*args, identity=identity)(guadd) + expected = None if identity == 'reorderable' else identity + self.assertEqual(ufunc.identity, expected) + # Default value is None + ufunc = guvectorize(*args)(guadd) + self.assertIs(ufunc.identity, None) + # Invalid values + with self.assertRaises(ValueError): + guvectorize(*args, identity='none')(add) + with self.assertRaises(ValueError): + guvectorize(*args, identity=2)(add) + + def test_guvectorize_invalid_layout(self): + from numba.tests.npyufunc.ufuncbuilding_usecases import guadd + sigs = ['(int32[:,:], int32[:,:], int32[:,:])'] + # Syntax error + with self.assertRaises(ValueError) as raises: + guvectorize(sigs, ")-:")(guadd) + self.assertIn("bad token in signature", str(raises.exception)) + # Output shape can't be inferred from inputs + with self.assertRaises(NameError) as raises: + guvectorize(sigs, "(x,y),(x,y)->(x,z,v)")(guadd) + self.assertEqual(str(raises.exception), + "undefined output symbols: v,z") + # Arrow but no outputs + with self.assertRaises(ValueError) as raises: + guvectorize(sigs, "(x,y),(x,y),(x,y)->")(guadd) + # (error 
class NEP13Array:
    """Minimal array wrapper implementing the NEP 13 ufunc-override
    protocol: https://numpy.org/neps/nep-0013-ufunc-overrides.html
    """

    def __init__(self, array):
        self.array = array

    def __array__(self):
        # Lets np.asarray() unwrap this object.
        return self.array

    def tolist(self):
        return self.array.tolist()

    def __array_ufunc__(self, ufunc, method, *args, **kwargs):
        # Only plain calls are handled; every other method (reduce,
        # accumulate, ...) is declined.
        if method == "__call__":
            unwrapped = tuple(np.asarray(operand) for operand in args)
            return NEP13Array(ufunc(*unwrapped, **kwargs))
        return NotImplemented
self.assertIsInstance(all_np, np.ndarray) + self.assertEqual(all_np.tolist(), [147, 258, 369]) + + nep13_1 = new_ufunc(NEP13Array(a), b, c) + self.assertIsInstance(nep13_1, NEP13Array) + self.assertEqual(nep13_1.tolist(), [147, 258, 369]) + + nep13_2 = new_ufunc(a, NEP13Array(b), c) + self.assertIsInstance(nep13_2, NEP13Array) + self.assertEqual(nep13_2.tolist(), [147, 258, 369]) + + nep13_3 = new_ufunc(a, b, NEP13Array(c)) + self.assertIsInstance(nep13_3, NEP13Array) + self.assertEqual(nep13_3.tolist(), [147, 258, 369]) + + # give it floats + a = np.array([1.1, 2.2, 3.3], dtype=np.float64) + b = np.array([4.4, 5.5, 6.6], dtype=np.float64) + c = np.array([7.7, 8.8, 9.9], dtype=np.float64) + + all_np = new_ufunc(a, b, c) + self.assertIsInstance(all_np, np.ndarray) + self.assertEqual(all_np.tolist(), [161.7, 283.8, 405.9]) + + nep13_1 = new_ufunc(NEP13Array(a), b, c) + self.assertIsInstance(nep13_1, NEP13Array) + self.assertEqual(nep13_1.tolist(), [161.7, 283.8, 405.9]) + + nep13_2 = new_ufunc(a, NEP13Array(b), c) + self.assertIsInstance(nep13_2, NEP13Array) + self.assertEqual(nep13_2.tolist(), [161.7, 283.8, 405.9]) + + nep13_3 = new_ufunc(a, b, NEP13Array(c)) + self.assertIsInstance(nep13_3, NEP13Array) + self.assertEqual(nep13_3.tolist(), [161.7, 283.8, 405.9]) + + +class TestDask(unittest.TestCase): + """Test that numba ufuncs are compatible with dask collections and wrappers around + dask (e.g. xarray or pint) and that they can be serialized, sent over the network, + deserialized on a different host and applied remotely. 
+ """ + + def test_dask_array(self): + a = FakeDaskArray(np.arange(4, dtype=np.float64)) + expect = np.arange(4, dtype=np.float64) * 2 + + @vectorize(["f8(f8)"]) + def double_static_vectorize(x): + return x * 2 + + @vectorize() + def double_dynamic_vectorize(x): + return x * 2 + + @guvectorize(["f8,f8[:]"], "()->()") + def double_guvectorize(x, out): + out[:] = x * 2 + + for func in ( + double_static_vectorize, + double_dynamic_vectorize, + double_guvectorize, + ): + with self.subTest(func): + b = func(a) + assert isinstance(b, FakeDaskArray) + assert_array_equal(b.array, expect) + + +class TestVectorizeDecorJitDisabled(TestVectorizeDecor): + + def setUp(self): + self.old_disable_jit = config.DISABLE_JIT + config.DISABLE_JIT = False + + def tearDown(self): + config.DISABLE_JIT = self.old_disable_jit + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_update_inplace.py b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_update_inplace.py new file mode 100644 index 0000000000000000000000000000000000000000..97bc39226d3f0f7e8021dd9be993666a5b9ec3b0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_update_inplace.py @@ -0,0 +1,122 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +import unittest + +import numpy as np +from numba import guvectorize +from numba.tests.support import TestCase + + +def py_replace_2nd(x_t, y_1): + for t in range(0, x_t.shape[0], 2): + x_t[t] = y_1[0] + + +def py_update_3(x0_t, x1_t, x2_t, y_1): + for t in range(0, x0_t.shape[0]): + x0_t[t] = y_1[0] + x1_t[t] = 2 * y_1[0] + x2_t[t] = 3 * y_1[0] + + +class TestUpdateInplace(TestCase): + + def _run_test_for_gufunc(self, gufunc, py_func, expect_f4_to_pass=True, + z=2): + for dtype, expect_to_pass in [('f8', True), ('f4', expect_f4_to_pass)]: + inputs = [np.zeros(10, dtype) for _ in range(gufunc.nin - 1)] + ex_inputs = [x_t.copy() for x_t in 
inputs] + + gufunc(*inputs, z) + py_func(*ex_inputs, np.array([z])) + + for i, (x_t, ex_x_t) in enumerate(zip(inputs, ex_inputs)): + if expect_to_pass: + np.testing.assert_equal(x_t, ex_x_t, err_msg='input %s' % i) + else: + self.assertFalse((x_t == ex_x_t).all(), msg='input %s' % i) + + def test_update_inplace(self): + # test without writable_args + gufunc = guvectorize(['void(f8[:], f8[:])'], '(t),()', + nopython=True)(py_replace_2nd) + self._run_test_for_gufunc(gufunc, py_replace_2nd, + expect_f4_to_pass=False) + + # test with writable_args + gufunc = guvectorize(['void(f8[:], f8[:])'], '(t),()', + nopython=True, writable_args=(0,))(py_replace_2nd) + self._run_test_for_gufunc(gufunc, py_replace_2nd) + + # test with writable_args as strings + gufunc = guvectorize(['void(f8[:], f8[:])'], '(t),()', nopython=True, + writable_args=('x_t',))(py_replace_2nd) + self._run_test_for_gufunc(gufunc, py_replace_2nd) + + def test_update_inplace_with_cache(self): + # test with writable_args + gufunc = guvectorize(['void(f8[:], f8[:])'], '(t),()', + nopython=True, writable_args=(0,), + cache=True)(py_replace_2nd) + # 2nd time it is loaded from cache + gufunc = guvectorize(['void(f8[:], f8[:])'], '(t),()', + nopython=True, writable_args=(0,), + cache=True)(py_replace_2nd) + self._run_test_for_gufunc(gufunc, py_replace_2nd) + + def test_update_inplace_parallel(self): + # test with writable_args + gufunc = guvectorize(['void(f8[:], f8[:])'], '(t),()', + nopython=True, writable_args=(0,), + target='parallel')(py_replace_2nd) + self._run_test_for_gufunc(gufunc, py_replace_2nd) + + def test_update_inplace_3(self): + # test without writable_args + gufunc = guvectorize(['void(f8[:], f8[:], f8[:], f8[:])'], + '(t),(t),(t),()', + nopython=True)(py_update_3) + self._run_test_for_gufunc(gufunc, py_update_3, expect_f4_to_pass=False) + + # test with writable_args + gufunc = guvectorize(['void(f8[:], f8[:], f8[:], f8[:])'], + '(t),(t),(t),()', nopython=True, + writable_args=(0, 1, 
2))(py_update_3) + self._run_test_for_gufunc(gufunc, py_update_3) + + # test with writable_args as mix of strings and ints + gufunc = guvectorize(['void(f8[:], f8[:], f8[:], f8[:])'], + '(t),(t),(t),()', nopython=True, + writable_args=('x0_t', 'x1_t', 2))(py_update_3) + self._run_test_for_gufunc(gufunc, py_update_3) + + def test_exceptions(self): + # check that len(writable_args) <= nin + with self.assertRaises(ValueError): + guvectorize(['void(f8[:], f8[:])'], '(t),()', nopython=True, + writable_args=(0, 1, 2, 5))(py_replace_2nd) + + # check that all values in writable_args are between 0 and nin + with self.assertRaises(ValueError): + guvectorize(['void(f8[:], f8[:])'], '(t),()', + nopython=True, writable_args=(5,))(py_replace_2nd) + + with self.assertRaises(ValueError): + guvectorize(['void(f8[:], f8[:])'], '(t),()', + nopython=True, writable_args=(-1,))(py_replace_2nd) + + # check that exception is raised when passing non-existing argument name + with self.assertRaises(RuntimeError): + guvectorize(['void(f8[:], f8[:])'], '(t),()', + nopython=True, writable_args=('z_t',))(py_replace_2nd) + + # writable_args are not supported for target='cuda' + with self.assertRaises(TypeError): + guvectorize(['void(f8[:], f8[:])'], '(t),()', + nopython=True, writable_args=(0,), + target='cuda')(py_replace_2nd) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_vectorize_decor.py b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_vectorize_decor.py new file mode 100644 index 0000000000000000000000000000000000000000..6eb984d8c04a07c12b0d690a2003b0a61defd058 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/test_vectorize_decor.py @@ -0,0 +1,151 @@ +import math + +import numpy as np + +from numba import int32, uint32, float32, float64, jit, vectorize +from numba.tests.support import tag, CheckWarningsMixin +import unittest + + +pi = math.pi + + +def sinc(x): + if x == 0.0: + 
def scaled_sinc(x, scale):
    """Normalized sinc multiplied by *scale*; equals *scale* at x == 0
    (the removable singularity)."""
    if x != 0.0:
        return scale * (math.sin(x * pi) / (pi * x))
    return scale
class BaseVectorizeNopythonArg(unittest.TestCase, CheckWarningsMixin):
    """
    Test passing the nopython argument to the vectorize decorator.
    """
    def _test_target_nopython(self, target, warnings, with_sig=True):
        """Vectorize ``vector_add`` for *target* with ``nopython=True``
        and assert exactly the given *warnings* are emitted.

        ``with_sig`` controls whether an explicit signature list is
        passed to the decorator.
        """
        a = np.array([2.0], dtype=np.float32)
        b = np.array([3.0], dtype=np.float32)
        sig = [float32(float32, float32)]
        # Fixed idiom: `cond and x or y` silently yields y whenever x is
        # falsy; the conditional expression is the safe equivalent.
        args = [sig] if with_sig else []
        with self.check_warnings(warnings):
            f = vectorize(*args, target=target, nopython=True)(vector_add)
            f(a, b)
+ """ + def _test_target_unrecognized_arg(self, target, with_sig=True): + a = np.array([2.0], dtype=np.float32) + b = np.array([3.0], dtype=np.float32) + sig = [float32(float32, float32)] + args = with_sig and [sig] or [] + with self.assertRaises(KeyError) as raises: + f = vectorize(*args, target=target, nonexistent=2)(vector_add) + f(a, b) + self.assertIn("Unrecognized options", str(raises.exception)) + +class TestVectorizeUnrecognizedArg(BaseVectorizeUnrecognizedArg): + def test_target_cpu_unrecognized_arg(self): + self._test_target_unrecognized_arg('cpu') + + def test_target_cpu_unrecognized_arg_no_sig(self): + self._test_target_unrecognized_arg('cpu', False) + + def test_target_parallel_unrecognized_arg(self): + self._test_target_unrecognized_arg('parallel') + + + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/npyufunc/ufuncbuilding_usecases.py b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/ufuncbuilding_usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..0e96dc300cf3642ee3a042bf55cda4885a98c547 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/npyufunc/ufuncbuilding_usecases.py @@ -0,0 +1,69 @@ +from numba import vectorize + + +def add(a, b): + """An addition""" + return a + b + + +def equals(a, b): + return a == b + + +def mul(a, b): + """A multiplication""" + return a * b + + +def guadd(a, b, c): + """A generalized addition""" + x, y = c.shape + for i in range(x): + for j in range(y): + c[i, j] = a[i, j] + b[i, j] + + +@vectorize(nopython=True) +def inner(a, b): + return a + b + + +@vectorize(["int64(int64, int64)"], nopython=True) +def inner_explicit(a, b): + return a + b + + +def outer(a, b): + return inner(a, b) + + +def outer_explicit(a, b): + return inner_explicit(a, b) + + +class Dummy: + pass + + +def guadd_obj(a, b, c): + Dummy() # to force object mode + x, y = c.shape + for i in range(x): + for j in range(y): + c[i, j] = a[i, j] + b[i, 
def var_positional_impl(a, *star_args_token, kw=None, kw1=12):
    """Overload usecase whose outer signature takes ``*args``; returns
    the concrete implementation."""
    def impl(a, b, f, kw=None, kw1=12):
        # NOTE: the inner signature intentionally differs from the outer
        # one; that mismatch is what the overload tests exercise.
        return 1 if a > 10 else -1
    return impl
def check():
    """Run stable_fit both compiled and interpreted on the same random
    data and verify the three outputs agree."""
    np.random.seed(0)  # fixed seed: the comparison must be reproducible
    X = np.random.random((10, 4))
    y = np.random.random((10, 5))

    compiled = stable_fit(X, y)
    interpreted = stable_fit.py_func(X, y)

    # (beta, residuals, stable) — compare each pair in order
    for got, expected in zip(compiled, interpreted):
        np.testing.assert_allclose(got, expected)
+ """ + def check_module(self, mod): + total_cache_hits = 0 + for fn in [mod.arrayexprs_case, mod.prange_case, mod.caller_case]: + arr = np.ones(20) + np.testing.assert_allclose( + fn(arr), fn.py_func(arr), + ) + # Accumulate cache hits + total_cache_hits += len(fn.stats.cache_hits) + self.assertGreater( + total_cache_hits, 0, + msg="At least one dispatcher has used the cache", + ) + + def run_module(self, mod): + # This just executes the module's functionality without asserting + # anything about the cache, it's used in tests that ensure that + # properties such as thread count aren't baked in to the cached object. + for fn in [mod.arrayexprs_case, mod.prange_case, mod.caller_case]: + arr = np.ones(20) + np.testing.assert_allclose( + fn(arr), fn.py_func(arr), + ) + + +def self_test(): + mod = sys.modules[__name__] + _TestModule().check_module(mod) + + +def self_run(): + mod = sys.modules[__name__] + _TestModule().run_module(mod) diff --git a/venv/lib/python3.10/site-packages/numba/tests/pdlike_usecase.py b/venv/lib/python3.10/site-packages/numba/tests/pdlike_usecase.py new file mode 100644 index 0000000000000000000000000000000000000000..75ac00b6ffa199e236dfe09d8af2628da393e850 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/pdlike_usecase.py @@ -0,0 +1,306 @@ +""" +Implementation of a minimal Pandas-like API. +""" + +import numpy as np + +from numba.core import types, cgutils +from numba.core.datamodel import models +from numba.core.extending import ( + typeof_impl, type_callable, register_model, + lower_builtin, box, unbox, NativeValue, + overload, overload_attribute, overload_method, make_attribute_wrapper) +from numba.core.imputils import impl_ret_borrowed + + +class Index(object): + """ + A minimal pandas.Index-like object. 
+ """ + + def __init__(self, data): + assert isinstance(data, np.ndarray) + assert data.ndim == 1 + self._data = data + + def __iter__(self): + return iter(self._data) + + @property + def dtype(self): + return self._data.dtype + + @property + def flags(self): + return self._data.flags + + +class IndexType(types.Buffer): + """ + The type class for Index objects. + """ + array_priority = 1000 + + def __init__(self, dtype, layout, pyclass): + self.pyclass = pyclass + super(IndexType, self).__init__(dtype, 1, layout) + + @property + def key(self): + return self.pyclass, self.dtype, self.layout + + @property + def as_array(self): + return types.Array(self.dtype, 1, self.layout) + + def copy(self, dtype=None, ndim=1, layout=None): + assert ndim == 1 + if dtype is None: + dtype = self.dtype + layout = layout or self.layout + return type(self)(dtype, layout, self.pyclass) + + +class Series(object): + """ + A minimal pandas.Series-like object. + """ + + def __init__(self, data, index): + assert isinstance(data, np.ndarray) + assert isinstance(index, Index) + assert data.ndim == 1 + self._values = data + self._index = index + + def __iter__(self): + return iter(self._values) + + @property + def dtype(self): + return self._values.dtype + + @property + def flags(self): + return self._values.flags + + +class SeriesType(types.ArrayCompatible): + """ + The type class for Series objects. 
+ """ + array_priority = 1000 + + def __init__(self, dtype, index): + assert isinstance(index, IndexType) + self.dtype = dtype + self.index = index + self.values = types.Array(self.dtype, 1, 'C') + name = "series(%s, %s)" % (dtype, index) + super(SeriesType, self).__init__(name) + + @property + def key(self): + return self.dtype, self.index + + @property + def as_array(self): + return self.values + + def copy(self, dtype=None, ndim=1, layout='C'): + assert ndim == 1 + assert layout == 'C' + if dtype is None: + dtype = self.dtype + return type(self)(dtype, self.index) + + +@typeof_impl.register(Index) +def typeof_index(val, c): + arrty = typeof_impl(val._data, c) + assert arrty.ndim == 1 + return IndexType(arrty.dtype, arrty.layout, type(val)) + +@typeof_impl.register(Series) +def typeof_series(val, c): + index = typeof_impl(val._index, c) + arrty = typeof_impl(val._values, c) + assert arrty.ndim == 1 + assert arrty.layout == 'C' + return SeriesType(arrty.dtype, index) + +@type_callable('__array_wrap__') +def type_array_wrap(context): + def typer(input_type, result): + if isinstance(input_type, (IndexType, SeriesType)): + return input_type.copy(dtype=result.dtype, + ndim=result.ndim, + layout=result.layout) + + return typer + +@type_callable(Series) +def type_series_constructor(context): + def typer(data, index): + if isinstance(index, IndexType) and isinstance(data, types.Array): + assert data.layout == 'C' + assert data.ndim == 1 + return SeriesType(data.dtype, index) + + return typer + + +# Backend extensions for Index and Series + +@register_model(IndexType) +class IndexModel(models.StructModel): + def __init__(self, dmm, fe_type): + members = [('data', fe_type.as_array)] + models.StructModel.__init__(self, dmm, fe_type, members) + +@register_model(SeriesType) +class SeriesModel(models.StructModel): + def __init__(self, dmm, fe_type): + members = [ + ('index', fe_type.index), + ('values', fe_type.as_array), + ] + models.StructModel.__init__(self, dmm, fe_type, 
members) + +make_attribute_wrapper(IndexType, 'data', '_data') +make_attribute_wrapper(SeriesType, 'index', '_index') +make_attribute_wrapper(SeriesType, 'values', '_values') + +def make_index(context, builder, typ, **kwargs): + return cgutils.create_struct_proxy(typ)(context, builder, **kwargs) + +def make_series(context, builder, typ, **kwargs): + return cgutils.create_struct_proxy(typ)(context, builder, **kwargs) + +@lower_builtin('__array__', IndexType) +def index_as_array(context, builder, sig, args): + val = make_index(context, builder, sig.args[0], ref=args[0]) + return val._get_ptr_by_name('data') + +@lower_builtin('__array__', SeriesType) +def series_as_array(context, builder, sig, args): + val = make_series(context, builder, sig.args[0], ref=args[0]) + return val._get_ptr_by_name('values') + +@lower_builtin('__array_wrap__', IndexType, types.Array) +def index_wrap_array(context, builder, sig, args): + dest = make_index(context, builder, sig.return_type) + dest.data = args[1] + return impl_ret_borrowed(context, builder, sig.return_type, dest._getvalue()) + +@lower_builtin('__array_wrap__', SeriesType, types.Array) +def series_wrap_array(context, builder, sig, args): + src = make_series(context, builder, sig.args[0], value=args[0]) + dest = make_series(context, builder, sig.return_type) + dest.values = args[1] + dest.index = src.index + return impl_ret_borrowed(context, builder, sig.return_type, dest._getvalue()) + +@lower_builtin(Series, types.Array, IndexType) +def pdseries_constructor(context, builder, sig, args): + data, index = args + series = make_series(context, builder, sig.return_type) + series.index = index + series.values = data + return impl_ret_borrowed(context, builder, sig.return_type, series._getvalue()) + + +@unbox(IndexType) +def unbox_index(typ, obj, c): + """ + Convert a Index object to a native structure. 
+ """ + data = c.pyapi.object_getattr_string(obj, "_data") + index = make_index(c.context, c.builder, typ) + index.data = c.unbox(typ.as_array, data).value + + return NativeValue(index._getvalue()) + +@unbox(SeriesType) +def unbox_series(typ, obj, c): + """ + Convert a Series object to a native structure. + """ + index = c.pyapi.object_getattr_string(obj, "_index") + values = c.pyapi.object_getattr_string(obj, "_values") + series = make_series(c.context, c.builder, typ) + series.index = c.unbox(typ.index, index).value + series.values = c.unbox(typ.values, values).value + + return NativeValue(series._getvalue()) + + +@box(IndexType) +def box_index(typ, val, c): + """ + Convert a native index structure to a Index object. + """ + # First build a Numpy array object, then wrap it in a Index + index = make_index(c.context, c.builder, typ, value=val) + classobj = c.pyapi.unserialize(c.pyapi.serialize_object(typ.pyclass)) + arrayobj = c.box(typ.as_array, index.data) + indexobj = c.pyapi.call_function_objargs(classobj, (arrayobj,)) + return indexobj + +@box(SeriesType) +def box_series(typ, val, c): + """ + Convert a native series structure to a Series object. 
+ """ + series = make_series(c.context, c.builder, typ, value=val) + classobj = c.pyapi.unserialize(c.pyapi.serialize_object(Series)) + indexobj = c.box(typ.index, series.index) + arrayobj = c.box(typ.as_array, series.values) + seriesobj = c.pyapi.call_function_objargs(classobj, (arrayobj, indexobj)) + return seriesobj + + +@overload_attribute(IndexType, 'is_monotonic_increasing') +def index_is_monotonic_increasing(index): + """ + Index.is_monotonic_increasing + """ + def getter(index): + data = index._data + if len(data) == 0: + return True + u = data[0] + for v in data: + if v < u: + return False + v = u + return True + + return getter + +@overload(len) +def series_len(series): + """ + len(Series) + """ + if isinstance(series, SeriesType): + def len_impl(series): + return len(series._values) + return len_impl + +@overload_method(SeriesType, 'clip') +def series_clip(series, lower, upper): + """ + Series.clip(...) + """ + def clip_impl(series, lower, upper): + data = series._values.copy() + for i in range(len(data)): + v = data[i] + if v < lower: + data[i] = lower + elif v > upper: + data[i] = upper + return Series(data, series._index) + + return clip_impl diff --git a/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/__init__.py b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2fd4f23339088455865364a924224f83defc5bf Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/__pycache__/setup_distutils.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/__pycache__/setup_distutils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..734dde8d10c6c8f23e16a1b8238792a39574a92f Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/__pycache__/setup_distutils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/__pycache__/setup_distutils_nested.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/__pycache__/setup_distutils_nested.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1df07b8c444e71164e40354959e30c707ac56c6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/__pycache__/setup_distutils_nested.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/__pycache__/setup_setuptools.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/__pycache__/setup_setuptools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3737c03ea93ee329e889da764b1fd15ba4c82695 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/__pycache__/setup_setuptools.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/__pycache__/setup_setuptools_nested.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/__pycache__/setup_setuptools_nested.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c6e6167f2f7bbe71b62367796b291926cdb9648 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/__pycache__/setup_setuptools_nested.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/__pycache__/source_module.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/__pycache__/source_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6241489ca3163512ac0c67a76de809227fe58405 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/__pycache__/source_module.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/nested/__init__.py b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/nested/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/nested/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/nested/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4bad2ecd7d4bf48536b03dd2969e09eb869c95a0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/nested/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/nested/__pycache__/source_module.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/nested/__pycache__/source_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e6f027b5e9d439abd0ad069adc31aa51330b907 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/nested/__pycache__/source_module.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/nested/source_module.py b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/nested/source_module.py new file mode 100644 index 0000000000000000000000000000000000000000..234fcdb53fc206f47a21bfa7144f3c71c9c77918 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/nested/source_module.py @@ -0,0 +1,20 @@ +import numpy as np + +from numba.pycc import CC + + +cc = CC('pycc_compiled_module') + +_const = 42 + + +# This ones references a global variable at compile time +@cc.export('get_const', 'i8()') +def get_const(): + return _const + + +# This one needs NRT and an environment +@cc.export('ones', 'f8[:](i4)') +def ones(n): + return np.ones(n) diff --git a/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/setup_distutils.py b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/setup_distutils.py new file mode 100644 index 0000000000000000000000000000000000000000..3976af72db2547bd15da9d4ac60169f7014a8223 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/setup_distutils.py @@ -0,0 +1,13 @@ +from setuptools import distutils +from source_module import cc + + +setup = distutils.core.setup + + +def run_setup(): + setup(ext_modules=[cc.distutils_extension()]) + + +if __name__ == '__main__': + run_setup() diff --git a/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/setup_distutils_nested.py b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/setup_distutils_nested.py new file mode 100644 index 0000000000000000000000000000000000000000..750551ba51d5871d03f737b54bd9656da83c0116 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/setup_distutils_nested.py @@ -0,0 +1,14 @@ +from setuptools import distutils + +from nested.source_module import cc + + +setup = distutils.core.setup + + +def run_setup(): + 
setup(ext_modules=[cc.distutils_extension()]) + + +if __name__ == '__main__': + run_setup() diff --git a/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/setup_setuptools.py b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/setup_setuptools.py new file mode 100644 index 0000000000000000000000000000000000000000..ecfe9decd3213dfff402a5818acf09129250081a --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/setup_setuptools.py @@ -0,0 +1,11 @@ +from setuptools import setup + +from source_module import cc + + +def run_setup(): + setup(ext_modules=[cc.distutils_extension()]) + + +if __name__ == '__main__': + run_setup() diff --git a/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/setup_setuptools_nested.py b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/setup_setuptools_nested.py new file mode 100644 index 0000000000000000000000000000000000000000..c4381fc5560e0f04a8b3ed5c8f40ac52d33ef74c --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/setup_setuptools_nested.py @@ -0,0 +1,11 @@ +from setuptools import setup + +from nested.source_module import cc + + +def run_setup(): + setup(ext_modules=[cc.distutils_extension()]) + + +if __name__ == '__main__': + run_setup() diff --git a/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/source_module.py b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/source_module.py new file mode 100644 index 0000000000000000000000000000000000000000..e5e6ea82f3b2ac5be2569c915ce742f40d62dfa6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/pycc_distutils_usecase/source_module.py @@ -0,0 +1,18 @@ +import numpy as np + +from numba.pycc import CC + + +cc = CC('pycc_compiled_module') + +_const = 42 + +# This ones references a global variable at compile time +@cc.export('get_const', 'i8()') +def get_const(): + return _const + +# This one 
needs NRT and an environment +@cc.export('ones', 'f8[:](i4)') +def ones(n): + return np.ones(n) diff --git a/venv/lib/python3.10/site-packages/numba/tests/recursion_usecases.py b/venv/lib/python3.10/site-packages/numba/tests/recursion_usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..7fa987c425584d4f4222394a8ebe8c5c2b876831 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/recursion_usecases.py @@ -0,0 +1,228 @@ +""" +Usecases of recursive functions. + +Some functions are compiled at import time, hence a separate module. +""" + +from numba import jit + + +@jit("i8(i8)", nopython=True) +def fib1(n): + if n < 2: + return n + # Note the second call uses a named argument + return fib1(n - 1) + fib1(n=n - 2) + + +def make_fib2(): + @jit("i8(i8)", nopython=True) + def fib2(n): + if n < 2: + return n + return fib2(n - 1) + fib2(n=n - 2) + + return fib2 + +fib2 = make_fib2() + + +def make_type_change_self(jit=lambda x: x): + @jit + def type_change_self(x, y): + if x > 1 and y > 0: + return x + type_change_self(x - y, y) + else: + return y + return type_change_self + + +# Implicit signature +@jit(nopython=True) +def fib3(n): + if n < 2: + return n + return fib3(n - 1) + fib3(n - 2) + + +# Run-away self recursion +@jit(nopython=True) +def runaway_self(x): + return runaway_self(x) + + +@jit(nopython=True) +def raise_self(x): + if x == 1: + raise ValueError("raise_self") + elif x > 0: + return raise_self(x - 1) + else: + return 1 + + +# Mutual recursion +@jit(nopython=True) +def outer_fac(n): + if n < 1: + return 1 + return n * inner_fac(n - 1) + + +@jit(nopython=True) +def inner_fac(n): + if n < 1: + return 1 + return n * outer_fac(n - 1) + + +# Mutual recursion with different arg names +def make_mutual2(jit=lambda x: x): + @jit + def foo(x): + if x > 0: + return 2 * bar(z=1, y=x) + return 1 + x + + @jit + def bar(y, z): + return foo(x=y - z) + + return foo, bar + + +# Mutual runaway recursion + +@jit(nopython=True) +def 
runaway_mutual(x): + return runaway_mutual_inner(x) + + +@jit(nopython=True) +def runaway_mutual_inner(x): + return runaway_mutual(x) + + +# Mutual type changing recursion + +def make_type_change_mutual(jit=lambda x: x): + @jit + def foo(x, y): + if x > 1 and y > 0: + # call bar first to exercise partial type inference. + # typeinferer suspended at the call to bar() and haven't determined + # the potential return type from the else-branch + return x + bar(x - y, y) + else: + return y + + @jit + def bar(x, y): + if x > 1 and y > 0: + return x + foo(x - y, y) + else: + return y + + return foo + + +# Indirect mutual recursion +def make_four_level(jit=lambda x: x): + @jit + def first(x): + # The recursing call must have a path that is non-recursing. + if x > 0: + return second(x) * 2 + else: + return 1 + + @jit + def second(x): + return third(x) * 3 + + @jit + def third(x): + return fourth(x) * 4 + + @jit + def fourth(x): + return first(x / 2 - 1) + + return first + + +def make_inner_error(jit=lambda x: x): + @jit + def outer(x): + if x > 0: + return inner(x) + + else: + return 1 + + @jit + def inner(x): + if x > 0: + return outer(x - 1) + else: + # this branch is actually never executed + return error_fun(x) + + @jit + def error_fun(x): + # to trigger an untyped attribute error + return x.ndim + + return outer + + +def make_raise_mutual(jit=lambda x: x): + @jit + def outer(x): + if x > 0: + return inner(x) + else: + return 1 + + @jit + def inner(x): + if x == 1: + raise ValueError('raise_mutual') + elif x > 0: + return outer(x - 1) + else: + return 1 + + return outer + + +def make_optional_return_case(jit=lambda x: x): + @jit + def foo(x): + if x > 5: + return x - 1 + else: + return + + @jit + def bar(x): + out = foo(x) + if out is None: + return out + elif out < 8: + return out + else: + return x * bar(out) + + return bar + + +def make_growing_tuple_case(jit=lambda x: x): + # From issue #4387 + @jit + def make_list(n): + if n <= 0: + return None + + return (n, 
make_list(n - 1)) + return make_list diff --git a/venv/lib/python3.10/site-packages/numba/tests/serialize_usecases.py b/venv/lib/python3.10/site-packages/numba/tests/serialize_usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..af465ed4fe51c9f0b49a143a5972b2ac550e30eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/serialize_usecases.py @@ -0,0 +1,97 @@ +""" +Separate module with function samples for serialization tests, +to avoid issues with __main__. +""" + +import math + +from numba import jit +from numba.core import types + + +@jit((types.int32, types.int32)) +def add_with_sig(a, b): + return a + b + +@jit +def add_without_sig(a, b): + return a + b + +@jit(nopython=True) +def add_nopython(a, b): + return a + b + +@jit(nopython=True) +def add_nopython_fail(a, b): + object() + return a + b + +def closure(a): + @jit(nopython=True) + def inner(b, c): + return a + b + c + return inner + +K = 3.0 + +from math import sqrt + +def closure_with_globals(x, **jit_args): + @jit(**jit_args) + def inner(y): + # Exercise a builtin function and a module-level constant + k = max(K, K + 1) + # Exercise two functions from another module, one accessed with + # dotted notation, one imported explicitly. 
+ return math.hypot(x, y) + sqrt(k) + return inner + +@jit(nopython=True) +def other_function(x, y): + return math.hypot(x, y) + +@jit(forceobj=True) +def get_global_objmode(x): + return K * x + +import numpy as np +import numpy.random as nprand + +@jit(nopython=True) +def get_renamed_module(x): + nprand.seed(42) + return np.cos(x), nprand.random() + + +def closure_calling_other_function(x): + @jit(nopython=True) + def inner(y, z): + return other_function(x, y) + z + return inner + +def closure_calling_other_closure(x): + @jit(nopython=True) + def other_inner(y): + return math.hypot(x, y) + + @jit(nopython=True) + def inner(y): + return other_inner(y) + x + return inner + + +# A dynamic function calling a builtin function +def _get_dyn_func(**jit_args): + code = """ + def dyn_func(x): + res = 0 + for i in range(x): + res += x + return res + """ + ns = {} + exec(code.strip(), ns) + return jit(**jit_args)(ns['dyn_func']) + +dyn_func = _get_dyn_func(nopython=True) +dyn_func_objmode = _get_dyn_func(forceobj=True) diff --git a/venv/lib/python3.10/site-packages/numba/tests/support.py b/venv/lib/python3.10/site-packages/numba/tests/support.py new file mode 100644 index 0000000000000000000000000000000000000000..412fa176707a631709bb40779aa18dbb926c3ec8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/support.py @@ -0,0 +1,1301 @@ +""" +Assorted utilities for use in tests. 
+""" + +import cmath +import contextlib +from collections import defaultdict +import enum +import gc +import math +import platform +import os +import signal +import shutil +import subprocess +import sys +import tempfile +import threading +import time +import io +import ctypes +import multiprocessing as mp +import warnings +import traceback +from contextlib import contextmanager +import uuid +import importlib +import types as pytypes +from functools import cached_property + +import numpy as np + +from numba import testing, types +from numba.core import errors, typing, utils, config, cpu +from numba.core.typing import cffi_utils +from numba.core.compiler import (compile_extra, Flags, + DEFAULT_FLAGS, CompilerBase, + DefaultPassBuilder) +from numba.core.typed_passes import IRLegalization +from numba.core.untyped_passes import PreserveIR +import unittest +from numba.core.runtime import rtsys +from numba.np import numpy_support +from numba.core.runtime import _nrt_python as _nrt +from numba.core.extending import ( + overload_method, + typeof_impl, + register_model, + unbox, + NativeValue, + models, +) +from numba.core.datamodel.models import OpaqueModel + +try: + import scipy +except ImportError: + scipy = None + +# Make sure that coverage is set up. 
+try: + import coverage +except ImportError: + pass +else: + coverage.process_startup() + +enable_pyobj_flags = Flags() +enable_pyobj_flags.enable_pyobject = True + +force_pyobj_flags = Flags() +force_pyobj_flags.force_pyobject = True + +no_pyobj_flags = Flags() + +nrt_flags = Flags() +nrt_flags.nrt = True + + +tag = testing.make_tag_decorator(['important', 'long_running', 'always_test']) + +# Use to mark a test as a test that must always run when sharded +always_test = tag('always_test') + +_32bit = sys.maxsize <= 2 ** 32 +is_parfors_unsupported = _32bit +skip_parfors_unsupported = unittest.skipIf( + is_parfors_unsupported, + 'parfors not supported', +) + +skip_unless_py10_or_later = unittest.skipUnless( + utils.PYVERSION >= (3, 10), + "needs Python 3.10 or later" +) + +skip_unless_py10 = unittest.skipUnless( + utils.PYVERSION == (3, 10), + "needs Python 3.10" +) + +skip_unless_py312 = unittest.skipUnless( + utils.PYVERSION == (3, 12), + "needs Python 3.12" +) + +skip_if_py313_on_windows = unittest.skipIf( + utils.PYVERSION == (3, 13) and sys.platform.startswith('win'), + "Not supported on Python 3.13 on Windows" + ) + +skip_if_32bit = unittest.skipIf(_32bit, "Not supported on 32 bit") + +IS_NUMPY_2 = numpy_support.numpy_version >= (2, 0) +skip_if_numpy_2 = unittest.skipIf(IS_NUMPY_2, + "Not supported on numpy 2.0+") + +def expected_failure_py311(fn): + if utils.PYVERSION == (3, 11): + return unittest.expectedFailure(fn) + else: + return fn + + +def expected_failure_py312(fn): + if utils.PYVERSION == (3, 12): + return unittest.expectedFailure(fn) + else: + return fn + + +def expected_failure_py313(fn): + if utils.PYVERSION == (3, 13): + return unittest.expectedFailure(fn) + else: + return fn + + +def expected_failure_np2(fn): + if numpy_support.numpy_version == (2, 0): + return unittest.expectedFailure(fn) + else: + return fn + +_msg = "SciPy needed for test" +skip_unless_scipy = unittest.skipIf(scipy is None, _msg) + +skip_unless_cffi = 
unittest.skipUnless(cffi_utils.SUPPORTED, 'requires cffi') + +_lnx_reason = 'linux only test' +linux_only = unittest.skipIf(not sys.platform.startswith('linux'), _lnx_reason) + +_win_reason = 'Windows-only test' +windows_only = unittest.skipIf(not sys.platform.startswith('win'), _win_reason) + +_is_armv7l = platform.machine() == 'armv7l' + +disabled_test = unittest.skipIf(True, 'Test disabled') + +# See issue #4563, PPC64LE LLVM bug +skip_ppc64le_issue4563 = unittest.skipIf(platform.machine() == 'ppc64le', + ("Hits: 'Parameter area must exist " + "to pass an argument in memory'")) + +# Typeguard +has_typeguard = bool(os.environ.get('NUMBA_USE_TYPEGUARD', 0)) + +skip_unless_typeguard = unittest.skipUnless( + has_typeguard, "Typeguard is not enabled", +) + +skip_if_typeguard = unittest.skipIf( + has_typeguard, "Broken if Typeguard is enabled", +) + +# See issue #6465, PPC64LE LLVM bug +skip_ppc64le_issue6465 = unittest.skipIf(platform.machine() == 'ppc64le', + ("Hits: 'mismatch in size of " + "parameter area' in " + "LowerCall_64SVR4")) + +# LLVM PPC issue. +# Sample error message: +# Invalid PPC CTR loop! +# UNREACHABLE executed at /llvm/lib/Target/PowerPC/PPCCTRLoops.cpp:179! 
+skip_ppc64le_invalid_ctr_loop = unittest.skipIf( + platform.machine() == 'ppc64le', + "Invalid PPC CTR loop") + +# fenv.h on M1 may have various issues: +# https://github.com/numba/numba/issues/7822#issuecomment-1065356758 +_uname = platform.uname() +IS_MACOS = _uname.system == 'Darwin' +skip_macos_fenv_errors = unittest.skipIf(IS_MACOS, + "fenv.h-like functionality unreliable on macOS") +IS_MACOS_ARM64 = IS_MACOS and _uname.machine == 'arm64' + +try: + import scipy.linalg.cython_lapack + has_lapack = True +except ImportError: + has_lapack = False + +needs_lapack = unittest.skipUnless(has_lapack, + "LAPACK needs SciPy 1.0+") + +try: + import scipy.linalg.cython_blas + has_blas = True +except ImportError: + has_blas = False + +needs_blas = unittest.skipUnless(has_blas, "BLAS needs SciPy 1.0+") + +# Decorate a test with @needs_subprocess to ensure it doesn't run unless the +# `SUBPROC_TEST` environment variable is set. Use this in conjunction with: +# TestCase::subprocess_test_runner which will execute a given test in subprocess +# with this environment variable set. +_exec_cond = os.environ.get('SUBPROC_TEST', None) == '1' +needs_subprocess = unittest.skipUnless(_exec_cond, "needs subprocess harness") + + +try: + import setuptools + has_setuptools = True +except ImportError: + has_setuptools = False + + +# decorator for a test that need setuptools +needs_setuptools = unittest.skipUnless(has_setuptools, 'Test needs setuptools') + + +def ignore_internal_warnings(): + """Use in testing within a ` warnings.catch_warnings` block to filter out + warnings that are unrelated/internally generated by Numba. 
+ """ + # Filter out warnings from typeguard + warnings.filterwarnings('ignore', module="typeguard") + # Filter out warnings about TBB interface mismatch + warnings.filterwarnings(action='ignore', + message=r".*TBB_INTERFACE_VERSION.*", + category=errors.NumbaWarning, + module=r'numba\.np\.ufunc\.parallel.*') + + +class TestCase(unittest.TestCase): + + longMessage = True + + # A random state yielding the same random numbers for any test case. + # Use as `self.random.` + @cached_property + def random(self): + return np.random.RandomState(42) + + def reset_module_warnings(self, module): + """ + Reset the warnings registry of a module. This can be necessary + as the warnings module is buggy in that regard. + See http://bugs.python.org/issue4180 + """ + if isinstance(module, str): + module = sys.modules[module] + try: + del module.__warningregistry__ + except AttributeError: + pass + + @contextlib.contextmanager + def assertTypingError(self): + """ + A context manager that asserts the enclosed code block fails + compiling in nopython mode. + """ + _accepted_errors = (errors.LoweringError, errors.TypingError, + TypeError, NotImplementedError) + with self.assertRaises(_accepted_errors) as cm: + yield cm + + @contextlib.contextmanager + def assertRefCount(self, *objects): + """ + A context manager that asserts the given objects have the + same reference counts before and after executing the + enclosed block. + """ + old_refcounts = [sys.getrefcount(x) for x in objects] + yield + gc.collect() + new_refcounts = [sys.getrefcount(x) for x in objects] + for old, new, obj in zip(old_refcounts, new_refcounts, objects): + if old != new: + self.fail("Refcount changed from %d to %d for object: %r" + % (old, new, obj)) + + def assertRefCountEqual(self, *objects): + gc.collect() + rc = [sys.getrefcount(x) for x in objects] + rc_0 = rc[0] + for i in range(len(objects))[1:]: + rc_i = rc[i] + if rc_0 != rc_i: + self.fail(f"Refcount for objects does not match. 
" + f"#0({rc_0}) != #{i}({rc_i}) does not match.") + + @contextlib.contextmanager + def assertNoNRTLeak(self): + """ + A context manager that asserts no NRT leak was created during + the execution of the enclosed block. + """ + old = rtsys.get_allocation_stats() + yield + new = rtsys.get_allocation_stats() + total_alloc = new.alloc - old.alloc + total_free = new.free - old.free + total_mi_alloc = new.mi_alloc - old.mi_alloc + total_mi_free = new.mi_free - old.mi_free + self.assertEqual(total_alloc, total_free, + "number of data allocs != number of data frees") + self.assertEqual(total_mi_alloc, total_mi_free, + "number of meminfo allocs != number of meminfo frees") + + + _bool_types = (bool, np.bool_) + _exact_typesets = [_bool_types, (int,), (str,), (np.integer,), + (bytes, np.bytes_)] + _approx_typesets = [(float,), (complex,), (np.inexact)] + _sequence_typesets = [(tuple, list)] + _float_types = (float, np.floating) + _complex_types = (complex, np.complexfloating) + + def _detect_family(self, numeric_object): + """ + This function returns a string description of the type family + that the object in question belongs to. Possible return values + are: "exact", "complex", "approximate", "sequence", and "unknown" + """ + if isinstance(numeric_object, np.ndarray): + return "ndarray" + + if isinstance(numeric_object, enum.Enum): + return "enum" + + for tp in self._sequence_typesets: + if isinstance(numeric_object, tp): + return "sequence" + + for tp in self._exact_typesets: + if isinstance(numeric_object, tp): + return "exact" + + for tp in self._complex_types: + if isinstance(numeric_object, tp): + return "complex" + + for tp in self._approx_typesets: + if isinstance(numeric_object, tp): + return "approximate" + + return "unknown" + + def _fix_dtype(self, dtype): + """ + Fix the given *dtype* for comparison. + """ + # Under 64-bit Windows, Numpy may return either int32 or int64 + # arrays depending on the function. 
+ if (sys.platform == 'win32' and sys.maxsize > 2**32 and + dtype == np.dtype('int32')): + return np.dtype('int64') + else: + return dtype + + def _fix_strides(self, arr): + """ + Return the strides of the given array, fixed for comparison. + Strides for 0- or 1-sized dimensions are ignored. + """ + if arr.size == 0: + return [0] * arr.ndim + else: + return [stride / arr.itemsize + for (stride, shape) in zip(arr.strides, arr.shape) + if shape > 1] + + def assertStridesEqual(self, first, second): + """ + Test that two arrays have the same shape and strides. + """ + self.assertEqual(first.shape, second.shape, "shapes differ") + self.assertEqual(first.itemsize, second.itemsize, "itemsizes differ") + self.assertEqual(self._fix_strides(first), self._fix_strides(second), + "strides differ") + + def assertPreciseEqual(self, first, second, prec='exact', ulps=1, + msg=None, ignore_sign_on_zero=False, + abs_tol=None + ): + """ + Versatile equality testing function with more built-in checks than + standard assertEqual(). + + For arrays, test that layout, dtype, shape are identical, and + recursively call assertPreciseEqual() on the contents. + + For other sequences, recursively call assertPreciseEqual() on + the contents. + + For scalars, test that two scalars or have similar types and are + equal up to a computed precision. + If the scalars are instances of exact types or if *prec* is + 'exact', they are compared exactly. + If the scalars are instances of inexact types (float, complex) + and *prec* is not 'exact', then the number of significant bits + is computed according to the value of *prec*: 53 bits if *prec* + is 'double', 24 bits if *prec* is single. This number of bits + can be lowered by raising the *ulps* value. + ignore_sign_on_zero can be set to True if zeros are to be considered + equal regardless of their sign bit. + abs_tol if this is set to a float value its value is used in the + following. 
If, however, this is set to the string "eps" then machine + precision of the type(first) is used in the following instead. This + kwarg is used to check if the absolute difference in value between first + and second is less than the value set, if so the numbers being compared + are considered equal. (This is to handle small numbers typically of + magnitude less than machine precision). + + Any value of *prec* other than 'exact', 'single' or 'double' + will raise an error. + """ + try: + self._assertPreciseEqual(first, second, prec, ulps, msg, + ignore_sign_on_zero, abs_tol) + except AssertionError as exc: + failure_msg = str(exc) + # Fall off of the 'except' scope to avoid Python 3 exception + # chaining. + else: + return + # Decorate the failure message with more information + self.fail("when comparing %s and %s: %s" % (first, second, failure_msg)) + + def _assertPreciseEqual(self, first, second, prec='exact', ulps=1, + msg=None, ignore_sign_on_zero=False, + abs_tol=None): + """Recursive workhorse for assertPreciseEqual().""" + + def _assertNumberEqual(first, second, delta=None): + if (delta is None or first == second == 0.0 + or math.isinf(first) or math.isinf(second)): + self.assertEqual(first, second, msg=msg) + # For signed zeros + if not ignore_sign_on_zero: + try: + if math.copysign(1, first) != math.copysign(1, second): + self.fail( + self._formatMessage(msg, + "%s != %s" % + (first, second))) + except TypeError: + pass + else: + self.assertAlmostEqual(first, second, delta=delta, msg=msg) + + first_family = self._detect_family(first) + second_family = self._detect_family(second) + + assertion_message = "Type Family mismatch. 
(%s != %s)" % (first_family, + second_family) + if msg: + assertion_message += ': %s' % (msg,) + self.assertEqual(first_family, second_family, msg=assertion_message) + + # We now know they are in the same comparison family + compare_family = first_family + + # For recognized sequences, recurse + if compare_family == "ndarray": + dtype = self._fix_dtype(first.dtype) + self.assertEqual(dtype, self._fix_dtype(second.dtype)) + self.assertEqual(first.ndim, second.ndim, + "different number of dimensions") + self.assertEqual(first.shape, second.shape, + "different shapes") + self.assertEqual(first.flags.writeable, second.flags.writeable, + "different mutability") + # itemsize is already checked by the dtype test above + self.assertEqual(self._fix_strides(first), + self._fix_strides(second), "different strides") + if first.dtype != dtype: + first = first.astype(dtype) + if second.dtype != dtype: + second = second.astype(dtype) + for a, b in zip(first.flat, second.flat): + self._assertPreciseEqual(a, b, prec, ulps, msg, + ignore_sign_on_zero, abs_tol) + return + + elif compare_family == "sequence": + self.assertEqual(len(first), len(second), msg=msg) + for a, b in zip(first, second): + self._assertPreciseEqual(a, b, prec, ulps, msg, + ignore_sign_on_zero, abs_tol) + return + + elif compare_family == "exact": + exact_comparison = True + + elif compare_family in ["complex", "approximate"]: + exact_comparison = False + + elif compare_family == "enum": + self.assertIs(first.__class__, second.__class__) + self._assertPreciseEqual(first.value, second.value, + prec, ulps, msg, + ignore_sign_on_zero, abs_tol) + return + + elif compare_family == "unknown": + # Assume these are non-numeric types: we will fall back + # on regular unittest comparison. + self.assertIs(first.__class__, second.__class__) + exact_comparison = True + + else: + assert 0, "unexpected family" + + # If a Numpy scalar, check the dtype is exactly the same too + # (required for datetime64 and timedelta64). 
+ if hasattr(first, 'dtype') and hasattr(second, 'dtype'): + self.assertEqual(first.dtype, second.dtype) + + # Mixing bools and non-bools should always fail + if (isinstance(first, self._bool_types) != + isinstance(second, self._bool_types)): + assertion_message = ("Mismatching return types (%s vs. %s)" + % (first.__class__, second.__class__)) + if msg: + assertion_message += ': %s' % (msg,) + self.fail(assertion_message) + + try: + if cmath.isnan(first) and cmath.isnan(second): + # The NaNs will compare unequal, skip regular comparison + return + except TypeError: + # Not floats. + pass + + # if absolute comparison is set, use it + if abs_tol is not None: + if abs_tol == "eps": + rtol = np.finfo(type(first)).eps + elif isinstance(abs_tol, float): + rtol = abs_tol + else: + raise ValueError("abs_tol is not \"eps\" or a float, found %s" + % abs_tol) + if abs(first - second) < rtol: + return + + exact_comparison = exact_comparison or prec == 'exact' + + if not exact_comparison and prec != 'exact': + if prec == 'single': + bits = 24 + elif prec == 'double': + bits = 53 + else: + raise ValueError("unsupported precision %r" % (prec,)) + k = 2 ** (ulps - bits - 1) + delta = k * (abs(first) + abs(second)) + else: + delta = None + if isinstance(first, self._complex_types): + _assertNumberEqual(first.real, second.real, delta) + _assertNumberEqual(first.imag, second.imag, delta) + elif isinstance(first, (np.timedelta64, np.datetime64)): + # Since Np 1.16 NaT == NaT is False, so special comparison needed + if np.isnat(first): + self.assertEqual(np.isnat(first), np.isnat(second)) + else: + _assertNumberEqual(first, second, delta) + else: + _assertNumberEqual(first, second, delta) + + def subprocess_test_runner(self, test_module, test_class=None, + test_name=None, envvars=None, timeout=60, + flags=None, _subproc_test_env="1"): + """ + Runs named unit test(s) as specified in the arguments as: + test_module.test_class.test_name. 
test_module must always be supplied + and if no further refinement is made with test_class and test_name then + all tests in the module will be run. The tests will be run in a + subprocess with environment variables specified in `envvars`. + If given, envvars must be a map of form: + environment variable name (str) -> value (str) + If given, flags must be a map of form: + flag including the `-` (str) -> value (str) + It is most convenient to use this method in conjunction with + @needs_subprocess as the decorator will cause the decorated test to be + skipped unless the `SUBPROC_TEST` environment variable is set to + the same value of ``_subproc_test_env`` + (this special environment variable is set by this method such that the + specified test(s) will not be skipped in the subprocess). + + + Following execution in the subprocess this method will check the test(s) + executed without error. The timeout kwarg can be used to allow more time + for longer running tests, it defaults to 60 seconds. 
+ """ + themod = self.__module__ + thecls = type(self).__name__ + parts = (test_module, test_class, test_name) + fully_qualified_test = '.'.join(x for x in parts if x is not None) + flags_args = [] + if flags is not None: + for flag, value in flags.items(): + flags_args.append(f'{flag}') + flags_args.append(f'{value}') + cmd = [sys.executable, *flags_args, '-m', 'numba.runtests', + fully_qualified_test] + env_copy = os.environ.copy() + env_copy['SUBPROC_TEST'] = _subproc_test_env + try: + env_copy['COVERAGE_PROCESS_START'] = os.environ['COVERAGE_RCFILE'] + except KeyError: + pass # ignored + envvars = pytypes.MappingProxyType({} if envvars is None else envvars) + env_copy.update(envvars) + status = subprocess.run(cmd, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, timeout=timeout, + env=env_copy, universal_newlines=True) + streams = (f'\ncaptured stdout: {status.stdout}\n' + f'captured stderr: {status.stderr}') + self.assertEqual(status.returncode, 0, streams) + # Python 3.12.1 report + no_tests_ran = "NO TESTS RAN" + if no_tests_ran in status.stderr: + self.skipTest(no_tests_ran) + else: + self.assertIn('OK', status.stderr) + return status + + def run_test_in_subprocess(maybefunc=None, timeout=60, envvars=None): + """Runs the decorated test in a subprocess via invoking numba's test + runner. kwargs timeout and envvars are passed through to + subprocess_test_runner.""" + def wrapper(func): + def inner(self, *args, **kwargs): + if os.environ.get("SUBPROC_TEST", None) != func.__name__: + # Not in a subprocess test env, so stage the call to run the + # test in a subprocess which will set the env var. + class_name = self.__class__.__name__ + self.subprocess_test_runner( + test_module=self.__module__, + test_class=class_name, + test_name=func.__name__, + timeout=timeout, + envvars=envvars, + _subproc_test_env=func.__name__, + ) + else: + # env var is set, so we're in the subprocess, run the + # actual test. 
+ func(self) + return inner + + if isinstance(maybefunc, pytypes.FunctionType): + return wrapper(maybefunc) + else: + return wrapper + + def make_dummy_type(self): + """Use to generate a dummy type unique to this test. Returns a python + Dummy class and a corresponding Numba type DummyType.""" + + # Use test_id to make sure no collision is possible. + test_id = self.id() + DummyType = type('DummyTypeFor{}'.format(test_id), (types.Opaque,), {}) + + dummy_type = DummyType("my_dummy") + register_model(DummyType)(OpaqueModel) + + class Dummy(object): + pass + + @typeof_impl.register(Dummy) + def typeof_dummy(val, c): + return dummy_type + + @unbox(DummyType) + def unbox_dummy(typ, obj, c): + return NativeValue(c.context.get_dummy_value()) + + return Dummy, DummyType + + def skip_if_no_external_compiler(self): + """ + Call this to ensure the test is skipped if no suitable external compiler + is found. This is a method on the TestCase opposed to a stand-alone + decorator so as to make it "lazy" via runtime evaluation opposed to + running at test-discovery time. + """ + # This is a local import to avoid deprecation warnings being generated + # through the use of the numba.pycc module. + from numba.pycc.platform import external_compiler_works + if not external_compiler_works(): + self.skipTest("No suitable external compiler was found.") + + +class SerialMixin(object): + """Mixin to mark test for serial execution. + """ + _numba_parallel_test_ = False + + +# Various helpers + +@contextlib.contextmanager +def override_config(name, value): + """ + Return a context manager that temporarily sets Numba config variable + *name* to *value*. *name* must be the name of an existing variable + in numba.config. 
+ """ + old_value = getattr(config, name) + setattr(config, name, value) + try: + yield + finally: + setattr(config, name, old_value) + + +@contextlib.contextmanager +def override_env_config(name, value): + """ + Return a context manager that temporarily sets an Numba config environment + *name* to *value*. + """ + old = os.environ.get(name) + os.environ[name] = value + config.reload_config() + + try: + yield + finally: + if old is None: + # If it wasn't set originally, delete the environ var + del os.environ[name] + else: + # Otherwise, restore to the old value + os.environ[name] = old + # Always reload config + config.reload_config() + + +def compile_function(name, code, globs): + """ + Given a *code* string, compile it with globals *globs* and return + the function named *name*. + """ + co = compile(code.rstrip(), "", "single") + ns = {} + eval(co, globs, ns) + return ns[name] + + +_trashcan_dir = 'numba-tests' + +if os.name == 'nt': + # Under Windows, gettempdir() points to the user-local temp dir + _trashcan_dir = os.path.join(tempfile.gettempdir(), _trashcan_dir) +else: + # Mix the UID into the directory name to allow different users to + # run the test suite without permission errors (issue #1586) + _trashcan_dir = os.path.join(tempfile.gettempdir(), + "%s.%s" % (_trashcan_dir, os.getuid())) + +# Stale temporary directories are deleted after they are older than this value. +# The test suite probably won't ever take longer than this... 
+_trashcan_timeout = 24 * 3600 # 1 day + +def _create_trashcan_dir(): + try: + os.mkdir(_trashcan_dir) + except FileExistsError: + pass + +def _purge_trashcan_dir(): + freshness_threshold = time.time() - _trashcan_timeout + for fn in sorted(os.listdir(_trashcan_dir)): + fn = os.path.join(_trashcan_dir, fn) + try: + st = os.stat(fn) + if st.st_mtime < freshness_threshold: + shutil.rmtree(fn, ignore_errors=True) + except OSError as e: + # In parallel testing, several processes can attempt to + # remove the same entry at once, ignore. + pass + +def _create_trashcan_subdir(prefix): + _purge_trashcan_dir() + path = tempfile.mkdtemp(prefix=prefix + '-', dir=_trashcan_dir) + return path + +def temp_directory(prefix): + """ + Create a temporary directory with the given *prefix* that will survive + at least as long as this process invocation. The temporary directory + will be eventually deleted when it becomes stale enough. + + This is necessary because a DLL file can't be deleted while in use + under Windows. + + An interesting side-effect is to be able to inspect the test files + shortly after a test suite run. + """ + _create_trashcan_dir() + return _create_trashcan_subdir(prefix) + + +def import_dynamic(modname): + """ + Import and return a module of the given name. Care is taken to + avoid issues due to Python's internal directory caching. 
+ """ + import importlib + importlib.invalidate_caches() + __import__(modname) + return sys.modules[modname] + + +# From CPython + +@contextlib.contextmanager +def captured_output(stream_name): + """Return a context manager used by captured_stdout/stdin/stderr + that temporarily replaces the sys stream *stream_name* with a StringIO.""" + orig_stdout = getattr(sys, stream_name) + setattr(sys, stream_name, io.StringIO()) + try: + yield getattr(sys, stream_name) + finally: + setattr(sys, stream_name, orig_stdout) + +def captured_stdout(): + """Capture the output of sys.stdout: + + with captured_stdout() as stdout: + print("hello") + self.assertEqual(stdout.getvalue(), "hello\n") + """ + return captured_output("stdout") + +def captured_stderr(): + """Capture the output of sys.stderr: + + with captured_stderr() as stderr: + print("hello", file=sys.stderr) + self.assertEqual(stderr.getvalue(), "hello\n") + """ + return captured_output("stderr") + + +@contextlib.contextmanager +def capture_cache_log(): + with captured_stdout() as out: + with override_config('DEBUG_CACHE', True): + yield out + + +class EnableNRTStatsMixin(object): + """Mixin to enable the NRT statistics counters.""" + + def setUp(self): + _nrt.memsys_enable_stats() + + def tearDown(self): + _nrt.memsys_disable_stats() + + +class MemoryLeak(object): + + __enable_leak_check = True + + def memory_leak_setup(self): + # Clean up any NRT-backed objects hanging in a dead reference cycle + gc.collect() + self.__init_stats = rtsys.get_allocation_stats() + + def memory_leak_teardown(self): + if self.__enable_leak_check: + self.assert_no_memory_leak() + + def assert_no_memory_leak(self): + old = self.__init_stats + new = rtsys.get_allocation_stats() + total_alloc = new.alloc - old.alloc + total_free = new.free - old.free + total_mi_alloc = new.mi_alloc - old.mi_alloc + total_mi_free = new.mi_free - old.mi_free + self.assertEqual(total_alloc, total_free) + self.assertEqual(total_mi_alloc, total_mi_free) + + def 
disable_leak_check(self): + # For per-test use when MemoryLeakMixin is injected into a TestCase + self.__enable_leak_check = False + + +class MemoryLeakMixin(EnableNRTStatsMixin, MemoryLeak): + + def setUp(self): + super(MemoryLeakMixin, self).setUp() + self.memory_leak_setup() + + def tearDown(self): + gc.collect() + self.memory_leak_teardown() + super(MemoryLeakMixin, self).tearDown() + + +@contextlib.contextmanager +def forbid_codegen(): + """ + Forbid LLVM code generation during the execution of the context + manager's enclosed block. + + If code generation is invoked, a RuntimeError is raised. + """ + from numba.core import codegen + patchpoints = ['CPUCodeLibrary._finalize_final_module'] + + old = {} + def fail(*args, **kwargs): + raise RuntimeError("codegen forbidden by test case") + try: + # XXX use the mock library instead? + for name in patchpoints: + parts = name.split('.') + obj = codegen + for attrname in parts[:-1]: + obj = getattr(obj, attrname) + attrname = parts[-1] + value = getattr(obj, attrname) + assert callable(value), ("%r should be callable" % name) + old[obj, attrname] = value + setattr(obj, attrname, fail) + yield + finally: + for (obj, attrname), value in old.items(): + setattr(obj, attrname, value) + + +# For details about redirection of file-descriptor, read +# https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/ + +@contextlib.contextmanager +def redirect_fd(fd): + """ + Temporarily redirect *fd* to a pipe's write end and return a file object + wrapping the pipe's read end. 
+ """ + + from numba import _helperlib + libnumba = ctypes.CDLL(_helperlib.__file__) + + libnumba._numba_flush_stdout() + save = os.dup(fd) + r, w = os.pipe() + try: + os.dup2(w, fd) + yield io.open(r, "r") + finally: + libnumba._numba_flush_stdout() + os.close(w) + os.dup2(save, fd) + os.close(save) + + +def redirect_c_stdout(): + """Redirect C stdout + """ + fd = sys.__stdout__.fileno() + return redirect_fd(fd) + + +def redirect_c_stderr(): + """Redirect C stderr + """ + fd = sys.__stderr__.fileno() + return redirect_fd(fd) + + +def run_in_new_process_caching(func, cache_dir_prefix=__name__, verbose=True): + """Spawn a new process to run `func` with a temporary cache directory. + + The childprocess's stdout and stderr will be captured and redirected to + the current process's stdout and stderr. + + Returns + ------- + ret : dict + exitcode: 0 for success. 1 for exception-raised. + stdout: str + stderr: str + """ + cache_dir = temp_directory(cache_dir_prefix) + return run_in_new_process_in_cache_dir(func, cache_dir, verbose=verbose) + + +def run_in_new_process_in_cache_dir(func, cache_dir, verbose=True): + """Spawn a new process to run `func` with a temporary cache directory. + + The childprocess's stdout and stderr will be captured and redirected to + the current process's stdout and stderr. + + Similar to ``run_in_new_process_caching()`` but the ``cache_dir`` is a + directory path instead of a name prefix for the directory path. + + Returns + ------- + ret : dict + exitcode: 0 for success. 1 for exception-raised. 
+ stdout: str + stderr: str + """ + ctx = mp.get_context('spawn') + qout = ctx.Queue() + with override_env_config('NUMBA_CACHE_DIR', cache_dir): + proc = ctx.Process(target=_remote_runner, args=[func, qout]) + proc.start() + proc.join() + stdout = qout.get_nowait() + stderr = qout.get_nowait() + if verbose and stdout.strip(): + print() + print('STDOUT'.center(80, '-')) + print(stdout) + if verbose and stderr.strip(): + print(file=sys.stderr) + print('STDERR'.center(80, '-'), file=sys.stderr) + print(stderr, file=sys.stderr) + return { + 'exitcode': proc.exitcode, + 'stdout': stdout, + 'stderr': stderr, + } + + +def _remote_runner(fn, qout): + """Used by `run_in_new_process_caching()` + """ + with captured_stderr() as stderr: + with captured_stdout() as stdout: + try: + fn() + except Exception: + traceback.print_exc() + exitcode = 1 + else: + exitcode = 0 + qout.put(stdout.getvalue()) + qout.put(stderr.getvalue()) + sys.exit(exitcode) + +class CheckWarningsMixin(object): + @contextlib.contextmanager + def check_warnings(self, messages, category=RuntimeWarning): + with warnings.catch_warnings(record=True) as catch: + warnings.simplefilter("always") + yield + found = 0 + for w in catch: + for m in messages: + if m in str(w.message): + self.assertEqual(w.category, category) + found += 1 + self.assertEqual(found, len(messages)) + + +def _format_jit_options(**jit_options): + if not jit_options: + return '' + out = [] + for key, value in jit_options.items(): + if isinstance(value, str): + value = '"{}"'.format(value) + out.append('{}={}'.format(key, value)) + return ', '.join(out) + + +@contextlib.contextmanager +def create_temp_module(source_lines, **jit_options): + """A context manager that creates and imports a temporary module + from sources provided in ``source_lines``. + + Optionally it is possible to provide jit options for ``jit_module`` if it + is explicitly used in ``source_lines`` like ``jit_module({jit_options})``. 
+ """ + # Use try/finally so cleanup happens even when an exception is raised + try: + tempdir = temp_directory('test_temp_module') + # Generate random module name + temp_module_name = 'test_temp_module_{}'.format( + str(uuid.uuid4()).replace('-', '_')) + temp_module_path = os.path.join(tempdir, temp_module_name + '.py') + + jit_options = _format_jit_options(**jit_options) + with open(temp_module_path, 'w') as f: + lines = source_lines.format(jit_options=jit_options) + f.write(lines) + # Add test_module to sys.path so it can be imported + sys.path.insert(0, tempdir) + test_module = importlib.import_module(temp_module_name) + yield test_module + finally: + sys.modules.pop(temp_module_name, None) + sys.path.remove(tempdir) + shutil.rmtree(tempdir) + + +def run_in_subprocess(code, flags=None, env=None, timeout=30): + """Run a snippet of Python code in a subprocess with flags, if any are + given. 'env' is passed to subprocess.Popen(). 'timeout' is passed to + popen.communicate(). + + Returns the stdout and stderr of the subprocess after its termination. + """ + if flags is None: + flags = [] + cmd = [sys.executable,] + flags + ["-c", code] + popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, env=env) + out, err = popen.communicate(timeout=timeout) + if popen.returncode != 0: + msg = "process failed with code %s: stderr follows\n%s\n" + raise AssertionError(msg % (popen.returncode, err.decode())) + return out, err + + +def strace(work, syscalls, timeout=10): + """Runs strace whilst executing the function work() in the current process, + captures the listed syscalls (list of strings). Takes an optional timeout in + seconds, default is 10, if this is exceeded the process will be sent a + SIGKILL. Returns a list of lines that are output by strace. + """ + + # Open a tmpfile for strace to write into. 
+ with tempfile.NamedTemporaryFile('w+t') as ntf: + + parent_pid = os.getpid() + strace_binary = shutil.which('strace') + if strace_binary is None: + raise ValueError("No valid 'strace' binary could be found") + cmd = [strace_binary, # strace + '-q', # quietly (no attach/detach print out) + '-p', str(parent_pid), # this PID + '-e', ','.join(syscalls), # these syscalls + '-o', ntf.name] # put output into this file + + # redirect stdout, stderr is handled by the `-o` flag to strace. + popen = subprocess.Popen(cmd, stdout=subprocess.PIPE,) + strace_pid = popen.pid + thread_timeout = threading.Timer(timeout, popen.kill) + thread_timeout.start() + + def check_return(problem=''): + ret = popen.returncode + if ret != 0: + msg = ("strace exited non-zero, process return code was:" + f"{ret}. {problem}") + raise RuntimeError(msg) + try: + # push the communication onto a thread so it doesn't block. + # start comms thread + thread_comms = threading.Thread(target=popen.communicate) + thread_comms.start() + + # do work + work() + # Flush the output buffer file + ntf.flush() + # interrupt the strace process to stop it if it's still running + if popen.poll() is None: + os.kill(strace_pid, signal.SIGINT) + else: + # it's not running, probably an issue, raise + problem="If this is SIGKILL, increase the timeout?" + check_return(problem) + # Make sure the return code is 0, SIGINT to detach is considered + # a successful exit. + popen.wait() + check_return() + # collect the data + strace_data = ntf.readlines() + finally: + # join communication, should be stopped now as process has + # exited + thread_comms.join() + # should be stopped already + thread_timeout.cancel() + + return strace_data + + +def strace_supported(): + """Checks if strace is supported and working""" + + # Only support this on linux where the `strace` binary is likely to be the + # strace needed. 
+ if not sys.platform.startswith('linux'): + return False + + def force_clone(): # subprocess triggers a clone + subprocess.run([sys.executable, '-c', 'exit()']) + + syscall = 'clone' + try: + trace = strace(force_clone, [syscall,]) + except Exception: + return False + return syscall in ''.join(trace) + + +class IRPreservingTestPipeline(CompilerBase): + """ Same as the standard pipeline, but preserves the func_ir into the + metadata store after legalisation, useful for testing IR changes""" + + def define_pipelines(self): + pipeline = DefaultPassBuilder.define_nopython_pipeline( + self.state, "ir_preserving_custom_pipe") + # mangle the default pipeline and inject DCE and IR preservation ahead + # of legalisation + + # TODO: add a way to not do this! un-finalizing is not a good idea + pipeline._finalized = False + pipeline.add_pass_after(PreserveIR, IRLegalization) + + pipeline.finalize() + return [pipeline] + + +def print_azure_matrix(): + """This is a utility function that prints out the map of NumPy to Python + versions and how many of that combination are being tested across all the + declared config for azure-pipelines. 
It is useful to run when updating the + azure-pipelines config to be able to quickly see what the coverage is.""" + import yaml + from yaml import Loader + base_path = os.path.dirname(os.path.abspath(__file__)) + azure_pipe = os.path.join(base_path, '..', '..', 'azure-pipelines.yml') + if not os.path.isfile(azure_pipe): + raise RuntimeError("'azure-pipelines.yml' is not available") + with open(os.path.abspath(azure_pipe), 'rt') as f: + data = f.read() + pipe_yml = yaml.load(data, Loader=Loader) + + templates = pipe_yml['jobs'] + # first look at the items in the first two templates, this is osx/linux + py2np_map = defaultdict(lambda: defaultdict(int)) + for tmplt in templates[:2]: + matrix = tmplt['parameters']['matrix'] + for setup in matrix.values(): + py2np_map[setup['NUMPY']][setup['PYTHON']]+=1 + + # next look at the items in the windows only template + winpath = ['..', '..', 'buildscripts', 'azure', 'azure-windows.yml'] + azure_windows = os.path.join(base_path, *winpath) + if not os.path.isfile(azure_windows): + raise RuntimeError("'azure-windows.yml' is not available") + with open(os.path.abspath(azure_windows), 'rt') as f: + data = f.read() + windows_yml = yaml.load(data, Loader=Loader) + + # There's only one template in windows and its keyed differently to the + # above, get its matrix. 
+ matrix = windows_yml['jobs'][0]['strategy']['matrix'] + for setup in matrix.values(): + py2np_map[setup['NUMPY']][setup['PYTHON']]+=1 + + print("NumPy | Python | Count") + print("-----------------------") + for npver, pys in sorted(py2np_map.items()): + for pyver, count in pys.items(): + print(f" {npver} | {pyver:<4} | {count}") + + # print the "reverse" map + rev_map = defaultdict(lambda: defaultdict(int)) + for npver, pys in sorted(py2np_map.items()): + for pyver, count in pys.items(): + rev_map[pyver][npver] = count + print("\nPython | NumPy | Count") + print("-----------------------") + sorter = lambda x: int(x[0].split('.')[1]) + for pyver, nps in sorted(rev_map.items(), key=sorter): + for npver, count in nps.items(): + print(f" {pyver:<4} | {npver} | {count}") diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_alignment.py b/venv/lib/python3.10/site-packages/numba/tests/test_alignment.py new file mode 100644 index 0000000000000000000000000000000000000000..77eee17b26e1ee10985b99a5d192907cc890534d --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_alignment.py @@ -0,0 +1,40 @@ +# See also numba.cuda.tests.test_alignment + +import numpy as np +from numba import from_dtype, njit, void +from numba.tests.support import TestCase + + +class TestAlignment(TestCase): + + def test_record_alignment(self): + rec_dtype = np.dtype([('a', 'int32'), ('b', 'float64')], align=True) + rec = from_dtype(rec_dtype) + + @njit((rec[:],)) + def foo(a): + for i in range(a.size): + a[i].a = a[i].b + + a_recarray = np.recarray(3, dtype=rec_dtype) + for i in range(a_recarray.size): + a_rec = a_recarray[i] + a_rec.a = 0 + a_rec.b = (i + 1) * 123 + + foo(a_recarray) + np.testing.assert_equal(a_recarray.a, a_recarray.b) + + def test_record_misaligned(self): + rec_dtype = np.dtype([('a', 'int32'), ('b', 'float64')]) + rec = from_dtype(rec_dtype) + + # Unlike the CUDA target, this will not generate an error + @njit((rec[:],)) + def foo(a): + for i in 
range(a.size): + a[i].a = a[i].b + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_analysis.py b/venv/lib/python3.10/site-packages/numba/tests/test_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..c50defe335e09a0af1e4e7a2ba5a97dd4e85fc6c --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_analysis.py @@ -0,0 +1,1007 @@ +# Tests numba.analysis functions +import collections +import types as pytypes + +import numpy as np +from numba.core.compiler import run_frontend, Flags, StateDict +from numba import jit, njit, literal_unroll +from numba.core import types, errors, ir, rewrites, ir_utils, cpu +from numba.core import postproc +from numba.core.inline_closurecall import InlineClosureCallPass +from numba.tests.support import (TestCase, MemoryLeakMixin, SerialMixin, + IRPreservingTestPipeline) +from numba.core.analysis import dead_branch_prune, rewrite_semantic_constants +from numba.core.untyped_passes import (ReconstructSSA, TranslateByteCode, + IRProcessing, DeadBranchPrune, + PreserveIR) +from numba.core.compiler import DefaultPassBuilder, CompilerBase, PassManager + + +_GLOBAL = 123 + +enable_pyobj_flags = Flags() +enable_pyobj_flags.enable_pyobject = True + + +def compile_to_ir(func): + func_ir = run_frontend(func) + state = StateDict() + state.func_ir = func_ir + state.typemap = None + state.calltypes = None + # Transform to SSA + ReconstructSSA().run_pass(state) + # call this to get print etc rewrites + rewrites.rewrite_registry.apply('before-inference', state) + return func_ir + + +class TestBranchPruneBase(MemoryLeakMixin, TestCase): + """ + Tests branch pruning + """ + _DEBUG = False + + # find *all* branches + def find_branches(self, the_ir): + branches = [] + for blk in the_ir.blocks.values(): + tmp = [_ for _ in blk.find_insts(cls=ir.Branch)] + branches.extend(tmp) + return branches + + def assert_prune(self, func, args_tys, prune, *args, 
**kwargs): + # This checks that the expected pruned branches have indeed been pruned. + # func is a python function to assess + # args_tys is the numba types arguments tuple + # prune arg is a list, one entry per branch. The value in the entry is + # encoded as follows: + # True: using constant inference only, the True branch will be pruned + # False: using constant inference only, the False branch will be pruned + # None: under no circumstances should this branch be pruned + # *args: the argument instances to pass to the function to check + # execution is still valid post transform + # **kwargs: + # - flags: args to pass to `jit` default is `nopython=True`, + # e.g. permits use of e.g. object mode. + + func_ir = compile_to_ir(func) + before = func_ir.copy() + if self._DEBUG: + print("=" * 80) + print("before inline") + func_ir.dump() + + # run closure inlining to ensure that nonlocals in closures are visible + inline_pass = InlineClosureCallPass(func_ir, + cpu.ParallelOptions(False),) + inline_pass.run() + + # Remove all Dels, and re-run postproc + post_proc = postproc.PostProcessor(func_ir) + post_proc.run() + + rewrite_semantic_constants(func_ir, args_tys) + if self._DEBUG: + print("=" * 80) + print("before prune") + func_ir.dump() + + dead_branch_prune(func_ir, args_tys) + + after = func_ir + if self._DEBUG: + print("after prune") + func_ir.dump() + + before_branches = self.find_branches(before) + self.assertEqual(len(before_branches), len(prune)) + + # what is expected to be pruned + expect_removed = [] + for idx, prune in enumerate(prune): + branch = before_branches[idx] + if prune is True: + expect_removed.append(branch.truebr) + elif prune is False: + expect_removed.append(branch.falsebr) + elif prune is None: + pass # nothing should be removed! 
+ elif prune == 'both': + expect_removed.append(branch.falsebr) + expect_removed.append(branch.truebr) + else: + assert 0, "unreachable" + + # compare labels + original_labels = set([_ for _ in before.blocks.keys()]) + new_labels = set([_ for _ in after.blocks.keys()]) + # assert that the new labels are precisely the original less the + # expected pruned labels + try: + self.assertEqual(new_labels, original_labels - set(expect_removed)) + except AssertionError as e: + print("new_labels", sorted(new_labels)) + print("original_labels", sorted(original_labels)) + print("expect_removed", sorted(expect_removed)) + raise e + + supplied_flags = kwargs.pop('flags', {'nopython': True}) + # NOTE: original testing used `compile_isolated` hence use of `cres`. + cres = jit(args_tys, **supplied_flags)(func).overloads[args_tys] + if args is None: + res = cres.entry_point() + expected = func() + else: + res = cres.entry_point(*args) + expected = func(*args) + self.assertEqual(res, expected) + + +class TestBranchPrune(TestBranchPruneBase, SerialMixin): + + def test_single_if(self): + + def impl(x): + if 1 == 0: + return 3.14159 + + self.assert_prune(impl, (types.NoneType('none'),), [True], None) + + def impl(x): + if 1 == 1: + return 3.14159 + + self.assert_prune(impl, (types.NoneType('none'),), [False], None) + + def impl(x): + if x is None: + return 3.14159 + + self.assert_prune(impl, (types.NoneType('none'),), [False], None) + self.assert_prune(impl, (types.IntegerLiteral(10),), [True], 10) + + def impl(x): + if x == 10: + return 3.14159 + + self.assert_prune(impl, (types.NoneType('none'),), [True], None) + self.assert_prune(impl, (types.IntegerLiteral(10),), [None], 10) + + def impl(x): + if x == 10: + z = 3.14159 # noqa: F841 # no effect + + self.assert_prune(impl, (types.NoneType('none'),), [True], None) + self.assert_prune(impl, (types.IntegerLiteral(10),), [None], 10) + + def impl(x): + z = None + y = z + if x == y: + return 100 + + self.assert_prune(impl, 
(types.NoneType('none'),), [False], None) + self.assert_prune(impl, (types.IntegerLiteral(10),), [True], 10) + + def test_single_if_else(self): + + def impl(x): + if x is None: + return 3.14159 + else: + return 1.61803 + + self.assert_prune(impl, (types.NoneType('none'),), [False], None) + self.assert_prune(impl, (types.IntegerLiteral(10),), [True], 10) + + def test_single_if_const_val(self): + + def impl(x): + if x == 100: + return 3.14159 + + self.assert_prune(impl, (types.NoneType('none'),), [True], None) + self.assert_prune(impl, (types.IntegerLiteral(100),), [None], 100) + + def impl(x): + # switch the condition order + if 100 == x: + return 3.14159 + + self.assert_prune(impl, (types.NoneType('none'),), [True], None) + self.assert_prune(impl, (types.IntegerLiteral(100),), [None], 100) + + def test_single_if_else_two_const_val(self): + + def impl(x, y): + if x == y: + return 3.14159 + else: + return 1.61803 + + self.assert_prune(impl, (types.IntegerLiteral(100),) * 2, [None], 100, + 100) + self.assert_prune(impl, (types.NoneType('none'),) * 2, [False], None, + None) + self.assert_prune(impl, (types.IntegerLiteral(100), + types.NoneType('none'),), [True], 100, None) + self.assert_prune(impl, (types.IntegerLiteral(100), + types.IntegerLiteral(1000)), [None], 100, 1000) + + def test_single_if_else_w_following_undetermined(self): + + def impl(x): + x_is_none_work = False + if x is None: + x_is_none_work = True + else: + dead = 7 # noqa: F841 # no effect + + if x_is_none_work: + y = 10 + else: + y = -3 + return y + + self.assert_prune(impl, (types.NoneType('none'),), [False, None], None) + self.assert_prune(impl, (types.IntegerLiteral(10),), [True, None], 10) + + def impl(x): + x_is_none_work = False + if x is None: + x_is_none_work = True + else: + pass + + if x_is_none_work: + y = 10 + else: + y = -3 + return y + + # Python 3.10 creates a block with a NOP in it for the `pass` which + # means it gets pruned. 
+ self.assert_prune(impl, (types.NoneType('none'),), [False, None], + None) + + self.assert_prune(impl, (types.IntegerLiteral(10),), [True, None], 10) + + def test_double_if_else_rt_const(self): + + def impl(x): + one_hundred = 100 + x_is_none_work = 4 + if x is None: + x_is_none_work = 100 + else: + dead = 7 # noqa: F841 # no effect + + if x_is_none_work == one_hundred: + y = 10 + else: + y = -3 + + return y, x_is_none_work + + self.assert_prune(impl, (types.NoneType('none'),), [False, None], None) + self.assert_prune(impl, (types.IntegerLiteral(10),), [True, None], 10) + + def test_double_if_else_non_literal_const(self): + + def impl(x): + one_hundred = 100 + if x == one_hundred: + y = 3.14159 + else: + y = 1.61803 + return y + + # no prune as compilation specialization on literal value not permitted + self.assert_prune(impl, (types.IntegerLiteral(10),), [None], 10) + self.assert_prune(impl, (types.IntegerLiteral(100),), [None], 100) + + def test_single_two_branches_same_cond(self): + + def impl(x): + if x is None: + y = 10 + else: + y = 40 + + if x is not None: + z = 100 + else: + z = 400 + + return z, y + + self.assert_prune(impl, (types.NoneType('none'),), [False, True], None) + self.assert_prune(impl, (types.IntegerLiteral(10),), [True, False], 10) + + def test_cond_is_kwarg_none(self): + + def impl(x=None): + if x is None: + y = 10 + else: + y = 40 + + if x is not None: + z = 100 + else: + z = 400 + + return z, y + + self.assert_prune(impl, (types.Omitted(None),), + [False, True], None) + self.assert_prune(impl, (types.NoneType('none'),), [False, True], None) + self.assert_prune(impl, (types.IntegerLiteral(10),), [True, False], 10) + + def test_cond_is_kwarg_value(self): + + def impl(x=1000): + if x == 1000: + y = 10 + else: + y = 40 + + if x != 1000: + z = 100 + else: + z = 400 + + return z, y + + self.assert_prune(impl, (types.Omitted(1000),), [None, None], 1000) + self.assert_prune(impl, (types.IntegerLiteral(1000),), [None, None], + 1000) + 
self.assert_prune(impl, (types.IntegerLiteral(0),), [None, None], 0) + self.assert_prune(impl, (types.NoneType('none'),), [True, False], None) + + def test_cond_rewrite_is_correct(self): + # this checks that when a condition is replaced, it is replace by a + # true/false bit that correctly represents the evaluated condition + def fn(x): + if x is None: + return 10 + return 12 + + def check(func, arg_tys, bit_val): + func_ir = compile_to_ir(func) + + # check there is 1 branch + before_branches = self.find_branches(func_ir) + self.assertEqual(len(before_branches), 1) + + # check the condition in the branch is a binop + pred_var = before_branches[0].cond + pred_defn = ir_utils.get_definition(func_ir, pred_var) + self.assertEqual(pred_defn.op, 'call') + condition_var = pred_defn.args[0] + condition_op = ir_utils.get_definition(func_ir, condition_var) + self.assertEqual(condition_op.op, 'binop') + + # do the prune, this should kill the dead branch and rewrite the + #'condition to a true/false const bit + if self._DEBUG: + print("=" * 80) + print("before prune") + func_ir.dump() + dead_branch_prune(func_ir, arg_tys) + if self._DEBUG: + print("=" * 80) + print("after prune") + func_ir.dump() + + # after mutation, the condition should be a const value `bit_val` + new_condition_defn = ir_utils.get_definition(func_ir, condition_var) + self.assertTrue(isinstance(new_condition_defn, ir.Const)) + self.assertEqual(new_condition_defn.value, bit_val) + + check(fn, (types.NoneType('none'),), 1) + check(fn, (types.IntegerLiteral(10),), 0) + + def test_global_bake_in(self): + + def impl(x): + if _GLOBAL == 123: + return x + else: + return x + 10 + + self.assert_prune(impl, (types.IntegerLiteral(1),), [False], 1) + + global _GLOBAL + tmp = _GLOBAL + + try: + _GLOBAL = 5 + + def impl(x): + if _GLOBAL == 123: + return x + else: + return x + 10 + + self.assert_prune(impl, (types.IntegerLiteral(1),), [True], 1) + finally: + _GLOBAL = tmp + + def test_freevar_bake_in(self): + + _FREEVAR = 
123 + + def impl(x): + if _FREEVAR == 123: + return x + else: + return x + 10 + + self.assert_prune(impl, (types.IntegerLiteral(1),), [False], 1) + + _FREEVAR = 12 + + def impl(x): + if _FREEVAR == 123: + return x + else: + return x + 10 + + self.assert_prune(impl, (types.IntegerLiteral(1),), [True], 1) + + def test_redefined_variables_are_not_considered_in_prune(self): + # see issue #4163, checks that if a variable that is an argument is + # redefined in the user code it is not considered const + + def impl(array, a=None): + if a is None: + a = 0 + if a < 0: + return 10 + return 30 + + self.assert_prune(impl, + (types.Array(types.float64, 2, 'C'), + types.NoneType('none'),), + [None, None], + np.zeros((2, 3)), None) + + def test_comparison_operators(self): + # see issue #4163, checks that a variable that is an argument and has + # value None survives TypeError from invalid comparison which should be + # dead + + def impl(array, a=None): + x = 0 + if a is None: + return 10 # dynamic exec would return here + # static analysis requires that this is executed with a=None, + # hence TypeError + if a < 0: + return 20 + return x + + self.assert_prune(impl, + (types.Array(types.float64, 2, 'C'), + types.NoneType('none'),), + [False, 'both'], + np.zeros((2, 3)), None) + + self.assert_prune(impl, + (types.Array(types.float64, 2, 'C'), + types.float64,), + [None, None], + np.zeros((2, 3)), 12.) 
+ + def test_redefinition_analysis_same_block(self): + # checks that a redefinition in a block with prunable potential doesn't + # break + + def impl(array, x, a=None): + b = 2 + if x < 4: + b = 12 + if a is None: # known true + a = 7 # live + else: + b = 15 # dead + if a < 0: # valid as a result of the redefinition of 'a' + return 10 + return 30 + b + a + + self.assert_prune(impl, + (types.Array(types.float64, 2, 'C'), + types.float64, types.NoneType('none'),), + [None, False, None], + np.zeros((2, 3)), 1., None) + + def test_redefinition_analysis_different_block_can_exec(self): + # checks that a redefinition in a block that may be executed prevents + # pruning + + def impl(array, x, a=None): + b = 0 + if x > 5: + a = 11 # a redefined, cannot tell statically if this will exec + if x < 4: + b = 12 + if a is None: # cannot prune, cannot determine if re-defn occurred + b += 5 + else: + b += 7 + if a < 0: + return 10 + return 30 + b + + self.assert_prune(impl, + (types.Array(types.float64, 2, 'C'), + types.float64, types.NoneType('none'),), + [None, None, None, None], + np.zeros((2, 3)), 1., None) + + def test_redefinition_analysis_different_block_cannot_exec(self): + # checks that a redefinition in a block guarded by something that + # has prune potential + + def impl(array, x=None, a=None): + b = 0 + if x is not None: + a = 11 + if a is None: + b += 5 + else: + b += 7 + return 30 + b + + self.assert_prune(impl, + (types.Array(types.float64, 2, 'C'), + types.NoneType('none'), types.NoneType('none')), + [True, None], + np.zeros((2, 3)), None, None) + + self.assert_prune(impl, + (types.Array(types.float64, 2, 'C'), + types.NoneType('none'), types.float64), + [True, None], + np.zeros((2, 3)), None, 1.2) + + self.assert_prune(impl, + (types.Array(types.float64, 2, 'C'), + types.float64, types.NoneType('none')), + [None, None], + np.zeros((2, 3)), 1.2, None) + + def test_closure_and_nonlocal_can_prune(self): + # Closures must be inlined ahead of branch pruning in case 
nonlocal + # is used. See issue #6585. + def impl(): + x = 1000 + + def closure(): + nonlocal x + x = 0 + + closure() + + if x == 0: + return True + else: + return False + + self.assert_prune(impl, (), [False,],) + + def test_closure_and_nonlocal_cannot_prune(self): + # Closures must be inlined ahead of branch pruning in case nonlocal + # is used. See issue #6585. + def impl(n): + x = 1000 + + def closure(t): + nonlocal x + x = t + + closure(n) + + if x == 0: + return True + else: + return False + + self.assert_prune(impl, (types.int64,), [None,], 1) + + +class TestBranchPrunePredicates(TestBranchPruneBase, SerialMixin): + # Really important thing to remember... the branch on predicates end up as + # POP_JUMP_IF_ and the targets are backwards compared to normal, i.e. + # the true condition is far jump and the false the near i.e. `if x` would + # end up in Numba IR as e.g. `branch x 10, 6`. + + _TRUTHY = (1, "String", True, 7.4, 3j) + _FALSEY = (0, "", False, 0.0, 0j, None) + + def _literal_const_sample_generator(self, pyfunc, consts): + """ + This takes a python function, pyfunc, and manipulates its co_const + __code__ member to create a new function with different co_consts as + supplied in argument consts. + + consts is a dict {index: value} of co_const tuple index to constant + value used to update a pyfunc clone's co_const. 
+ """ + pyfunc_code = pyfunc.__code__ + + # translate consts spec to update the constants + co_consts = {k: v for k, v in enumerate(pyfunc_code.co_consts)} + for k, v in consts.items(): + co_consts[k] = v + new_consts = tuple([v for _, v in sorted(co_consts.items())]) + + # create code object with mutation + new_code = pyfunc_code.replace(co_consts=new_consts) + + # get function + return pytypes.FunctionType(new_code, globals()) + + def test_literal_const_code_gen(self): + def impl(x): + _CONST1 = "PLACEHOLDER1" + if _CONST1: + return 3.14159 + else: + _CONST2 = "PLACEHOLDER2" + return _CONST2 + 4 + + new = self._literal_const_sample_generator(impl, {1:0, 3:20}) + iconst = impl.__code__.co_consts + nconst = new.__code__.co_consts + self.assertEqual(iconst, (None, "PLACEHOLDER1", 3.14159, + "PLACEHOLDER2", 4)) + self.assertEqual(nconst, (None, 0, 3.14159, 20, 4)) + self.assertEqual(impl(None), 3.14159) + self.assertEqual(new(None), 24) + + def test_single_if_const(self): + + def impl(x): + _CONST1 = "PLACEHOLDER1" + if _CONST1: + return 3.14159 + + for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True): + for const in c_inp: + func = self._literal_const_sample_generator(impl, {1: const}) + self.assert_prune(func, (types.NoneType('none'),), [prune], + None) + + def test_single_if_negate_const(self): + + def impl(x): + _CONST1 = "PLACEHOLDER1" + if not _CONST1: + return 3.14159 + + for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True): + for const in c_inp: + func = self._literal_const_sample_generator(impl, {1: const}) + self.assert_prune(func, (types.NoneType('none'),), [prune], + None) + + def test_single_if_else_const(self): + + def impl(x): + _CONST1 = "PLACEHOLDER1" + if _CONST1: + return 3.14159 + else: + return 1.61803 + + for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True): + for const in c_inp: + func = self._literal_const_sample_generator(impl, {1: const}) + self.assert_prune(func, (types.NoneType('none'),), [prune], + None) + 
+ def test_single_if_else_negate_const(self): + + def impl(x): + _CONST1 = "PLACEHOLDER1" + if not _CONST1: + return 3.14159 + else: + return 1.61803 + + for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True): + for const in c_inp: + func = self._literal_const_sample_generator(impl, {1: const}) + self.assert_prune(func, (types.NoneType('none'),), [prune], + None) + + def test_single_if_freevar(self): + for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True): + for const in c_inp: + + def func(x): + if const: + return 3.14159, const + self.assert_prune(func, (types.NoneType('none'),), [prune], + None) + + def test_single_if_negate_freevar(self): + for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True): + for const in c_inp: + + def func(x): + if not const: + return 3.14159, const + self.assert_prune(func, (types.NoneType('none'),), [prune], + None) + + def test_single_if_else_freevar(self): + for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True): + for const in c_inp: + + def func(x): + if const: + return 3.14159, const + else: + return 1.61803, const + self.assert_prune(func, (types.NoneType('none'),), [prune], + None) + + def test_single_if_else_negate_freevar(self): + for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True): + for const in c_inp: + + def func(x): + if not const: + return 3.14159, const + else: + return 1.61803, const + self.assert_prune(func, (types.NoneType('none'),), [prune], + None) + + # globals in this section have absurd names after their test usecase names + # so as to prevent collisions and permit tests to run in parallel + def test_single_if_global(self): + global c_test_single_if_global + + for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True): + for c in c_inp: + c_test_single_if_global = c + + def func(x): + if c_test_single_if_global: + return 3.14159, c_test_single_if_global + + self.assert_prune(func, (types.NoneType('none'),), [prune], + None) + + def 
test_single_if_negate_global(self): + global c_test_single_if_negate_global + + for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True): + for c in c_inp: + c_test_single_if_negate_global = c + + def func(x): + if c_test_single_if_negate_global: + return 3.14159, c_test_single_if_negate_global + + self.assert_prune(func, (types.NoneType('none'),), [prune], + None) + + def test_single_if_else_global(self): + global c_test_single_if_else_global + + for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True): + for c in c_inp: + c_test_single_if_else_global = c + + def func(x): + if c_test_single_if_else_global: + return 3.14159, c_test_single_if_else_global + else: + return 1.61803, c_test_single_if_else_global + self.assert_prune(func, (types.NoneType('none'),), [prune], + None) + + def test_single_if_else_negate_global(self): + global c_test_single_if_else_negate_global + + for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True): + for c in c_inp: + c_test_single_if_else_negate_global = c + + def func(x): + if not c_test_single_if_else_negate_global: + return 3.14159, c_test_single_if_else_negate_global + else: + return 1.61803, c_test_single_if_else_negate_global + self.assert_prune(func, (types.NoneType('none'),), [prune], + None) + + def test_issue_5618(self): + + @njit + def foo(): + values = np.zeros(1) + tmp = 666 + if tmp: + values[0] = tmp + return values + + self.assertPreciseEqual(foo.py_func()[0], 666.) + self.assertPreciseEqual(foo()[0], 666.) + + +class TestBranchPruneSSA(MemoryLeakMixin, TestCase): + # Tests SSA rewiring of phi nodes after branch pruning. + + class SSAPrunerCompiler(CompilerBase): + def define_pipelines(self): + # This is a simple pipeline that does branch pruning on IR in SSA + # form, then types and lowers as per the standard nopython pipeline. 
+ pm = PassManager("testing pm") + pm.add_pass(TranslateByteCode, "analyzing bytecode") + pm.add_pass(IRProcessing, "processing IR") + # SSA early + pm.add_pass(ReconstructSSA, "ssa") + pm.add_pass(DeadBranchPrune, "dead branch pruning") + # type and then lower as usual + pm.add_pass(PreserveIR, "preserves the IR as metadata") + dpb = DefaultPassBuilder + typed_passes = dpb.define_typed_pipeline(self.state) + pm.passes.extend(typed_passes.passes) + lowering_passes = dpb.define_nopython_lowering_pipeline(self.state) + pm.passes.extend(lowering_passes.passes) + pm.finalize() + return [pm] + + def test_ssa_update_phi(self): + # This checks that dead branch pruning is rewiring phi nodes correctly + # after a block containing an incoming for a phi is removed. + + @njit(pipeline_class=self.SSAPrunerCompiler) + def impl(p=None, q=None): + z = 1 + r = False + if p is None: + r = True # live + + if r and q is not None: + z = 20 # dead + + # one of the incoming blocks for z is dead, the phi needs an update + # were this not done, it would refer to variables that do not exist + # and result in a lowering error. + return z, r + + self.assertPreciseEqual(impl(), impl.py_func()) + + def test_ssa_replace_phi(self): + # This checks that when a phi only has one incoming, because the other + # has been pruned, that a direct assignment is used instead. 
+ + @njit(pipeline_class=self.SSAPrunerCompiler) + def impl(p=None): + z = 0 + if p is None: + z = 10 + else: + z = 20 + + return z + + self.assertPreciseEqual(impl(), impl.py_func()) + func_ir = impl.overloads[impl.signatures[0]].metadata['preserved_ir'] + + # check the func_ir, make sure there's no phi nodes + for blk in func_ir.blocks.values(): + self.assertFalse([*blk.find_exprs('phi')]) + + +class TestBranchPrunePostSemanticConstRewrites(TestBranchPruneBase): + # Tests that semantic constants rewriting works by virtue of branch pruning + + def test_array_ndim_attr(self): + + def impl(array): + if array.ndim == 2: + if array.shape[1] == 2: + return 1 + else: + return 10 + + self.assert_prune(impl, (types.Array(types.float64, 2, 'C'),), [False, + None], + np.zeros((2, 3))) + self.assert_prune(impl, (types.Array(types.float64, 1, 'C'),), [True, + 'both'], + np.zeros((2,))) + + def test_tuple_len(self): + + def impl(tup): + if len(tup) == 3: + if tup[2] == 2: + return 1 + else: + return 0 + + self.assert_prune(impl, (types.UniTuple(types.int64, 3),), [False, + None], + tuple([1, 2, 3])) + self.assert_prune(impl, (types.UniTuple(types.int64, 2),), [True, + 'both'], + tuple([1, 2])) + + def test_attr_not_len(self): + # The purpose of this test is to make sure that the conditions guarding + # the rewrite part do not themselves raise exceptions. + # This produces an `ir.Expr` call node for `float.as_integer_ratio`, + # which is a getattr() on `float`. 
+ + @njit + def test(): + float.as_integer_ratio(1.23) + + # this should raise a TypingError + with self.assertRaises(errors.TypingError) as e: + test() + + self.assertIn("Unknown attribute 'as_integer_ratio'", str(e.exception)) + + def test_ndim_not_on_array(self): + + FakeArray = collections.namedtuple('FakeArray', ['ndim']) + fa = FakeArray(ndim=2) + + def impl(fa): + if fa.ndim == 2: + return fa.ndim + else: + object() + + # check prune works for array ndim + self.assert_prune(impl, (types.Array(types.float64, 2, 'C'),), [False], + np.zeros((2, 3))) + + # check prune fails for something with `ndim` attr that is not array + FakeArrayType = types.NamedUniTuple(types.int64, 1, FakeArray) + self.assert_prune(impl, (FakeArrayType,), [None], fa, + flags={'nopython':False, 'forceobj':True}) + + def test_semantic_const_propagates_before_static_rewrites(self): + # see issue #5015, the ndim needs writing in as a const before + # the rewrite passes run to make e.g. getitems static where possible + @njit + def impl(a, b): + return a.shape[:b.ndim] + + args = (np.zeros((5, 4, 3, 2)), np.zeros((1, 1))) + + self.assertPreciseEqual(impl(*args), impl.py_func(*args)) + + def test_tuple_const_propagation(self): + @njit(pipeline_class=IRPreservingTestPipeline) + def impl(*args): + s = 0 + for arg in literal_unroll(args): + s += len(arg) + return s + + inp = ((), (1, 2, 3), ()) + self.assertPreciseEqual(impl(*inp), impl.py_func(*inp)) + + ol = impl.overloads[impl.signatures[0]] + func_ir = ol.metadata['preserved_ir'] + # make sure one of the inplace binop args is a Const + binop_consts = set() + for blk in func_ir.blocks.values(): + for expr in blk.find_exprs('inplace_binop'): + inst = blk.find_variable_assignment(expr.rhs.name) + self.assertIsInstance(inst.value, ir.Const) + binop_consts.add(inst.value.value) + self.assertEqual(binop_consts, {len(x) for x in inp}) diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_annotations.py 
b/venv/lib/python3.10/site-packages/numba/tests/test_annotations.py new file mode 100644 index 0000000000000000000000000000000000000000..90e70eac808722d4b156c7ed36bb8ddd09bf9000 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_annotations.py @@ -0,0 +1,252 @@ +import re +from io import StringIO + +import numba +from numba.core import types +from numba import jit, njit +from numba.tests.support import override_config, TestCase +import unittest + +try: + import jinja2 +except ImportError: + jinja2 = None + +try: + import pygments +except ImportError: + pygments = None + + +@unittest.skipIf(jinja2 is None, "please install the 'jinja2' package") +class TestAnnotation(TestCase): + + @TestCase.run_test_in_subprocess # annotations compound per module + def test_exercise_code_path(self): + """ + Ensures template.html is available + """ + + def foo(n, a): + s = a + for i in range(n): + s += i + return s + + cfunc = njit((types.int32, types.int32))(foo) + cres = cfunc.overloads[cfunc.signatures[0]] + ta = cres.type_annotation + + buf = StringIO() + ta.html_annotate(buf) + output = buf.getvalue() + buf.close() + self.assertIn("foo", output) + + @TestCase.run_test_in_subprocess # annotations compound per module + def test_exercise_code_path_with_lifted_loop(self): + """ + Ensures that lifted loops are handled correctly in obj mode + """ + # the functions to jit + def bar(x): + return x + + def foo(x): + h = 0. + for i in range(x): # py 38 needs two loops for one to lift?! 
+ h = h + i + for k in range(x): + h = h + k + if x: + h = h - bar(x) + return h + + # compile into an isolated context + cfunc = jit((types.intp,), forceobj=True, looplift=True)(foo) + cres = cfunc.overloads[cfunc.signatures[0]] + + ta = cres.type_annotation + + buf = StringIO() + ta.html_annotate(buf) + output = buf.getvalue() + buf.close() + self.assertIn("bar", output) + self.assertIn("foo", output) + self.assertIn("LiftedLoop", output) + + @TestCase.run_test_in_subprocess # annotations compound per module + def test_html_output_with_lifted_loop(self): + """ + Test some format and behavior of the html annotation with lifted loop + """ + @numba.jit(forceobj=True) + def udt(x): + object() # to force object mode + z = 0 + for i in range(x): # this line is tagged + z += i + return z + + # Regex pattern to check for the "lifted_tag" in the line of the loop + re_lifted_tag = re.compile( + r'\s*' + r'\s*
    ' + r'\s*' + r'\s*' + r'\s*[0-9]+:' + r'\s*[ ]+for i in range\(x\): # this line is tagged\s*', + re.MULTILINE) + + # Compile int64 version + sig_i64 = (types.int64,) + udt.compile(sig_i64) # compile with lifted loop + cres = udt.overloads[sig_i64] + + # Make html output + buf = StringIO() + cres.type_annotation.html_annotate(buf) + output = buf.getvalue() + buf.close() + + # There should be only one function output. + self.assertEqual(output.count("Function name: udt"), 1) + + sigfmt = "with signature: {} -> pyobject" + self.assertEqual(output.count(sigfmt.format(sig_i64)), 1) + # Ensure the loop is tagged + self.assertEqual(len(re.findall(re_lifted_tag, output)), 1, + msg='%s not found in %s' % (re_lifted_tag, output)) + + # Compile float64 version + sig_f64 = (types.float64,) + udt.compile(sig_f64) + cres = udt.overloads[sig_f64] + + # Make html output + buf = StringIO() + cres.type_annotation.html_annotate(buf) + output = buf.getvalue() + buf.close() + + # There should be two function output + self.assertEqual(output.count("Function name: udt"), 2) + self.assertEqual(output.count(sigfmt.format(sig_i64)), 1) + self.assertEqual(output.count(sigfmt.format(sig_f64)), 1) + # Ensure the loop is tagged in both output + self.assertEqual(len(re.findall(re_lifted_tag, output)), 2) + + @unittest.skipIf(pygments is None, "please install the 'pygments' package") + def test_pretty_print(self): + + @numba.njit + def foo(x, y): + return x, y + + foo(1, 2) + # Exercise the method + foo.inspect_types(pretty=True) + + # Exercise but supply a not None file kwarg, this is invalid + with self.assertRaises(ValueError) as raises: + foo.inspect_types(pretty=True, file='should be None') + self.assertIn('`file` must be None if `pretty=True`', + str(raises.exception)) + + +class TestTypeAnnotation(unittest.TestCase): + + def findpatloc(self, lines, pat): + for i, ln in enumerate(lines): + if pat in ln: + return i + raise ValueError("can't find {!r}".format(pat)) + + def getlines(self, 
func): + strbuf = StringIO() + func.inspect_types(strbuf) + return strbuf.getvalue().splitlines() + + def test_delete(self): + @numba.njit + def foo(appleorange, berrycherry): + return appleorange + berrycherry + + foo(1, 2) + + lines = self.getlines(foo) + + # Ensure deletion show up after their use + sa = self.findpatloc(lines, 'appleorange = arg(0, name=appleorange)') + sb = self.findpatloc(lines, 'berrycherry = arg(1, name=berrycherry)') + + ea = self.findpatloc(lines, 'del appleorange') + eb = self.findpatloc(lines, 'del berrycherry') + + self.assertLess(sa, ea) + self.assertLess(sb, eb) + + def _lifetimes_impl(self, extend): + with override_config('EXTEND_VARIABLE_LIFETIMES', extend): + @njit + def foo(a): + b = a + return b + x = 10 + b = foo(x) + self.assertEqual(b, x) + + lines = self.getlines(foo) + + sa = self.findpatloc(lines, 'a = arg(0, name=a)') + sb = self.findpatloc(lines, 'b = a') + + cast_ret = self.findpatloc(lines, 'cast(value=b)') + + dela = self.findpatloc(lines, 'del a') + delb = self.findpatloc(lines, 'del b') + + return sa, sb, cast_ret, dela, delb + + def test_delete_standard_lifetimes(self): + # without extended lifetimes, dels occur as soon as dead + # + # label 0 + # a = arg(0, name=a) :: int64 + # b = a :: int64 + # del a + # $8return_value.2 = cast(value=b) :: int64 + # del b + # return $8return_value.2 + + sa, sb, cast_ret, dela, delb = self._lifetimes_impl(extend=0) + + self.assertLess(sa, dela) + self.assertLess(sb, delb) + # del a is before cast and del b is after + self.assertLess(dela, cast_ret) + self.assertGreater(delb, cast_ret) + + def test_delete_extended_lifetimes(self): + # with extended lifetimes, dels are last in block: + # + # label 0 + # a = arg(0, name=a) :: int64 + # b = a :: int64 + # $8return_value.2 = cast(value=b) :: int64 + # del a + # del b + # return $8return_value.2 + + sa, sb, cast_ret, dela, delb = self._lifetimes_impl(extend=1) + + self.assertLess(sa, dela) + self.assertLess(sb, delb) + # dels are after 
the cast + self.assertGreater(dela, cast_ret) + self.assertGreater(delb, cast_ret) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_api.py b/venv/lib/python3.10/site-packages/numba/tests/test_api.py new file mode 100644 index 0000000000000000000000000000000000000000..adc67315459012812de11b7a631f2b80c1e4fe5f --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_api.py @@ -0,0 +1,91 @@ +import warnings + +import numba +from numba import jit, njit + +from numba.tests.support import TestCase, always_test +import unittest + + +class TestNumbaModule(TestCase): + """ + Test the APIs exposed by the top-level `numba` module. + """ + + def check_member(self, name): + self.assertTrue(hasattr(numba, name), name) + self.assertIn(name, numba.__all__) + + @always_test + def test_numba_module(self): + # jit + self.check_member("jit") + self.check_member("vectorize") + self.check_member("guvectorize") + self.check_member("njit") + # errors + self.check_member("NumbaError") + self.check_member("TypingError") + # types + self.check_member("int32") + # misc + numba.__version__ # not in __all__ + + +class TestJitDecorator(TestCase): + """ + Test the jit and njit decorators + """ + def test_jit_nopython_forceobj(self): + with self.assertRaises(ValueError) as cm: + jit(nopython=True, forceobj=True) + self.assertIn( + "Only one of 'nopython' or 'forceobj' can be True.", + str(cm.exception) + ) + + def py_func(x): + return x + + jit_func = jit(nopython=True)(py_func) + jit_func(1) + # Check length of nopython_signatures to check + # which mode the function was compiled in + self.assertEqual(len(jit_func.nopython_signatures), 1) + + jit_func = jit(forceobj=True)(py_func) + jit_func(1) + self.assertEqual(len(jit_func.nopython_signatures), 0) + + def test_njit_nopython_forceobj(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always', RuntimeWarning) + njit(forceobj=True) + 
self.assertEqual(len(w), 1) + self.assertIn( + 'forceobj is set for njit and is ignored', str(w[0].message) + ) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always', RuntimeWarning) + njit(nopython=True) + self.assertEqual(len(w), 1) + self.assertIn( + 'nopython is set for njit and is ignored', str(w[0].message) + ) + + def py_func(x): + return x + + jit_func = njit(nopython=True)(py_func) + jit_func(1) + self.assertEqual(len(jit_func.nopython_signatures), 1) + + jit_func = njit(forceobj=True)(py_func) + jit_func(1) + # Since forceobj is ignored this has to compile in nopython mode + self.assertEqual(len(jit_func.nopython_signatures), 1) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_array_analysis.py b/venv/lib/python3.10/site-packages/numba/tests/test_array_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..27bcbb30365851791f495f0a20706102a000a35f --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_array_analysis.py @@ -0,0 +1,1140 @@ +import itertools + +import numpy as np +import sys +from collections import namedtuple +from io import StringIO + +from numba import njit, typeof, prange +from numba.core import ( + types, + typing, + ir, + bytecode, + postproc, + cpu, + registry, + utils, +) +from numba.tests.support import (TestCase, tag, skip_parfors_unsupported, + skip_unless_scipy) +from numba.parfors.array_analysis import EquivSet, ArrayAnalysis +from numba.core.compiler import Compiler, Flags, PassManager +from numba.core.ir_utils import remove_dead +from numba.core.untyped_passes import (ExtractByteCode, TranslateByteCode, FixupArgs, + IRProcessing, DeadBranchPrune, + RewriteSemanticConstants, GenericRewrites, + WithLifting, PreserveIR, InlineClosureLikes) + +from numba.core.typed_passes import (NopythonTypeInference, AnnotateTypes, + NopythonRewrites, IRLegalization) + +from numba.core.compiler_machinery import 
FunctionPass, PassManager, register_pass +from numba.experimental import jitclass +import unittest + + +skip_unsupported = skip_parfors_unsupported + + +# test class for #3700 +@jitclass([('L', types.int32), ('T', types.int32)]) +class ExampleClass3700(object): + def __init__(self, n): + self.L = n + self.T = n + 1 + + +# test value for test_global_tuple +GVAL = (1.2,) +GVAL2 = (3, 4) + + +class TestEquivSet(TestCase): + + """ + Test array_analysis.EquivSet. + """ + def test_insert_equiv(self): + s1 = EquivSet() + s1.insert_equiv('a', 'b') + self.assertTrue(s1.is_equiv('a', 'b')) + self.assertTrue(s1.is_equiv('b', 'a')) + s1.insert_equiv('c', 'd') + self.assertTrue(s1.is_equiv('c', 'd')) + self.assertFalse(s1.is_equiv('c', 'a')) + s1.insert_equiv('a', 'c') + self.assertTrue(s1.is_equiv('a', 'b', 'c', 'd')) + self.assertFalse(s1.is_equiv('a', 'e')) + + def test_intersect(self): + s1 = EquivSet() + s2 = EquivSet() + r = s1.intersect(s2) + self.assertTrue(r.is_empty()) + s1.insert_equiv('a', 'b') + r = s1.intersect(s2) + self.assertTrue(r.is_empty()) + s2.insert_equiv('b', 'c') + r = s1.intersect(s2) + self.assertTrue(r.is_empty()) + s2.insert_equiv('d', 'a') + r = s1.intersect(s2) + self.assertTrue(r.is_empty()) + s1.insert_equiv('a', 'e') + s2.insert_equiv('c', 'd') + r = s1.intersect(s2) + self.assertTrue(r.is_equiv('a', 'b')) + self.assertFalse(r.is_equiv('a', 'e')) + self.assertFalse(r.is_equiv('c', 'd')) + + +@register_pass(analysis_only=False, mutates_CFG=True) +class ArrayAnalysisPass(FunctionPass): + _name = "array_analysis_pass" + + def __init__(self): + FunctionPass.__init__(self) + + def run_pass(self, state): + state.array_analysis = ArrayAnalysis(state.typingctx, state.func_ir, + state.typemap, state.calltypes) + state.array_analysis.run(state.func_ir.blocks) + post_proc = postproc.PostProcessor(state.func_ir) + post_proc.run() + state.func_ir_copies.append(state.func_ir.copy()) + if state.test_idempotence and len(state.func_ir_copies) > 1: + 
state.test_idempotence(state.func_ir_copies) + return False + + +class ArrayAnalysisTester(Compiler): + + @classmethod + def mk_pipeline(cls, args, return_type=None, flags=None, locals={}, + library=None, typing_context=None, target_context=None): + if not flags: + flags = Flags() + flags.nrt = True + if typing_context is None: + typing_context = registry.cpu_target.typing_context + if target_context is None: + target_context = registry.cpu_target.target_context + return cls(typing_context, target_context, library, args, return_type, + flags, locals) + + def compile_to_ir(self, func, test_idempotence=None): + """ + Populate and run compiler pipeline + """ + self.state.func_id = bytecode.FunctionIdentity.from_function(func) + ExtractByteCode().run_pass(self.state) + + self.state.lifted = () + self.state.lifted_from = None + state = self.state + state.func_ir_copies = [] + state.test_idempotence = test_idempotence + + name = 'array_analysis_testing' + pm = PassManager(name) + pm.add_pass(TranslateByteCode, "analyzing bytecode") + pm.add_pass(FixupArgs, "fix up args") + pm.add_pass(IRProcessing, "processing IR") + # pre typing + if not state.flags.no_rewrites: + pm.add_pass(GenericRewrites, "nopython rewrites") + pm.add_pass(RewriteSemanticConstants, "rewrite semantic constants") + pm.add_pass(DeadBranchPrune, "dead branch pruning") + pm.add_pass(InlineClosureLikes, + "inline calls to locally defined closures") + # typing + pm.add_pass(NopythonTypeInference, "nopython frontend") + + if not state.flags.no_rewrites: + pm.add_pass(NopythonRewrites, "nopython rewrites") + + # Array Analysis pass + pm.add_pass(ArrayAnalysisPass, "array analysis") + if test_idempotence: + # Do another pass of array analysis to test idempotence + pm.add_pass(ArrayAnalysisPass, "idempotence array analysis") + # legalise + pm.add_pass(IRLegalization, "ensure IR is legal prior to lowering") + pm.add_pass(AnnotateTypes, "annotate types") + + # partial compile + pm.finalize() + pm.run(state) + 
return state.array_analysis + + +class TestArrayAnalysis(TestCase): + + def compare_ir(self, ir_list): + outputs = [] + for func_ir in ir_list: + remove_dead(func_ir.blocks, func_ir.arg_names, func_ir) + output = StringIO() + func_ir.dump(file=output) + outputs.append(output.getvalue()) + self.assertTrue(len(set(outputs)) == 1) # assert all outputs are equal + + def _compile_and_test(self, fn, arg_tys, asserts=[], equivs=[], idempotent=True): + """ + Compile the given function and get its IR. + """ + test_pipeline = ArrayAnalysisTester.mk_pipeline(arg_tys) + test_idempotence = self.compare_ir if idempotent else lambda x:() + analysis = test_pipeline.compile_to_ir(fn, test_idempotence) + if equivs: + for func in equivs: + # only test the equiv_set of the first block + func(analysis.equiv_sets[0]) + if asserts is None: + self.assertTrue(self._has_no_assertcall(analysis.func_ir)) + else: + for func in asserts: + func(analysis.func_ir, analysis.typemap) + + def _has_assertcall(self, func_ir, typemap, args): + msg = "Sizes of {} do not match".format(', '.join(args)) + for label, block in func_ir.blocks.items(): + for expr in block.find_exprs(op='call'): + fn = func_ir.get_definition(expr.func.name) + if isinstance(fn, ir.Global) and fn.name == 'assert_equiv': + typ = typemap[expr.args[0].name] + if typ.literal_value.startswith(msg): + return True + return False + + def _has_shapecall(self, func_ir, x): + for label, block in func_ir.blocks.items(): + for expr in block.find_exprs(op='getattr'): + if expr.attr == 'shape': + y = func_ir.get_definition(expr.value, lhs_only=True) + z = func_ir.get_definition(x, lhs_only=True) + y = y.name if isinstance(y, ir.Var) else y + z = z.name if isinstance(z, ir.Var) else z + if y == z: + return True + return False + + def _has_no_assertcall(self, func_ir): + for label, block in func_ir.blocks.items(): + for expr in block.find_exprs(op='call'): + fn = func_ir.get_definition(expr.func.name) + if isinstance(fn, ir.Global) and fn.name == 
'assert_equiv': + return False + return True + + def with_assert(self, *args): + return lambda func_ir, typemap: self.assertTrue( + self._has_assertcall(func_ir, typemap, args)) + + def without_assert(self, *args): + return lambda func_ir, typemap: self.assertFalse( + self._has_assertcall(func_ir, typemap, args)) + + def with_equiv(self, *args): + def check(equiv_set): + n = len(args) + for i in range(n - 1): + if not equiv_set.is_equiv(args[i], args[n - 1]): + return False + return True + return lambda equiv_set: self.assertTrue(check(equiv_set)) + + def without_equiv(self, *args): + def check(equiv_set): + n = len(args) + for i in range(n - 1): + if equiv_set.is_equiv(args[i], args[n - 1]): + return False + return True + return lambda equiv_set: self.assertTrue(check(equiv_set)) + + def with_shapecall(self, x): + return lambda func_ir, s: self.assertTrue(self._has_shapecall(func_ir, x)) + + def without_shapecall(self, x): + return lambda func_ir, s: self.assertFalse(self._has_shapecall(func_ir, x)) + + def test_base_cases(self): + def test_0(): + a = np.zeros(0) + b = np.zeros(1) + m = 0 + n = 1 + c = np.zeros((m, n)) + return + self._compile_and_test(test_0, (), + equivs=[self.with_equiv('a', (0,)), + self.with_equiv('b', (1,)), + self.with_equiv('c', (0, 1))]) + + def test_1(n): + a = np.zeros(n) + b = np.zeros(n) + return a + b + self._compile_and_test(test_1, (types.intp,), asserts=None) + + def test_2(m, n): + a = np.zeros(n) + b = np.zeros(m) + return a + b + self._compile_and_test(test_2, (types.intp, types.intp), + asserts=[self.with_assert('a', 'b')]) + + def test_3(n): + a = np.zeros(n) + return a + n + self._compile_and_test(test_3, (types.intp,), asserts=None) + + def test_4(n): + a = np.zeros(n) + b = a + 1 + c = a + 2 + return a + c + self._compile_and_test(test_4, (types.intp,), asserts=None) + + def test_5(n): + a = np.zeros((n, n)) + m = n + b = np.zeros((m, n)) + return a + b + self._compile_and_test(test_5, (types.intp,), asserts=None) + + def 
test_6(m, n): + a = np.zeros(n) + b = np.zeros(m) + d = a + b + e = a - b + return d + e + self._compile_and_test(test_6, (types.intp, types.intp), + asserts=[self.with_assert('a', 'b'), + self.without_assert('d', 'e')]) + + def test_7(m, n): + a = np.zeros(n) + b = np.zeros(m) + if m == 10: + d = a + b + else: + d = a - b + return d + a + self._compile_and_test(test_7, (types.intp, types.intp), + asserts=[self.with_assert('a', 'b'), + self.without_assert('d', 'a')]) + + def test_8(m, n): + a = np.zeros(n) + b = np.zeros(m) + if m == 10: + d = b + a + else: + d = a + a + return b + d + self._compile_and_test(test_8, (types.intp, types.intp), + asserts=[self.with_assert('b', 'a'), + self.with_assert('b', 'd')]) + + def test_9(m): + A = np.ones(m) + s = 0 + while m < 2: + m += 1 + B = np.ones(m) + s += np.sum(A + B) + return s + self._compile_and_test(test_9, (types.intp,), + asserts=[self.with_assert('A', 'B')]) + + def test_10(m, n): + p = m - 1 + q = n + 1 + r = q + 1 + A = np.zeros(p) + B = np.zeros(q) + C = np.zeros(r) + D = np.zeros(m) + s = np.sum(A + B) + t = np.sum(C + D) + return s + t + self._compile_and_test(test_10, (types.intp,types.intp,), + asserts=[self.with_assert('A', 'B'), + self.without_assert('C', 'D')]) + + def test_11(): + a = np.ones(5) + b = np.ones(5) + c = a[1:] + d = b[:-1] + e = len(c) + f = len(d) + return e == f + self._compile_and_test(test_11, (), + equivs=[self.with_equiv('e', 'f')]) + + def test_12(): + a = np.ones(25).reshape((5,5)) + b = np.ones(25).reshape((5,5)) + c = a[1:,:] + d = b[:-1,:] + e = c.shape[0] + f = d.shape[0] + g = len(d) + return e == f + self._compile_and_test(test_12, (), + equivs=[self.with_equiv('e', 'f', 'g')]) + + def test_tup_arg(T): + T2 = T + return T2[0] + + int_arr_typ = types.Array(types.intp, 1, 'C') + self._compile_and_test(test_tup_arg, + (types.Tuple((int_arr_typ, int_arr_typ)),), asserts=None) + + def test_arr_in_tup(m): + A = np.ones(m) + S = (A,) + B = np.ones(len(S[0])) + return B + + 
self._compile_and_test(test_arr_in_tup, (types.intp,), + equivs=[self.with_equiv("A", "B")]) + + T = namedtuple("T", ['a','b']) + def test_namedtuple(n): + r = T(n, n) + return r[0] + self._compile_and_test(test_namedtuple, (types.intp,), + equivs=[self.with_equiv('r', ('n', 'n'))],) + + # np.where is tricky since it returns tuple of arrays + def test_np_where_tup_return(A): + c = np.where(A) + return len(c[0]) + + self._compile_and_test(test_np_where_tup_return, + (types.Array(types.intp, 1, 'C'),), asserts=None) + + def test_shape(A): + (m, n) = A.shape + B = np.ones((m, n)) + return A + B + self._compile_and_test(test_shape, (types.Array(types.intp, 2, 'C'),), + asserts=None) + + def test_cond(l, m, n): + A = np.ones(l) + B = np.ones(m) + C = np.ones(n) + if l == m: + r = np.sum(A + B) + else: + r = 0 + if m != n: + s = 0 + else: + s = np.sum(B + C) + t = 0 + if l == m: + if m == n: + t = np.sum(A + B + C) + return r + s + t + self._compile_and_test(test_cond, (types.intp, types.intp, types.intp), + asserts=None) + + def test_assert_1(m, n): + assert(m == n) + A = np.ones(m) + B = np.ones(n) + return np.sum(A + B) + self._compile_and_test(test_assert_1, (types.intp, types.intp), + asserts=None) + + def test_assert_2(A, B): + assert(A.shape == B.shape) + return np.sum(A + B) + + self._compile_and_test(test_assert_2, (types.Array(types.intp, 1, 'C'), + types.Array(types.intp, 1, 'C'),), + asserts=None) + self._compile_and_test(test_assert_2, (types.Array(types.intp, 2, 'C'), + types.Array(types.intp, 2, 'C'),), + asserts=None) + # expected failure + with self.assertRaises(AssertionError) as raises: + self._compile_and_test(test_assert_2, (types.Array(types.intp, 1, 'C'), + types.Array(types.intp, 2, 'C'),), + asserts=None) + msg = "Dimension mismatch" + self.assertIn(msg, str(raises.exception)) + + + def test_stencilcall(self): + from numba.stencils.stencil import stencil + @stencil + def kernel_1(a): + return 0.25 * (a[0,1] + a[1,0] + a[0,-1] + a[-1,0]) + + def 
test_1(n): + a = np.ones((n,n)) + b = kernel_1(a) + return a + b + + self._compile_and_test(test_1, (types.intp,), + equivs=[self.with_equiv('a', 'b')], + asserts=[self.without_assert('a', 'b')]) + + def test_2(n): + a = np.ones((n,n)) + b = np.ones((n+1,n+1)) + kernel_1(a, out=b) + return a + + self._compile_and_test(test_2, (types.intp,), + equivs=[self.without_equiv('a', 'b')]) + + @stencil(standard_indexing=('c',)) + def kernel_2(a, b, c): + return a[0,1,0] + b[0,-1,0] + c[0] + + def test_3(n): + a = np.arange(64).reshape(4,8,2) + b = np.arange(64).reshape(n,8,2) + u = np.zeros(1) + v = kernel_2(a, b, u) + return v + + # standard indexed arrays are not considered in size equivalence + self._compile_and_test(test_3, (types.intp,), + equivs=[self.with_equiv('a', 'b', 'v'), + self.without_equiv('a', 'u')], + asserts=[self.with_assert('a', 'b')]) + + def test_slice(self): + def test_1(m, n): + A = np.zeros(m) + B = np.zeros(n) + s = np.sum(A + B) + C = A[1:m-1] + D = B[1:n-1] + t = np.sum(C + D) + return s + t + self._compile_and_test(test_1, (types.intp,types.intp,), + asserts=[self.with_assert('A', 'B'), + self.without_assert('C', 'D')], + idempotent=False) + + def test_2(m): + A = np.zeros(m) + B = A[0:m-3] + C = A[1:m-2] + D = A[2:m-1] + E = B + C + return D + E + self._compile_and_test(test_2, (types.intp,), + asserts=[self.without_assert('B', 'C'), + self.without_assert('D', 'E')], + idempotent=False) + + def test_3(m): + A = np.zeros((m,m)) + B = A[0:m-2,0:m-2] + C = A[1:m-1,1:m-1] + E = B + C + return E + self._compile_and_test(test_3, (types.intp,), + asserts=[self.without_assert('B', 'C')], + idempotent=False) + + def test_4(m): + A = np.zeros((m,m)) + B = A[0:m-2,:] + C = A[1:m-1,:] + E = B + C + return E + self._compile_and_test(test_4, (types.intp,), + asserts=[self.without_assert('B', 'C')], + idempotent=False) + + def test_5(m,n): + A = np.zeros(m) + B = np.zeros(m) + B[0:m-2] = A[1:m-1] + C = np.zeros(n) + D = A[1:m-1] + C[0:n-2] = D + # B and C are 
not necessarily of the same size because we can't + # derive m == n from (m-2) % m == (n-2) % n + return B + C + self._compile_and_test(test_5, (types.intp,types.intp), + asserts=[self.without_assert('B', 'A'), + self.with_assert('C', 'D'), + self.with_assert('B', 'C')], + idempotent=False) + + def test_6(m): + A = np.zeros((m,m)) + B = A[0:m-2,:-1] + C = A[1:m-1,:-1] + E = B + C + return E + self._compile_and_test(test_6, (types.intp,), + asserts=[self.without_assert('B', 'C')], + idempotent=False) + + def test_7(m): + A = np.zeros((m,m)) + B = A[0:m-2,-3:-1] + C = A[1:m-1,-4:-2] + E = B + C + return E + self._compile_and_test(test_7, (types.intp,), + asserts=[self.with_assert('B', 'C')], + idempotent=False) + + def test_8(m): + A = np.zeros((m,m)) + B = A[:m-2,0:] + C = A[1:-1,:] + E = B + C + return E + self._compile_and_test(test_8, (types.intp,), + asserts=[self.without_assert('B', 'C')], + idempotent=False) + + def test_9(m): + # issues #3461 and #3554, checks equivalence on empty slices + # and across binop + A = np.zeros((m)) + B = A[:0] # B = array([], dtype=int64) + C = A[1:] + D = A[:-1:-1] # D = array([], dtype=int64) + E = B + D + F = E + F += 1 # F = array([], dtype=int64) + return A, C, F + self._compile_and_test(test_9, (types.intp,), + equivs=[self.without_equiv('B', 'C'), + self.with_equiv('A', 'm'), + self.with_equiv('B', 'D'), + self.with_equiv('F', 'D'),], + idempotent=False) + + @skip_unless_scipy + def test_numpy_calls(self): + def test_zeros(n): + a = np.zeros(n) + b = np.zeros((n, n)) + c = np.zeros(shape=(n, n)) + self._compile_and_test(test_zeros, (types.intp,), + equivs=[self.with_equiv('a', 'n'), + self.with_equiv('b', ('n', 'n')), + self.with_equiv('b', 'c')]) + + def test_0d_array(n): + a = np.array(1) + b = np.ones(2) + return a + b + self._compile_and_test(test_0d_array, (types.intp,), + equivs=[self.without_equiv('a', 'b')], + asserts=[self.without_shapecall('a')]) + + def test_ones(n): + a = np.ones(n) + b = np.ones((n, n)) + c = 
np.ones(shape=(n, n)) + self._compile_and_test(test_ones, (types.intp,), + equivs=[self.with_equiv('a', 'n'), + self.with_equiv('b', ('n', 'n')), + self.with_equiv('b', 'c')]) + + def test_empty(n): + a = np.empty(n) + b = np.empty((n, n)) + c = np.empty(shape=(n, n)) + self._compile_and_test(test_empty, (types.intp,), + equivs=[self.with_equiv('a', 'n'), + self.with_equiv('b', ('n', 'n')), + self.with_equiv('b', 'c')]) + + def test_eye(n): + a = np.eye(n) + b = np.eye(N=n) + c = np.eye(N=n, M=n) + d = np.eye(N=n, M=n + 1) + self._compile_and_test(test_eye, (types.intp,), + equivs=[self.with_equiv('a', ('n', 'n')), + self.with_equiv('b', ('n', 'n')), + self.with_equiv('b', 'c'), + self.without_equiv('b', 'd')]) + + def test_identity(n): + a = np.identity(n) + self._compile_and_test(test_identity, (types.intp,), + equivs=[self.with_equiv('a', ('n', 'n'))]) + + def test_diag(n): + a = np.identity(n) + b = np.diag(a) + c = np.diag(b) + d = np.diag(a, k=1) + self._compile_and_test(test_diag, (types.intp,), + equivs=[self.with_equiv('b', ('n',)), + self.with_equiv('c', ('n', 'n'))], + asserts=[self.with_shapecall('d'), + self.without_shapecall('c')]) + + def test_array_like(a): + b = np.empty_like(a) + c = np.zeros_like(a) + d = np.ones_like(a) + e = np.full_like(a, 1) + f = np.asfortranarray(a) + + self._compile_and_test(test_array_like, (types.Array(types.intp, 2, 'C'),), + equivs=[ + self.with_equiv('a', 'b', 'd', 'e', 'f')], + asserts=[self.with_shapecall('a'), + self.without_shapecall('b')]) + + def test_reshape(n): + a = np.ones(n * n) + b = a.reshape((n, n)) + return a.sum() + b.sum() + self._compile_and_test(test_reshape, (types.intp,), + equivs=[self.with_equiv('b', ('n', 'n'))], + asserts=[self.without_shapecall('b')]) + + + def test_transpose(m, n): + a = np.ones((m, n)) + b = a.T + c = a.transpose() + # Numba njit cannot compile explicit transpose call! 
+ # c = np.transpose(b) + self._compile_and_test(test_transpose, (types.intp, types.intp), + equivs=[self.with_equiv('a', ('m', 'n')), + self.with_equiv('b', ('n', 'm')), + self.with_equiv('c', ('n', 'm'))]) + + + def test_transpose_3d(m, n, k): + a = np.ones((m, n, k)) + b = a.T + c = a.transpose() + d = a.transpose(2,0,1) + dt = a.transpose((2,0,1)) + e = a.transpose(0,2,1) + et = a.transpose((0,2,1)) + # Numba njit cannot compile explicit transpose call! + # c = np.transpose(b) + self._compile_and_test(test_transpose_3d, (types.intp, types.intp, types.intp), + equivs=[self.with_equiv('a', ('m', 'n', 'k')), + self.with_equiv('b', ('k', 'n', 'm')), + self.with_equiv('c', ('k', 'n', 'm')), + self.with_equiv('d', ('k', 'm', 'n')), + self.with_equiv('dt', ('k', 'm', 'n')), + self.with_equiv('e', ('m', 'k', 'n')), + self.with_equiv('et', ('m', 'k', 'n'))]) + + def test_real_imag_attr(m, n): + a = np.ones((m, n)) + b = a.real + c = a.imag + + self._compile_and_test(test_real_imag_attr, (types.intp, types.intp), + equivs=[self.with_equiv('a', ('m', 'n')), + self.with_equiv('b', ('m', 'n')), + self.with_equiv('c', ('m', 'n')), + ]) + + def test_random(n): + a0 = np.random.rand(n) + a1 = np.random.rand(n, n) + b0 = np.random.randn(n) + b1 = np.random.randn(n, n) + c0 = np.random.ranf(n) + c1 = np.random.ranf((n, n)) + c2 = np.random.ranf(size=(n, n)) + d0 = np.random.random_sample(n) + d1 = np.random.random_sample((n, n)) + d2 = np.random.random_sample(size=(n, n)) + e0 = np.random.sample(n) + e1 = np.random.sample((n, n)) + e2 = np.random.sample(size=(n, n)) + f0 = np.random.random(n) + f1 = np.random.random((n, n)) + f2 = np.random.random(size=(n, n)) + g0 = np.random.standard_normal(n) + g1 = np.random.standard_normal((n, n)) + g2 = np.random.standard_normal(size=(n, n)) + h0 = np.random.chisquare(10, n) + h1 = np.random.chisquare(10, (n, n)) + h2 = np.random.chisquare(10, size=(n, n)) + i0 = np.random.weibull(10, n) + i1 = np.random.weibull(10, (n, n)) + i2 = 
np.random.weibull(10, size=(n, n)) + j0 = np.random.power(10, n) + j1 = np.random.power(10, (n, n)) + j2 = np.random.power(10, size=(n, n)) + k0 = np.random.geometric(0.1, n) + k1 = np.random.geometric(0.1, (n, n)) + k2 = np.random.geometric(0.1, size=(n, n)) + l0 = np.random.exponential(10, n) + l1 = np.random.exponential(10, (n, n)) + l2 = np.random.exponential(10, size=(n, n)) + m0 = np.random.poisson(10, n) + m1 = np.random.poisson(10, (n, n)) + m2 = np.random.poisson(10, size=(n, n)) + n0 = np.random.rayleigh(10, n) + n1 = np.random.rayleigh(10, (n, n)) + n2 = np.random.rayleigh(10, size=(n, n)) + o0 = np.random.normal(0, 1, n) + o1 = np.random.normal(0, 1, (n, n)) + o2 = np.random.normal(0, 1, size=(n, n)) + p0 = np.random.uniform(0, 1, n) + p1 = np.random.uniform(0, 1, (n, n)) + p2 = np.random.uniform(0, 1, size=(n, n)) + q0 = np.random.beta(0.1, 1, n) + q1 = np.random.beta(0.1, 1, (n, n)) + q2 = np.random.beta(0.1, 1, size=(n, n)) + r0 = np.random.binomial(0, 1, n) + r1 = np.random.binomial(0, 1, (n, n)) + r2 = np.random.binomial(0, 1, size=(n, n)) + s0 = np.random.f(0.1, 1, n) + s1 = np.random.f(0.1, 1, (n, n)) + s2 = np.random.f(0.1, 1, size=(n, n)) + t0 = np.random.gamma(0.1, 1, n) + t1 = np.random.gamma(0.1, 1, (n, n)) + t2 = np.random.gamma(0.1, 1, size=(n, n)) + u0 = np.random.lognormal(0, 1, n) + u1 = np.random.lognormal(0, 1, (n, n)) + u2 = np.random.lognormal(0, 1, size=(n, n)) + v0 = np.random.laplace(0, 1, n) + v1 = np.random.laplace(0, 1, (n, n)) + v2 = np.random.laplace(0, 1, size=(n, n)) + w0 = np.random.randint(0, 10, n) + w1 = np.random.randint(0, 10, (n, n)) + w2 = np.random.randint(0, 10, size=(n, n)) + x0 = np.random.triangular(-3, 0, 10, n) + x1 = np.random.triangular(-3, 0, 10, (n, n)) + x2 = np.random.triangular(-3, 0, 10, size=(n, n)) + + last = ord('x') + 1 + vars1d = [('n',)] + [chr(x) + '0' for x in range(ord('a'), last)] + vars2d = [('n', 'n')] + [chr(x) + '1' for x in range(ord('a'), last)] + vars2d += [chr(x) + '1' for x in 
range(ord('c'), last)] + self._compile_and_test(test_random, (types.intp,), + equivs=[self.with_equiv(*vars1d), + self.with_equiv(*vars2d)]) + + def test_concatenate(m, n): + a = np.ones(m) + b = np.ones(n) + c = np.concatenate((a, b)) + d = np.ones((2, n)) + e = np.ones((3, n)) + f = np.concatenate((d, e)) + # Numba njit cannot compile concatenate with single array! + # g = np.ones((3,4,5)) + # h = np.concatenate(g) + i = np.ones((m, 2)) + j = np.ones((m, 3)) + k = np.concatenate((i, j), axis=1) + l = np.ones((m, n)) + o = np.ones((m, n)) + p = np.concatenate((l, o)) + # Numba njit cannot support list argument! + # q = np.concatenate([d, e]) + self._compile_and_test(test_concatenate, (types.intp, types.intp), + equivs=[self.with_equiv('f', (5, 'n')), + #self.with_equiv('h', (3 + 4 + 5, )), + self.with_equiv('k', ('m', 5))], + asserts=[self.with_shapecall('c'), + self.without_shapecall('f'), + self.without_shapecall('k'), + self.with_shapecall('p')]) + + def test_vsd_stack(): + k = np.ones((2,)) + l = np.ones((2, 3)) + o = np.ones((2, 3, 4)) + p = np.vstack((k, k)) + q = np.vstack((l, l)) + r = np.hstack((k, k)) + s = np.hstack((l, l)) + t = np.dstack((k, k)) + u = np.dstack((l, l)) + v = np.dstack((o, o)) + + self._compile_and_test(test_vsd_stack, (), + equivs=[self.with_equiv('p', (2, 2)), + self.with_equiv('q', (4, 3)), + self.with_equiv('r', (4,)), + self.with_equiv('s', (2, 6)), + self.with_equiv('t', (1, 2, 2)), + self.with_equiv('u', (2, 3, 2)), + self.with_equiv('v', (2, 3, 8)), + ]) + + def test_stack(m, n): + a = np.ones(m) + b = np.ones(n) + c = np.stack((a, b)) + d = np.ones((m, n)) + e = np.ones((m, n)) + f = np.stack((d, e)) + g = np.stack((d, e), axis=0) + h = np.stack((d, e), axis=1) + i = np.stack((d, e), axis=2) + j = np.stack((d, e), axis=-1) + + self._compile_and_test(test_stack, (types.intp, types.intp), + equivs=[self.with_equiv('m', 'n'), + self.with_equiv('c', (2, 'm')), + self.with_equiv( + 'f', 'g', (2, 'm', 'n')), + self.with_equiv( + 
'h', ('m', 2, 'n')), + self.with_equiv( + 'i', 'j', ('m', 'n', 2)), + ]) + + def test_linspace(m, n): + a = np.linspace(m, n) + b = np.linspace(m, n, 10) + # Numba njit does not support num keyword to linspace call! + # c = np.linspace(m,n,num=10) + self._compile_and_test(test_linspace, (types.float64, types.float64), + equivs=[self.with_equiv('a', (50,)), + self.with_equiv('b', (10,))]) + + def test_dot(l, m, n): + a = np.dot(np.ones(1), np.ones(1)) + b = np.dot(np.ones(2), np.ones((2, 3))) + # Numba njit does not support higher dimensional inputs + #c = np.dot(np.ones(2),np.ones((3,2,4))) + #d = np.dot(np.ones(2),np.ones((3,5,2,4))) + e = np.dot(np.ones((1, 2)), np.ones(2,)) + #f = np.dot(np.ones((1,2,3)),np.ones(3,)) + #g = np.dot(np.ones((1,2,3,4)),np.ones(4,)) + h = np.dot(np.ones((2, 3)), np.ones((3, 4))) + i = np.dot(np.ones((m, n)), np.ones((n, m))) + j = np.dot(np.ones((m, m)), np.ones((l, l))) + + self._compile_and_test(test_dot, (types.intp, types.intp, types.intp), + equivs=[self.without_equiv('a', (1,)), # not array + self.with_equiv('b', (3,)), + self.with_equiv('e', (1,)), + self.with_equiv('h', (2, 4)), + self.with_equiv('i', ('m', 'm')), + self.with_equiv('j', ('m', 'm')), + ], + asserts=[self.with_assert('m', 'l')]) + + def test_broadcast(m, n): + a = np.ones((m, n)) + b = np.ones(n) + c = a + b + d = np.ones((1, n)) + e = a + c - d + self._compile_and_test(test_broadcast, (types.intp, types.intp), + equivs=[self.with_equiv('a', 'c', 'e')], + asserts=None) + + # make sure shape of a global tuple of ints is handled properly + def test_global_tuple(): + a = np.ones(GVAL2) + b = np.ones(GVAL2) + + self._compile_and_test(test_global_tuple, (), + equivs=[self.with_equiv('a', 'b')], + asserts=None) + + +class TestArrayAnalysisParallelRequired(TestCase): + """This is to just split out tests that need the parallel backend and + therefore serialised execution. 
+ """ + + _numba_parallel_test_ = False + + @skip_unsupported + def test_misc(self): + + @njit + def swap(x, y): + return(y, x) + + def test_bug2537(m): + a = np.ones(m) + b = np.ones(m) + for i in range(m): + a[i], b[i] = swap(a[i], b[i]) + + try: + njit(test_bug2537, parallel=True)(10) + except IndexError: + self.fail("test_bug2537 raised IndexError!") + + @skip_unsupported + def test_global_namedtuple(self): + Row = namedtuple('Row', ['A']) + row = Row(3) + + def test_impl(): + rr = row + res = rr.A + if res == 2: + res = 3 + return res + + self.assertEqual(njit(test_impl, parallel=True)(), test_impl()) + + @skip_unsupported + def test_array_T_issue_3700(self): + + def test_impl(t_obj, X): + for i in prange(t_obj.T): + X[i] = i + return X.sum() + + n = 5 + t_obj = ExampleClass3700(n) + X1 = np.zeros(t_obj.T) + X2 = np.zeros(t_obj.T) + self.assertEqual( + njit(test_impl, parallel=True)(t_obj, X1), test_impl(t_obj, X2)) + + @skip_unsupported + def test_slice_shape_issue_3380(self): + # these tests shouldn't throw error in array analysis + def test_impl1(): + a = slice(None, None) + return True + + self.assertEqual(njit(test_impl1, parallel=True)(), test_impl1()) + + def test_impl2(A, a): + b = a + return A[b] + + A = np.arange(10) + a = slice(None) + np.testing.assert_array_equal( + njit(test_impl2, parallel=True)(A, a), test_impl2(A, a)) + + @skip_unsupported + def test_slice_dtype_issue_5056(self): + # see issue 5056 + + @njit(parallel=True) + def test_impl(data): + N = data.shape[0] + sums = np.zeros(N) + for i in prange(N): + sums[i] = np.sum(data[np.int32(0):np.int32(1)]) + return sums + + data = np.arange(10.) + np.testing.assert_array_equal(test_impl(data), test_impl.py_func(data)) + + @skip_unsupported + def test_global_tuple(self): + """make sure a global tuple with non-integer values does not cause errors + (test for #6726). 
+ """ + + def test_impl(): + d = GVAL[0] + return d + + self.assertEqual(njit(test_impl, parallel=True)(), test_impl()) + + +class TestArrayAnalysisInterface(TestCase): + def test_analyze_op_call_interface(self): + # gather _analyze_op_call_* + aoc = {} + for fname in dir(ArrayAnalysis): + if fname.startswith('_analyze_op_call_'): + aoc[fname] = getattr(ArrayAnalysis, fname) + # check interface + def iface_stub(self, scope, equiv_set, loc, args, kws): + pass + expected = utils.pysignature(iface_stub) + for k, v in aoc.items(): + got = utils.pysignature(v) + with self.subTest(fname=k, sig=got): + self.assertEqual(got, expected) + + @skip_unsupported + def test_array_analysis_extensions(self): + # Test that the `array_analysis` object in `array_analysis_extensions` + # can perform analysis on the scope using `equiv_sets`. + from numba.parfors.parfor import Parfor + from numba.parfors import array_analysis + + orig_parfor = array_analysis.array_analysis_extensions[Parfor] + + shared = {'counter': 0} + + def testcode(array_analysis): + # Find call node corresponding to the ``A = empty(n)`` + func_ir = array_analysis.func_ir + for call in func_ir.blocks[0].find_exprs('call'): + callee = func_ir.get_definition(call.func) + if getattr(callee, "value", None) is empty: + if getattr(call.args[0], 'name', None) == 'n': + break + else: + return + + variable_A = func_ir.get_assignee(call) + # n must be equiv to + es = array_analysis.equiv_sets[0] + self.assertTrue(es.is_equiv('n', variable_A.name)) + shared['counter'] += 1 + + def new_parfor(parfor, equiv_set, typemap, array_analysis): + """Recursive array analysis for parfor nodes. 
+ """ + testcode(array_analysis) + # Call original + return orig_parfor( + parfor, equiv_set, typemap, array_analysis, + ) + + try: + # Replace the array-analysis extension for Parfor node + array_analysis.array_analysis_extensions[Parfor] = new_parfor + + empty = np.empty # avoid scanning a getattr in the IR + def f(n): + A = empty(n) + for i in prange(n): + S = np.arange(i) + A[i] = S.sum() + return A + 1 + + got = njit(parallel=True)(f)(10) + executed_count = shared['counter'] + self.assertGreater(executed_count, 0) + finally: + # Re-install the original handler + array_analysis.array_analysis_extensions[Parfor] = orig_parfor + + # Check normal execution + expected = njit(parallel=True)(f)(10) + self.assertPreciseEqual(got, expected) + # Make sure we have uninstalled the handler + self.assertEqual(executed_count, shared['counter']) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_array_attr.py b/venv/lib/python3.10/site-packages/numba/tests/test_array_attr.py new file mode 100644 index 0000000000000000000000000000000000000000..28e19458630384d59bf59150c1bfaf4ca7ec2902 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_array_attr.py @@ -0,0 +1,410 @@ +import numpy as np + +import unittest +from numba.np.numpy_support import from_dtype +from numba import njit, typeof +from numba.core import types +from numba.tests.support import (TestCase, MemoryLeakMixin, + skip_parfors_unsupported) +from numba.core.errors import TypingError +from numba.experimental import jitclass + + +def array_dtype(a): + return a.dtype + + +def use_dtype(a, b): + return a.view(b.dtype) + + +def dtype_eq_int64(a): + return a.dtype == np.dtype('int64') + + +def array_itemsize(a): + return a.itemsize + + +def array_nbytes(a): + return a.nbytes + + +def array_shape(a, i): + return a.shape[i] + + +def array_strides(a, i): + return a.strides[i] + + +def array_ndim(a): + return a.ndim + + +def array_size(a): + return 
a.size + + +def array_flags_contiguous(a): + return a.flags.contiguous + +def array_flags_c_contiguous(a): + return a.flags.c_contiguous + +def array_flags_f_contiguous(a): + return a.flags.f_contiguous + + +def nested_array_itemsize(a): + return a.f.itemsize + +def nested_array_nbytes(a): + return a.f.nbytes + +def nested_array_shape(a): + return a.f.shape + + +def nested_array_strides(a): + return a.f.strides + + +def nested_array_ndim(a): + return a.f.ndim + + +def nested_array_size(a): + return a.f.size + + +def size_after_slicing_usecase(buf, i): + sliced = buf[i] + # Make sure size attribute is not lost + return sliced.size + + +def array_ctypes_data(arr): + return arr.ctypes.data + + +def array_real(arr): + return arr.real + + +def array_imag(arr): + return arr.imag + + +class TestArrayAttr(MemoryLeakMixin, TestCase): + + def setUp(self): + super(TestArrayAttr, self).setUp() + self.a = np.arange(20, dtype=np.int32).reshape(4, 5) + + def check_unary(self, pyfunc, arr): + aryty = typeof(arr) + cfunc = self.get_cfunc(pyfunc, (aryty,)) + expected = pyfunc(arr) + self.assertPreciseEqual(cfunc(arr), expected) + # Retry with forced any layout + cfunc = self.get_cfunc(pyfunc, (aryty.copy(layout='A'),)) + self.assertPreciseEqual(cfunc(arr), expected) + + def check_unary_with_arrays(self, pyfunc,): + self.check_unary(pyfunc, self.a) + self.check_unary(pyfunc, self.a.T) + self.check_unary(pyfunc, self.a[::2]) + # 0-d array + arr = np.array([42]).reshape(()) + self.check_unary(pyfunc, arr) + # array with an empty dimension + arr = np.zeros(0) + self.check_unary(pyfunc, arr) + + # check with reshape + self.check_unary(pyfunc, arr.reshape((1, 0, 2))) + + def get_cfunc(self, pyfunc, argspec): + return njit(argspec)(pyfunc) + + def test_shape(self): + pyfunc = array_shape + cfunc = self.get_cfunc(pyfunc, (types.int32[:,:], types.int32)) + + for i in range(self.a.ndim): + self.assertEqual(pyfunc(self.a, i), cfunc(self.a, i)) + + def test_strides(self): + pyfunc = 
array_strides + cfunc = self.get_cfunc(pyfunc, (types.int32[:,:], types.int32)) + + for i in range(self.a.ndim): + self.assertEqual(pyfunc(self.a, i), cfunc(self.a, i)) + + def test_ndim(self): + self.check_unary_with_arrays(array_ndim) + + def test_size(self): + self.check_unary_with_arrays(array_size) + + def test_itemsize(self): + self.check_unary_with_arrays(array_itemsize) + + def test_nbytes(self): + self.check_unary_with_arrays(array_nbytes) + + def test_dtype(self): + pyfunc = array_dtype + self.check_unary(pyfunc, self.a) + dtype = np.dtype([('x', np.int8), ('y', np.int8)]) + arr = np.zeros(4, dtype=dtype) + self.check_unary(pyfunc, arr) + + def test_use_dtype(self): + # Test using the dtype attribute inside the Numba function itself + b = np.empty(1, dtype=np.int16) + pyfunc = use_dtype + cfunc = self.get_cfunc(pyfunc, (typeof(self.a), typeof(b))) + expected = pyfunc(self.a, b) + self.assertPreciseEqual(cfunc(self.a, b), expected) + + def test_dtype_equal(self): + # Test checking if a dtype is equal to another dtype + pyfunc = dtype_eq_int64 + self.check_unary(pyfunc, np.empty(1, dtype=np.int16)) + self.check_unary(pyfunc, np.empty(1, dtype=np.int64)) + + def test_flags_contiguous(self): + self.check_unary_with_arrays(array_flags_contiguous) + + def test_flags_c_contiguous(self): + self.check_unary_with_arrays(array_flags_c_contiguous) + + def test_flags_f_contiguous(self): + self.check_unary_with_arrays(array_flags_f_contiguous) + + +class TestNestedArrayAttr(MemoryLeakMixin, unittest.TestCase): + def setUp(self): + super(TestNestedArrayAttr, self).setUp() + dtype = np.dtype([('a', np.int32), ('f', np.int32, (2, 5))]) + self.a = np.recarray(1, dtype)[0] + self.nbrecord = from_dtype(self.a.dtype) + + def get_cfunc(self, pyfunc): + return njit((self.nbrecord,))(pyfunc) + + def test_shape(self): + pyfunc = nested_array_shape + cfunc = self.get_cfunc(pyfunc) + + self.assertEqual(pyfunc(self.a), cfunc(self.a)) + + def test_strides(self): + pyfunc = 
nested_array_strides + cfunc = self.get_cfunc(pyfunc) + + self.assertEqual(pyfunc(self.a), cfunc(self.a)) + + def test_ndim(self): + pyfunc = nested_array_ndim + cfunc = self.get_cfunc(pyfunc) + + self.assertEqual(pyfunc(self.a), cfunc(self.a)) + + def test_nbytes(self): + pyfunc = nested_array_nbytes + cfunc = self.get_cfunc(pyfunc) + + self.assertEqual(pyfunc(self.a), cfunc(self.a)) + + def test_size(self): + pyfunc = nested_array_size + cfunc = self.get_cfunc(pyfunc) + + self.assertEqual(pyfunc(self.a), cfunc(self.a)) + + def test_itemsize(self): + pyfunc = nested_array_itemsize + cfunc = self.get_cfunc(pyfunc) + + self.assertEqual(pyfunc(self.a), cfunc(self.a)) + + +class TestSlicedArrayAttr(MemoryLeakMixin, unittest.TestCase): + def test_size_after_slicing(self): + pyfunc = size_after_slicing_usecase + cfunc = njit(pyfunc) + arr = np.arange(2 * 5).reshape(2, 5) + for i in range(arr.shape[0]): + self.assertEqual(pyfunc(arr, i), cfunc(arr, i)) + arr = np.arange(2 * 5 * 3).reshape(2, 5, 3) + for i in range(arr.shape[0]): + self.assertEqual(pyfunc(arr, i), cfunc(arr, i)) + + +class TestArrayCTypes(MemoryLeakMixin, TestCase): + + _numba_parallel_test_ = False + + def test_array_ctypes_data(self): + pyfunc = array_ctypes_data + cfunc = njit(pyfunc) + arr = np.arange(3) + self.assertEqual(pyfunc(arr), cfunc(arr)) + + @skip_parfors_unsupported + def test_array_ctypes_ref_error_in_parallel(self): + # Issue #2887 + from ctypes import CFUNCTYPE, c_void_p, c_int32, c_double, c_bool + + @CFUNCTYPE(c_bool, c_void_p, c_int32, c_void_p) + def callback(inptr, size, outptr): + # A ctypes callback that manipulate the incoming pointers. 
+ try: + inbuf = (c_double * size).from_address(inptr) + outbuf = (c_double * 1).from_address(outptr) + a = np.ndarray(size, buffer=inbuf, dtype=np.float64) + b = np.ndarray(1, buffer=outbuf, dtype=np.float64) + b[0] = (a + a.size)[0] + return True + except: + import traceback + traceback.print_exception() + return False + + + # parallel=True is required to reproduce the error. + @njit(parallel=True) + def foo(size): + arr = np.ones(size) + out = np.empty(1) + # Exercise array.ctypes + inct = arr.ctypes + outct = out.ctypes + # The reference to `arr` is dead by now + status = callback(inct.data, size, outct.data) + return status, out[0] + + size = 3 + status, got = foo(size) + self.assertTrue(status) + self.assertPreciseEqual(got, (np.ones(size) + size)[0]) + + +class TestRealImagAttr(MemoryLeakMixin, TestCase): + def check_complex(self, pyfunc): + cfunc = njit(pyfunc) + # test 1D + size = 10 + arr = np.arange(size) + np.arange(size) * 10j + self.assertPreciseEqual(pyfunc(arr), cfunc(arr)) + # test 2D + arr = arr.reshape(2, 5) + self.assertPreciseEqual(pyfunc(arr), cfunc(arr)) + + def test_complex_real(self): + self.check_complex(array_real) + + def test_complex_imag(self): + self.check_complex(array_imag) + + def check_number_real(self, dtype): + pyfunc = array_real + cfunc = njit(pyfunc) + # test 1D + size = 10 + arr = np.arange(size, dtype=dtype) + self.assertPreciseEqual(pyfunc(arr), cfunc(arr)) + # test 2D + arr = arr.reshape(2, 5) + self.assertPreciseEqual(pyfunc(arr), cfunc(arr)) + # test identity + self.assertEqual(arr.data, pyfunc(arr).data) + self.assertEqual(arr.data, cfunc(arr).data) + # test writable + real = cfunc(arr) + self.assertNotEqual(arr[0, 0], 5) + real[0, 0] = 5 + self.assertEqual(arr[0, 0], 5) + + def test_number_real(self): + """ + Testing .real of non-complex dtypes + """ + for dtype in [np.uint8, np.int32, np.float32, np.float64]: + self.check_number_real(dtype) + + def check_number_imag(self, dtype): + pyfunc = array_imag + cfunc = 
njit(pyfunc) + # test 1D + size = 10 + arr = np.arange(size, dtype=dtype) + self.assertPreciseEqual(pyfunc(arr), cfunc(arr)) + # test 2D + arr = arr.reshape(2, 5) + self.assertPreciseEqual(pyfunc(arr), cfunc(arr)) + # test are zeros + self.assertEqual(cfunc(arr).tolist(), np.zeros_like(arr).tolist()) + # test readonly + imag = cfunc(arr) + with self.assertRaises(ValueError) as raises: + imag[0] = 1 + self.assertEqual('assignment destination is read-only', + str(raises.exception)) + + def test_number_imag(self): + """ + Testing .imag of non-complex dtypes + """ + for dtype in [np.uint8, np.int32, np.float32, np.float64]: + self.check_number_imag(dtype) + + def test_record_real(self): + rectyp = np.dtype([('real', np.float32), ('imag', np.complex64)]) + arr = np.zeros(3, dtype=rectyp) + arr['real'] = np.random.random(arr.size) + arr['imag'] = np.random.random(arr.size) * 1.3j + + # check numpy behavior + # .real is identity + self.assertIs(array_real(arr), arr) + # .imag is zero_like + self.assertEqual(array_imag(arr).tolist(), np.zeros_like(arr).tolist()) + + # check numba behavior + # it's most likely a user error, anyway + jit_array_real = njit(array_real) + jit_array_imag = njit(array_imag) + + with self.assertRaises(TypingError) as raises: + jit_array_real(arr) + self.assertIn("cannot access .real of array of Record", + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + jit_array_imag(arr) + self.assertIn("cannot access .imag of array of Record", + str(raises.exception)) + +class TestJitclassFlagsSegfault(MemoryLeakMixin, TestCase): + """Regression test for: https://github.com/numba/numba/issues/4775 """ + + def test(self): + + @jitclass(dict()) + class B(object): + + def __init__(self): + pass + + def foo(self, X): + X.flags + + Z = B() + Z.foo(np.ones(4)) + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_array_constants.py 
b/venv/lib/python3.10/site-packages/numba/tests/test_array_constants.py new file mode 100644 index 0000000000000000000000000000000000000000..944786fc15168b096e7e0cbdcf511eac8b3df5ff --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_array_constants.py @@ -0,0 +1,190 @@ +import numpy as np + +import unittest +from numba import njit +from numba.core.errors import TypingError +from numba import jit, typeof +from numba.core import types +from numba.tests.support import TestCase + + +a0 = np.array(42) + +s1 = np.int32(64) + +a1 = np.arange(12) +a2 = a1[::2] +a3 = a1.reshape((3, 4)).T + +dt = np.dtype([('x', np.int8), ('y', 'S3')]) + +a4 = np.arange(32, dtype=np.int8).view(dt) +a5 = a4[::-2] + +# A recognizable data string +a6 = np.frombuffer(b"XXXX_array_contents_XXXX", dtype=np.float32) + + +myarray = np.array([1, ]) + + +def getitem0(i): + return a0[()] + + +def getitem1(i): + return a1[i] + + +def getitem2(i): + return a2[i] + + +def getitem3(i): + return a3[i] + + +def getitem4(i): + return a4[i] + + +def getitem5(i): + return a5[i] + + +def getitem6(i): + return a6[i] + + +def use_arrayscalar_const(): + return s1 + + +def write_to_global_array(): + myarray[0] = 1 + + +def bytes_as_const_array(): + return np.frombuffer(b'foo', dtype=np.uint8) + + +class TestConstantArray(TestCase): + """ + Test array constants. 
+ """ + + def check_array_const(self, pyfunc): + cfunc = njit((types.int32,))(pyfunc) + for i in [0, 1, 2]: + np.testing.assert_array_equal(pyfunc(i), cfunc(i)) + + def test_array_const_0d(self): + self.check_array_const(getitem0) + + def test_array_const_1d_contig(self): + self.check_array_const(getitem1) + + def test_array_const_1d_noncontig(self): + self.check_array_const(getitem2) + + def test_array_const_2d(self): + self.check_array_const(getitem3) + + def test_record_array_const_contig(self): + self.check_array_const(getitem4) + + def test_record_array_const_noncontig(self): + self.check_array_const(getitem5) + + def test_array_const_alignment(self): + """ + Issue #1933: the array declaration in the LLVM IR must have + the right alignment specified. + """ + sig = (types.intp,) + cfunc = jit(sig, nopython=True)(getitem6) + ir = cfunc.inspect_llvm(sig) + for line in ir.splitlines(): + if 'XXXX_array_contents_XXXX' in line: + self.assertIn("constant [24 x i8]", line) # sanity check + # Should be the ABI-required alignment for float32 + # on most platforms... + self.assertIn(", align 4", line) + break + else: + self.fail("could not find array declaration in LLVM IR") + + def test_arrayscalar_const(self): + pyfunc = use_arrayscalar_const + cfunc = njit((),)(pyfunc) + self.assertEqual(pyfunc(), cfunc()) + + def test_write_to_global_array(self): + pyfunc = write_to_global_array + with self.assertRaises(TypingError): + njit((),)(pyfunc) + + def test_issue_1850(self): + """ + This issue is caused by an unresolved bug in numpy since version 1.6. + See numpy GH issue #3147. 
+ """ + constarr = np.array([86]) + + def pyfunc(): + return constarr[0] + + cfunc = njit((),)(pyfunc) + out = cfunc() + self.assertEqual(out, 86) + + @TestCase.run_test_in_subprocess # isolate MCJIT use + def test_too_big_to_freeze(self): + """ + Test issue https://github.com/numba/numba/issues/2188 where freezing + a constant array into the code that's prohibitively long and consumes + too much RAM. + """ + def test(biggie): + expect = np.copy(biggie) + self.assertEqual(typeof(biggie), typeof(expect)) + + def pyfunc(): + return biggie + + cfunc = njit((),)(pyfunc) + # Check that the array is not frozen into the LLVM IR. + # LLVM size must be less than the array size. + self.assertLess(len(cfunc.inspect_llvm((),)), biggie.nbytes) + # Run and test result + out = cfunc() + self.assertIs(biggie, out) + # Remove all local references to biggie + del out + biggie = None # del biggie is syntax error in py2 + # Run again and verify result + out = cfunc() + np.testing.assert_equal(expect, out) + self.assertEqual(typeof(expect), typeof(out)) + + nelem = 10**7 # 10 million items + + c_array = np.arange(nelem).reshape(nelem) + f_array = np.asfortranarray(np.random.random((2, nelem // 2))) + self.assertEqual(typeof(c_array).layout, 'C') + self.assertEqual(typeof(f_array).layout, 'F') + # Test C contig + test(c_array) + # Test F contig + test(f_array) + + +class TestConstantBytes(TestCase): + def test_constant_bytes(self): + pyfunc = bytes_as_const_array + cfunc = njit((),)(pyfunc) + np.testing.assert_array_equal(pyfunc(), cfunc()) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_array_exprs.py b/venv/lib/python3.10/site-packages/numba/tests/test_array_exprs.py new file mode 100644 index 0000000000000000000000000000000000000000..090bbd5ad483dde698299ac7f02d81812e11a0ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_array_exprs.py @@ -0,0 +1,742 @@ +import gc +from io import StringIO + 
+import numpy as np + +from numba import njit, vectorize +from numba import typeof +from numba.core import utils, types, typing, ir, compiler, cpu, cgutils +from numba.core.compiler import Compiler, Flags +from numba.core.registry import cpu_target +from numba.tests.support import (MemoryLeakMixin, TestCase, temp_directory, + create_temp_module) +from numba.extending import ( + overload, + models, + lower_builtin, + register_model, + make_attribute_wrapper, + type_callable, + typeof_impl +) +import operator +import textwrap + +import unittest + + +class Namespace(dict): + def __getattr__(s, k): + return s[k] if k in s else super(Namespace, s).__getattr__(k) + +def axy(a, x, y): + return a * x + y + +def ax2(a, x, y): + return a * x + y + +def pos_root(As, Bs, Cs): + return (-Bs + (((Bs ** 2.) - (4. * As * Cs)) ** 0.5)) / (2. * As) + +def neg_root_common_subexpr(As, Bs, Cs): + _2As = 2. * As + _4AsCs = 2. * _2As * Cs + _Bs2_4AsCs = (Bs ** 2. - _4AsCs) + return (-Bs - (_Bs2_4AsCs ** 0.5)) / _2As + +def neg_root_complex_subexpr(As, Bs, Cs): + _2As = 2. * As + _4AsCs = 2. * _2As * Cs + _Bs2_4AsCs = (Bs ** 2. - _4AsCs) + 0j # Force into the complex domain. 
+ return (-Bs - (_Bs2_4AsCs ** 0.5)) / _2As + +vaxy = vectorize(axy) + +def call_stuff(a0, a1): + return np.cos(vaxy(a0, np.sin(a1) - 1., 1.)) + +def are_roots_imaginary(As, Bs, Cs): + return (Bs ** 2 - 4 * As * Cs) < 0 + +def div_add(As, Bs, Cs): + return As / Bs + Cs + +def cube(As): + return As ** 3 + +def explicit_output(a, b, out): + np.cos(a, out) + return np.add(out, b, out) + +def variable_name_reuse(a, b, c, d): + u = a + b + u = u - a * b + u = u * c + d + return u + + +# From issue #1264 +def distance_matrix(vectors): + n_vectors = vectors.shape[0] + result = np.empty((n_vectors, n_vectors), dtype=np.float64) + + for i in range(n_vectors): + for j in range(i, n_vectors): + result[i,j] = result[j,i] = np.sum( + (vectors[i] - vectors[j]) ** 2) ** 0.5 + + return result + + +class RewritesTester(Compiler): + @classmethod + def mk_pipeline(cls, args, return_type=None, flags=None, locals={}, + library=None, typing_context=None, target_context=None): + if not flags: + flags = Flags() + flags.nrt = True + if typing_context is None: + typing_context = cpu_target.typing_context + if target_context is None: + target_context = cpu_target.target_context + return cls(typing_context, target_context, library, args, return_type, + flags, locals) + + @classmethod + def mk_no_rw_pipeline(cls, args, return_type=None, flags=None, locals={}, + library=None, **kws): + if not flags: + flags = Flags() + flags.no_rewrites = True + return cls.mk_pipeline(args, return_type, flags, locals, library, **kws) + + +class TestArrayExpressions(MemoryLeakMixin, TestCase): + + def _compile_function(self, fn, arg_tys): + """ + Compile the given function both without and with rewrites enabled. 
+ """ + control_pipeline = RewritesTester.mk_no_rw_pipeline(arg_tys) + cres_0 = control_pipeline.compile_extra(fn) + control_cfunc = cres_0.entry_point + + test_pipeline = RewritesTester.mk_pipeline(arg_tys) + cres_1 = test_pipeline.compile_extra(fn) + test_cfunc = cres_1.entry_point + + return control_pipeline, control_cfunc, test_pipeline, test_cfunc + + def test_simple_expr(self): + ''' + Using a simple array expression, verify that rewriting is taking + place, and is fusing loops. + ''' + A = np.linspace(0,1,10) + X = np.linspace(2,1,10) + Y = np.linspace(1,2,10) + arg_tys = [typeof(arg) for arg in (A, X, Y)] + + control_pipeline, nb_axy_0, test_pipeline, nb_axy_1 = \ + self._compile_function(axy, arg_tys) + + control_pipeline2 = RewritesTester.mk_no_rw_pipeline(arg_tys) + cres_2 = control_pipeline2.compile_extra(ax2) + nb_ctl = cres_2.entry_point + + expected = nb_axy_0(A, X, Y) + actual = nb_axy_1(A, X, Y) + control = nb_ctl(A, X, Y) + np.testing.assert_array_equal(expected, actual) + np.testing.assert_array_equal(control, actual) + + ir0 = control_pipeline.state.func_ir.blocks + ir1 = test_pipeline.state.func_ir.blocks + ir2 = control_pipeline2.state.func_ir.blocks + self.assertEqual(len(ir0), len(ir1)) + self.assertEqual(len(ir0), len(ir2)) + # The rewritten IR should be smaller than the original. + self.assertGreater(len(ir0[0].body), len(ir1[0].body)) + self.assertEqual(len(ir0[0].body), len(ir2[0].body)) + + def _get_array_exprs(self, block): + for instr in block: + if isinstance(instr, ir.Assign): + if isinstance(instr.value, ir.Expr): + if instr.value.op == 'arrayexpr': + yield instr + + def _array_expr_to_set(self, expr, out=None): + ''' + Convert an array expression tree into a set of operators. 
+ ''' + if out is None: + out = set() + if not isinstance(expr, tuple): + raise ValueError("{0} not a tuple".format(expr)) + operation, operands = expr + processed_operands = [] + for operand in operands: + if isinstance(operand, tuple): + operand, _ = self._array_expr_to_set(operand, out) + processed_operands.append(operand) + processed_expr = operation, tuple(processed_operands) + out.add(processed_expr) + return processed_expr, out + + def _test_root_function(self, fn=pos_root): + A = np.random.random(10) + B = np.random.random(10) + 1. # Increase likelihood of real + # root (could add 2 to force all + # roots to be real). + C = np.random.random(10) + arg_tys = [typeof(arg) for arg in (A, B, C)] + + control_pipeline = RewritesTester.mk_no_rw_pipeline(arg_tys) + control_cres = control_pipeline.compile_extra(fn) + nb_fn_0 = control_cres.entry_point + + test_pipeline = RewritesTester.mk_pipeline(arg_tys) + test_cres = test_pipeline.compile_extra(fn) + nb_fn_1 = test_cres.entry_point + + np_result = fn(A, B, C) + nb_result_0 = nb_fn_0(A, B, C) + nb_result_1 = nb_fn_1(A, B, C) + np.testing.assert_array_almost_equal(np_result, nb_result_0) + np.testing.assert_array_almost_equal(nb_result_0, nb_result_1) + + return Namespace(locals()) + + def _test_cube_function(self, fn=cube): + A = np.arange(10, dtype=np.float64) + arg_tys = (typeof(A),) + + control_pipeline = RewritesTester.mk_no_rw_pipeline(arg_tys) + control_cres = control_pipeline.compile_extra(fn) + nb_fn_0 = control_cres.entry_point + + test_pipeline = RewritesTester.mk_pipeline(arg_tys) + test_cres = test_pipeline.compile_extra(fn) + nb_fn_1 = test_cres.entry_point + + expected = A ** 3 + self.assertPreciseEqual(expected, nb_fn_0(A)) + self.assertPreciseEqual(expected, nb_fn_1(A)) + + return Namespace(locals()) + + def _test_explicit_output_function(self, fn): + """ + Test function having a (a, b, out) signature where *out* is + an output array the function writes into. 
+ """ + A = np.arange(10, dtype=np.float64) + B = A + 1 + arg_tys = (typeof(A),) * 3 + + control_pipeline, control_cfunc, test_pipeline, test_cfunc = \ + self._compile_function(fn, arg_tys) + + def run_func(fn): + out = np.zeros_like(A) + fn(A, B, out) + return out + + expected = run_func(fn) + self.assertPreciseEqual(expected, run_func(control_cfunc)) + self.assertPreciseEqual(expected, run_func(test_cfunc)) + + return Namespace(locals()) + + def _assert_array_exprs(self, block, expected_count): + """ + Assert the *block* has the expected number of array expressions + in it. + """ + rewrite_count = len(list(self._get_array_exprs(block))) + self.assertEqual(rewrite_count, expected_count) + + def _assert_total_rewrite(self, control_ir, test_ir, trivial=False): + """ + Given two dictionaries of Numba IR blocks, check to make sure the + control IR has no array expressions, while the test IR + contains one and only one. + """ + # Both IRs have the same number of blocks (presumably 1) + self.assertEqual(len(control_ir), len(test_ir)) + control_block = control_ir[0].body + test_block = test_ir[0].body + self._assert_array_exprs(control_block, 0) + self._assert_array_exprs(test_block, 1) + if not trivial: + # If the expression wasn't trivial, the block length should + # have decreased (since a sequence of exprs was replaced + # with a single nested array expr). + self.assertGreater(len(control_block), len(test_block)) + + def _assert_no_rewrite(self, control_ir, test_ir): + """ + Given two dictionaries of Numba IR blocks, check to make sure + the control IR and the test IR both have no array expressions. 
+ """ + self.assertEqual(len(control_ir), len(test_ir)) + # All blocks should be identical, and not rewritten + for k, v in control_ir.items(): + control_block = v.body + test_block = test_ir[k].body + self.assertEqual(len(control_block), len(test_block)) + self._assert_array_exprs(control_block, 0) + self._assert_array_exprs(test_block, 0) + + def test_trivial_expr(self): + """ + Ensure even a non-nested expression is rewritten, as it can enable + scalar optimizations such as rewriting `x ** 2`. + """ + ns = self._test_cube_function() + self._assert_total_rewrite(ns.control_pipeline.state.func_ir.blocks, + ns.test_pipeline.state.func_ir.blocks, + trivial=True) + + def test_complicated_expr(self): + ''' + Using the polynomial root function, ensure the full expression is + being put in the same kernel with no remnants of intermediate + array expressions. + ''' + ns = self._test_root_function() + self._assert_total_rewrite(ns.control_pipeline.state.func_ir.blocks, + ns.test_pipeline.state.func_ir.blocks) + + def test_common_subexpressions(self, fn=neg_root_common_subexpr): + ''' + Attempt to verify that rewriting will incorporate user common + subexpressions properly. + ''' + ns = self._test_root_function(fn) + ir0 = ns.control_pipeline.state.func_ir.blocks + ir1 = ns.test_pipeline.state.func_ir.blocks + self.assertEqual(len(ir0), len(ir1)) + self.assertGreater(len(ir0[0].body), len(ir1[0].body)) + self.assertEqual(len(list(self._get_array_exprs(ir0[0].body))), 0) + # Verify that we didn't rewrite everything into a monolithic + # array expression since we stored temporary values in + # variables that might be used later (from the optimization's + # point of view). + array_expr_instrs = list(self._get_array_exprs(ir1[0].body)) + self.assertGreater(len(array_expr_instrs), 1) + # Now check that we haven't duplicated any subexpressions in + # the rewritten code. 
+ array_sets = list(self._array_expr_to_set(instr.value.expr)[1] + for instr in array_expr_instrs) + for expr_set_0, expr_set_1 in zip(array_sets[:-1], array_sets[1:]): + intersections = expr_set_0.intersection(expr_set_1) + if intersections: + self.fail("Common subexpressions detected in array " + "expressions ({0})".format(intersections)) + + def test_complex_subexpression(self): + return self.test_common_subexpressions(neg_root_complex_subexpr) + + def test_ufunc_and_dufunc_calls(self): + ''' + Verify that ufunc and DUFunc calls are being properly included in + array expressions. + ''' + A = np.random.random(10) + B = np.random.random(10) + arg_tys = [typeof(arg) for arg in (A, B)] + + vaxy_descr = vaxy._dispatcher.targetdescr + control_pipeline = RewritesTester.mk_no_rw_pipeline( + arg_tys, + typing_context=vaxy_descr.typing_context, + target_context=vaxy_descr.target_context) + cres_0 = control_pipeline.compile_extra(call_stuff) + nb_call_stuff_0 = cres_0.entry_point + + test_pipeline = RewritesTester.mk_pipeline( + arg_tys, + typing_context=vaxy_descr.typing_context, + target_context=vaxy_descr.target_context) + cres_1 = test_pipeline.compile_extra(call_stuff) + nb_call_stuff_1 = cres_1.entry_point + + expected = call_stuff(A, B) + control = nb_call_stuff_0(A, B) + actual = nb_call_stuff_1(A, B) + np.testing.assert_array_almost_equal(expected, control) + np.testing.assert_array_almost_equal(expected, actual) + + self._assert_total_rewrite(control_pipeline.state.func_ir.blocks, + test_pipeline.state.func_ir.blocks) + + def test_cmp_op(self): + ''' + Verify that comparison operators are supported by the rewriter. + ''' + ns = self._test_root_function(are_roots_imaginary) + self._assert_total_rewrite(ns.control_pipeline.state.func_ir.blocks, + ns.test_pipeline.state.func_ir.blocks) + + def test_explicit_output(self): + """ + Check that ufunc calls with explicit outputs are not rewritten. 
+ """ + ns = self._test_explicit_output_function(explicit_output) + self._assert_no_rewrite(ns.control_pipeline.state.func_ir.blocks, + ns.test_pipeline.state.func_ir.blocks) + + +class TestRewriteIssues(MemoryLeakMixin, TestCase): + + def test_issue_1184(self): + from numba import jit + import numpy as np + + @jit(nopython=True) + def foo(arr): + return arr + + @jit(nopython=True) + def bar(arr): + c = foo(arr) + d = foo(arr) # two calls to trigger rewrite + return c, d + + arr = np.arange(10) + out_c, out_d = bar(arr) + self.assertIs(out_c, out_d) + self.assertIs(out_c, arr) + + def test_issue_1264(self): + n = 100 + x = np.random.uniform(size=n*3).reshape((n,3)) + expected = distance_matrix(x) + actual = njit(distance_matrix)(x) + np.testing.assert_array_almost_equal(expected, actual) + # Avoid sporadic failures in MemoryLeakMixin.tearDown() + gc.collect() + + def test_issue_1372(self): + """Test array expression with duplicated term""" + from numba import njit + + @njit + def foo(a, b): + b = np.sin(b) + return b + b + a + + a = np.random.uniform(10) + b = np.random.uniform(10) + expect = foo.py_func(a, b) + got = foo(a, b) + np.testing.assert_allclose(got, expect) + + def test_unary_arrayexpr(self): + """ + Typing of unary array expression (np.negate) can be incorrect. + """ + @njit + def foo(a, b): + return b - a + -a + + b = 1.5 + a = np.arange(10, dtype=np.int32) + + expect = foo.py_func(a, b) + got = foo(a, b) + self.assertPreciseEqual(got, expect) + + def test_bitwise_arrayexpr(self): + """ + Typing of bitwise boolean array expression can be incorrect + (issue #1813). + """ + @njit + def foo(a, b): + return ~(a & (~b)) + + a = np.array([True, True, False, False]) + b = np.array([False, True, False, True]) + + expect = foo.py_func(a, b) + got = foo(a, b) + self.assertPreciseEqual(got, expect) + + def test_annotations(self): + """ + Type annotation of array expressions with disambiguated + variable names (issue #1466). 
+ """ + cfunc = njit(variable_name_reuse) + + a = np.linspace(0, 1, 10) + cfunc(a, a, a, a) + + buf = StringIO() + cfunc.inspect_types(buf) + res = buf.getvalue() + self.assertIn("# u.1 = ", res) + self.assertIn("# u.2 = ", res) + + def test_issue_5599_name_collision(self): + # The original error will fail in lowering of the array_expr + @njit + def f(x): + arr = np.ones(x) + + for _ in range(2): + val = arr * arr + arr = arr.copy() + return arr + + got = f(5) + expect = f.py_func(5) + np.testing.assert_array_equal(got, expect) + + +class TestSemantics(MemoryLeakMixin, unittest.TestCase): + + def test_division_by_zero(self): + # Array expressions should follow the Numpy error model + # i.e. 1./0. returns +inf instead of raising ZeroDivisionError + pyfunc = div_add + cfunc = njit(pyfunc) + + a = np.float64([0.0, 1.0, float('inf')]) + b = np.float64([0.0, 0.0, 1.0]) + c = np.ones_like(a) + + expect = pyfunc(a, b, c) + got = cfunc(a, b, c) + np.testing.assert_array_equal(expect, got) + + +class TestOptionals(MemoryLeakMixin, unittest.TestCase): + """ Tests the arrival and correct lowering of Optional types at a arrayexpr + derived ufunc, see #3972""" + + def test_optional_scalar_type(self): + + @njit + def arr_expr(x, y): + return x + y + + @njit + def do_call(x, y): + if y > 0: + z = None + else: + z = y + return arr_expr(x, z) + + args = (np.arange(5), -1.2) + + # check result + res = do_call(*args) + expected = do_call.py_func(*args) + np.testing.assert_allclose(res, expected) + + # check type + s = arr_expr.signatures + oty = s[0][1] + self.assertTrue(isinstance(oty, types.Optional)) + self.assertTrue(isinstance(oty.type, types.Float)) + + def test_optional_array_type(self): + + @njit + def arr_expr(x, y): + return x + y + + @njit + def do_call(x, y): + if y[0] > 0: + z = None + else: + z = y + return arr_expr(x, z) + + args = (np.arange(5), np.arange(5.)) + + # check result + res = do_call(*args) + expected = do_call.py_func(*args) + 
np.testing.assert_allclose(res, expected) + + # check type + s = arr_expr.signatures + oty = s[0][1] + self.assertTrue(isinstance(oty, types.Optional)) + self.assertTrue(isinstance(oty.type, types.Array)) + self.assertTrue(isinstance(oty.type.dtype, types.Float)) + + +class TestOptionalsExceptions(MemoryLeakMixin, unittest.TestCase): + # same as above, but the Optional resolves to None and TypeError's + + def test_optional_scalar_type_exception_on_none(self): + + self.disable_leak_check() + + @njit + def arr_expr(x, y): + return x + y + + @njit + def do_call(x, y): + if y > 0: + z = None + else: + z = y + return arr_expr(x, z) + + args = (np.arange(5), 1.0) + + # check result + with self.assertRaises(TypeError) as raises: + do_call(*args) + + self.assertIn("expected float64, got None", str(raises.exception)) + + # check type + s = arr_expr.signatures + oty = s[0][1] + self.assertTrue(isinstance(oty, types.Optional)) + self.assertTrue(isinstance(oty.type, types.Float)) + + def test_optional_array_type_exception_on_none(self): + + self.disable_leak_check() + + @njit + def arr_expr(x, y): + return x + y + + @njit + def do_call(x, y): + if y[0] > 0: + z = None + else: + z = y + return arr_expr(x, z) + + args = (np.arange(5), np.arange(1., 5.)) + + # check result + with self.assertRaises(TypeError) as raises: + do_call(*args) + + excstr = str(raises.exception) + self.assertIn("expected array(float64,", excstr) + self.assertIn("got None", excstr) + + # check type + s = arr_expr.signatures + oty = s[0][1] + self.assertTrue(isinstance(oty, types.Optional)) + self.assertTrue(isinstance(oty.type, types.Array)) + self.assertTrue(isinstance(oty.type.dtype, types.Float)) + + +class TestExternalTypes(MemoryLeakMixin, unittest.TestCase): + """ Tests RewriteArrayExprs with external (user defined) types, + see #5157""" + + source_lines = textwrap.dedent(""" + from numba.core import types + + class FooType(types.Type): + def __init__(self): + super(FooType, 
self).__init__(name='Foo') + """) + + def make_foo_type(self, FooType): + class Foo(object): + def __init__(self, value): + self.value = value + + @register_model(FooType) + class FooModel(models.StructModel): + def __init__(self, dmm, fe_type): + members = [("value", types.intp)] + models.StructModel.__init__(self, dmm, fe_type, members) + + make_attribute_wrapper(FooType, "value", "value") + + @type_callable(Foo) + def type_foo(context): + def typer(value): + return FooType() + + return typer + + @lower_builtin(Foo, types.intp) + def impl_foo(context, builder, sig, args): + typ = sig.return_type + [value] = args + foo = cgutils.create_struct_proxy(typ)(context, builder) + foo.value = value + return foo._getvalue() + + @typeof_impl.register(Foo) + def typeof_foo(val, c): + return FooType() + + return Foo, FooType + + def test_external_type(self): + with create_temp_module(self.source_lines) as test_module: + Foo, FooType = self.make_foo_type(test_module.FooType) + + # sum of foo class instance and array return an array + # binary operation with foo class instance as one of args + @overload(operator.add) + def overload_foo_add(lhs, rhs): + if isinstance(lhs, FooType) and isinstance(rhs, types.Array): + def imp(lhs, rhs): + return np.array([lhs.value, rhs[0]]) + + return imp + + # sum of 2 foo class instances return an array + # binary operation with 2 foo class instances as args + @overload(operator.add) + def overload_foo_add(lhs, rhs): + if isinstance(lhs, FooType) and isinstance(rhs, FooType): + def imp(lhs, rhs): + return np.array([lhs.value, rhs.value]) + + return imp + + # neg of foo class instance return an array + # unary operation with foo class instance arg + @overload(operator.neg) + def overload_foo_neg(x): + if isinstance(x, FooType): + def imp(x): + return np.array([-x.value]) + + return imp + + @njit + def arr_expr_sum1(x, y): + return Foo(x) + np.array([y]) + + @njit + def arr_expr_sum2(x, y): + return Foo(x) + Foo(y) + + @njit + def 
arr_expr_neg(x): + return -Foo(x) + + np.testing.assert_array_equal(arr_expr_sum1(0, 1), np.array([0, 1])) + np.testing.assert_array_equal(arr_expr_sum2(2, 3), np.array([2, 3])) + np.testing.assert_array_equal(arr_expr_neg(4), np.array([-4])) + + +if __name__ == "__main__": + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_array_iterators.py b/venv/lib/python3.10/site-packages/numba/tests/test_array_iterators.py new file mode 100644 index 0000000000000000000000000000000000000000..40e273b35d752c27a24e38d109c87a7d7b6d5f5d --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_array_iterators.py @@ -0,0 +1,555 @@ +import itertools + +import numpy as np + +from numba import jit, njit, typeof +from numba.core import types +from numba.tests.support import TestCase, MemoryLeakMixin +import unittest + + +def array_iter(arr): + total = 0 + for i, v in enumerate(arr): + total += i * v + return total + +def array_iter_items(arr): + return list(iter(arr)) + +def array_view_iter(arr, idx): + total = 0 + for i, v in enumerate(arr[idx]): + total += i * v + return total + +def array_flat(arr, out): + for i, v in enumerate(arr.flat): + out[i] = v + +def array_flat_getitem(arr, ind): + return arr.flat[ind] + +def array_flat_setitem(arr, ind, val): + arr.flat[ind] = val + +def array_flat_sum(arr): + s = 0 + for i, v in enumerate(arr.flat): + s = s + (i + 1) * v + return s + +def array_flat_len(arr): + return len(arr.flat) + +def array_ndenumerate_sum(arr): + s = 0 + for (i, j), v in np.ndenumerate(arr): + s = s + (i + 1) * (j + 1) * v + return s + +def np_ndindex_empty(): + s = 0 + for ind in np.ndindex(()): + s += s + len(ind) + 1 + return s + +def np_ndindex(x, y): + s = 0 + n = 0 + for i, j in np.ndindex(x, y): + s = s + (i + 1) * (j + 1) + return s + +def np_ndindex_array(arr): + s = 0 + n = 0 + for indices in np.ndindex(arr.shape): + for i, j in enumerate(indices): + s = s + (i + 1) * (j + 1) + return s + +def np_nditer1(a): + res 
= [] + for u in np.nditer(a): + res.append(u.item()) + return res + +def np_nditer2(a, b): + res = [] + for u, v in np.nditer((a, b)): + res.append((u.item(), v.item())) + return res + +def np_nditer3(a, b, c): + res = [] + for u, v, w in np.nditer((a, b, c)): + res.append((u.item(), v.item(), w.item())) + return res + +def iter_next(arr): + it = iter(arr) + it2 = iter(arr) + return next(it), next(it), next(it2) + + +# +# Test premature free (see issue #2112). +# The following test allocates an array ``x`` inside the body. +# The compiler will put a ``del x`` right after the last use of ``x``, +# which is right after the creation of the array iterator and +# before the loop is entered. If the iterator does not incref the array, +# the iterator will be reading garbage data of free'ed memory. +# + +def array_flat_premature_free(size): + x = np.arange(size) + res = np.zeros_like(x, dtype=np.intp) + for i, v in enumerate(x.flat): + res[i] = v + return res + +def array_ndenumerate_premature_free(size): + x = np.arange(size) + res = np.zeros_like(x, dtype=np.intp) + for i, v in np.ndenumerate(x): + res[i] = v + return res + + +class TestArrayIterators(MemoryLeakMixin, TestCase): + """ + Test array.flat, np.ndenumerate(), etc. 
+ """ + + def setUp(self): + super(TestArrayIterators, self).setUp() + + def check_array_iter_1d(self, arr): + pyfunc = array_iter + cfunc = njit((typeof(arr),))(pyfunc) + expected = pyfunc(arr) + self.assertPreciseEqual(cfunc(arr), expected) + + def check_array_iter_items(self, arr): + pyfunc = array_iter_items + cfunc = njit((typeof(arr),))(pyfunc) + expected = pyfunc(arr) + self.assertPreciseEqual(cfunc(arr), expected) + + def check_array_view_iter(self, arr, index): + pyfunc = array_view_iter + cfunc = njit((typeof(arr), typeof(index),))(pyfunc) + expected = pyfunc(arr, index) + self.assertPreciseEqual(cfunc(arr, index), expected) + + def check_array_flat(self, arr, arrty=None): + out = np.zeros(arr.size, dtype=arr.dtype) + nb_out = out.copy() + if arrty is None: + arrty = typeof(arr) + + cfunc = njit((arrty, typeof(out),))(array_flat) + + array_flat(arr, out) + cfunc(arr, nb_out) + + self.assertPreciseEqual(out, nb_out) + + def check_array_unary(self, arr, arrty, func): + cfunc = njit((arrty,))(func) + self.assertPreciseEqual(cfunc(arr), func(arr)) + + def check_array_ndenumerate_sum(self, arr, arrty): + self.check_array_unary(arr, arrty, array_ndenumerate_sum) + + def test_array_iter(self): + # Test iterating over arrays + arr = np.arange(6) + self.check_array_iter_1d(arr) + self.check_array_iter_items(arr) + arr = arr[::2] + self.assertFalse(arr.flags.c_contiguous) + self.assertFalse(arr.flags.f_contiguous) + self.check_array_iter_1d(arr) + self.check_array_iter_items(arr) + arr = np.bool_([1, 0, 0, 1]) + self.check_array_iter_1d(arr) + self.check_array_iter_items(arr) + arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + self.check_array_iter_items(arr) + self.check_array_iter_items(arr.T) + + def test_array_iter_yielded_order(self): + # See issue #5692 + @jit(nopython=True) + def foo(arr): + t = [] + for y1 in arr: + for y2 in y1: + t.append(y2.ravel()) + return t + + # 'F' ordered + arr = np.arange(24).reshape((2, 3, 4), order='F') + expected = 
foo.py_func(arr) + got = foo(arr) + self.assertPreciseEqual(expected, got) + + # 'A' ordered, outer strided + arr = np.arange(64).reshape((4, 8, 2), order='F')[::2, :, :] + expected = foo.py_func(arr) + got = foo(arr) + self.assertPreciseEqual(expected, got) + + # 'A' ordered, middle strided + arr = np.arange(64).reshape((4, 8, 2), order='F')[:, ::2, :] + expected = foo.py_func(arr) + got = foo(arr) + self.assertPreciseEqual(expected, got) + + # 'A' ordered, inner strided + arr = np.arange(64).reshape((4, 8, 2), order='F')[:, :, ::2] + expected = foo.py_func(arr) + got = foo(arr) + self.assertPreciseEqual(expected, got) + + @jit(nopython=True) + def flag_check(arr): + out = [] + for sub in arr: + out.append((sub, sub.flags.c_contiguous, + sub.flags.f_contiguous)) + return out + + arr = np.arange(10).reshape((2, 5), order='F') + expected = flag_check.py_func(arr) + got = flag_check(arr) + + self.assertEqual(len(expected), len(got)) + ex_arr, e_flag_c, e_flag_f = expected[0] + go_arr, g_flag_c, g_flag_f = got[0] + np.testing.assert_allclose(ex_arr, go_arr) + self.assertEqual(e_flag_c, g_flag_c) + self.assertEqual(e_flag_f, g_flag_f) + + def test_array_view_iter(self): + # Test iterating over a 1d view over a 2d array + arr = np.arange(12).reshape((3, 4)) + self.check_array_view_iter(arr, 1) + self.check_array_view_iter(arr.T, 1) + arr = arr[::2] + self.check_array_view_iter(arr, 1) + arr = np.bool_([1, 0, 0, 1]).reshape((2, 2)) + self.check_array_view_iter(arr, 1) + + def test_array_flat_3d(self): + arr = np.arange(24).reshape(4, 2, 3) + + arrty = typeof(arr) + self.assertEqual(arrty.ndim, 3) + self.assertEqual(arrty.layout, 'C') + self.assertTrue(arr.flags.c_contiguous) + # Test with C-contiguous array + self.check_array_flat(arr) + # Test with Fortran-contiguous array + arr = arr.transpose() + self.assertFalse(arr.flags.c_contiguous) + self.assertTrue(arr.flags.f_contiguous) + self.assertEqual(typeof(arr).layout, 'F') + self.check_array_flat(arr) + # Test with 
non-contiguous array + arr = arr[::2] + self.assertFalse(arr.flags.c_contiguous) + self.assertFalse(arr.flags.f_contiguous) + self.assertEqual(typeof(arr).layout, 'A') + self.check_array_flat(arr) + # Boolean array + arr = np.bool_([1, 0, 0, 1] * 2).reshape((2, 2, 2)) + self.check_array_flat(arr) + + def test_array_flat_empty(self): + # Test .flat with various shapes of empty arrays, contiguous + # and non-contiguous (see issue #846). + + # Define a local checking function, Numba's `typeof` ends up aliasing + # 0d C and F ordered arrays, so the check needs to go via the compile + # result entry point to bypass type checking. + def check(arr, arrty): + cfunc = njit((arrty,))(array_flat_sum) + cres = cfunc.overloads[(arrty,)] + got = cres.entry_point(arr) + expected = cfunc.py_func(arr) + self.assertPreciseEqual(expected, got) + + arr = np.zeros(0, dtype=np.int32) + arr = arr.reshape(0, 2) + arrty = types.Array(types.int32, 2, layout='C') + check(arr, arrty) + arrty = types.Array(types.int32, 2, layout='F') + check(arr, arrty) + arrty = types.Array(types.int32, 2, layout='A') + check(arr, arrty) + arr = arr.reshape(2, 0) + arrty = types.Array(types.int32, 2, layout='C') + check(arr, arrty) + arrty = types.Array(types.int32, 2, layout='F') + check(arr, arrty) + arrty = types.Array(types.int32, 2, layout='A') + check(arr, arrty) + + def test_array_flat_getitem(self): + # Test indexing of array.flat object + pyfunc = array_flat_getitem + cfunc = njit(pyfunc) + def check(arr, ind): + expected = pyfunc(arr, ind) + self.assertEqual(cfunc(arr, ind), expected) + + arr = np.arange(24).reshape(4, 2, 3) + for i in range(arr.size): + check(arr, i) + arr = arr.T + for i in range(arr.size): + check(arr, i) + arr = arr[::2] + for i in range(arr.size): + check(arr, i) + arr = np.array([42]).reshape(()) + for i in range(arr.size): + check(arr, i) + # Boolean array + arr = np.bool_([1, 0, 0, 1]) + for i in range(arr.size): + check(arr, i) + arr = arr[::2] + for i in range(arr.size): + 
check(arr, i) + + def test_array_flat_setitem(self): + # Test indexing of array.flat object + pyfunc = array_flat_setitem + cfunc = njit(pyfunc) + def check(arr, ind): + # Use np.copy() to keep the layout + expected = np.copy(arr) + got = np.copy(arr) + pyfunc(expected, ind, 123) + cfunc(got, ind, 123) + self.assertPreciseEqual(got, expected) + + arr = np.arange(24).reshape(4, 2, 3) + for i in range(arr.size): + check(arr, i) + arr = arr.T + for i in range(arr.size): + check(arr, i) + arr = arr[::2] + for i in range(arr.size): + check(arr, i) + arr = np.array([42]).reshape(()) + for i in range(arr.size): + check(arr, i) + # Boolean array + arr = np.bool_([1, 0, 0, 1]) + for i in range(arr.size): + check(arr, i) + arr = arr[::2] + for i in range(arr.size): + check(arr, i) + + def test_array_flat_len(self): + # Test len(array.flat) + pyfunc = array_flat_len + cfunc = njit(array_flat_len) + def check(arr): + expected = pyfunc(arr) + self.assertPreciseEqual(cfunc(arr), expected) + + arr = np.arange(24).reshape(4, 2, 3) + check(arr) + arr = arr.T + check(arr) + arr = arr[::2] + check(arr) + arr = np.array([42]).reshape(()) + check(arr) + + def test_array_flat_premature_free(self): + cfunc = njit((types.intp,))(array_flat_premature_free) + expect = array_flat_premature_free(6) + got = cfunc(6) + self.assertTrue(got.sum()) + self.assertPreciseEqual(expect, got) + + def test_array_ndenumerate_2d(self): + arr = np.arange(12).reshape(4, 3) + arrty = typeof(arr) + self.assertEqual(arrty.ndim, 2) + self.assertEqual(arrty.layout, 'C') + self.assertTrue(arr.flags.c_contiguous) + # Test with C-contiguous array + self.check_array_ndenumerate_sum(arr, arrty) + # Test with Fortran-contiguous array + arr = arr.transpose() + self.assertFalse(arr.flags.c_contiguous) + self.assertTrue(arr.flags.f_contiguous) + arrty = typeof(arr) + self.assertEqual(arrty.layout, 'F') + self.check_array_ndenumerate_sum(arr, arrty) + # Test with non-contiguous array + arr = arr[::2] + 
self.assertFalse(arr.flags.c_contiguous) + self.assertFalse(arr.flags.f_contiguous) + arrty = typeof(arr) + self.assertEqual(arrty.layout, 'A') + self.check_array_ndenumerate_sum(arr, arrty) + # Boolean array + arr = np.bool_([1, 0, 0, 1]).reshape((2, 2)) + self.check_array_ndenumerate_sum(arr, typeof(arr)) + + def test_array_ndenumerate_empty(self): + # Define a local checking function, Numba's `typeof` ends up aliasing + # 0d C and F ordered arrays, so the check needs to go via the compile + # result entry point to bypass type checking. + def check(arr, arrty): + cfunc = njit((arrty,))(array_ndenumerate_sum) + cres = cfunc.overloads[(arrty,)] + got = cres.entry_point(arr) + expected = cfunc.py_func(arr) + np.testing.assert_allclose(expected, got) + + arr = np.zeros(0, dtype=np.int32) + arr = arr.reshape(0, 2) + arrty = types.Array(types.int32, 2, layout='C') + check(arr, arrty) + arrty = types.Array(types.int32, 2, layout='F') + check(arr, arrty) + arrty = types.Array(types.int32, 2, layout='A') + check(arr, arrty) + arr = arr.reshape(2, 0) + arrty = types.Array(types.int32, 2, layout='C') + check(arr, arrty) + arrty = types.Array(types.int32, 2, layout='F') + check(arr, arrty) + arrty = types.Array(types.int32, 2, layout='A') + check(arr, arrty) + + def test_array_ndenumerate_premature_free(self): + cfunc = njit((types.intp,))(array_ndenumerate_premature_free) + expect = array_ndenumerate_premature_free(6) + got = cfunc(6) + self.assertTrue(got.sum()) + self.assertPreciseEqual(expect, got) + + def test_np_ndindex(self): + func = np_ndindex + cfunc = njit((types.int32, types.int32,))(func) + self.assertPreciseEqual(cfunc(3, 4), func(3, 4)) + self.assertPreciseEqual(cfunc(3, 0), func(3, 0)) + self.assertPreciseEqual(cfunc(0, 3), func(0, 3)) + self.assertPreciseEqual(cfunc(0, 0), func(0, 0)) + + def test_np_ndindex_array(self): + func = np_ndindex_array + arr = np.arange(12, dtype=np.int32) + 10 + self.check_array_unary(arr, typeof(arr), func) + arr = 
arr.reshape((4, 3)) + self.check_array_unary(arr, typeof(arr), func) + arr = arr.reshape((2, 2, 3)) + self.check_array_unary(arr, typeof(arr), func) + + def test_np_ndindex_empty(self): + func = np_ndindex_empty + cfunc = njit((),)(func) + self.assertPreciseEqual(cfunc(), func()) + + def test_iter_next(self): + # This also checks memory management with iter() and next() + func = iter_next + arr = np.arange(12, dtype=np.int32) + 10 + self.check_array_unary(arr, typeof(arr), func) + + +class TestNdIter(MemoryLeakMixin, TestCase): + """ + Test np.nditer() + """ + + def inputs(self): + # All those inputs are compatible with a (3, 4) main shape + + # scalars + yield np.float32(100) + + # 0-d arrays + yield np.array(102, dtype=np.int16) + + # 1-d arrays + yield np.arange(4).astype(np.complex64) + yield np.arange(8)[::2] + + # 2-d arrays + a = np.arange(12).reshape((3, 4)) + yield a + yield a.copy(order='F') + a = np.arange(24).reshape((6, 4))[::2] + yield a + + def basic_inputs(self): + yield np.arange(4).astype(np.complex64) + yield np.arange(8)[::2] + a = np.arange(12).reshape((3, 4)) + yield a + yield a.copy(order='F') + + def check_result(self, got, expected): + self.assertEqual(set(got), set(expected), (got, expected)) + + def test_nditer1(self): + pyfunc = np_nditer1 + cfunc = jit(nopython=True)(pyfunc) + for a in self.inputs(): + expected = pyfunc(a) + got = cfunc(a) + self.check_result(got, expected) + + def test_nditer2(self): + pyfunc = np_nditer2 + cfunc = jit(nopython=True)(pyfunc) + for a, b in itertools.product(self.inputs(), self.inputs()): + expected = pyfunc(a, b) + got = cfunc(a, b) + self.check_result(got, expected) + + def test_nditer3(self): + pyfunc = np_nditer3 + cfunc = jit(nopython=True)(pyfunc) + # Use a restricted set of inputs, to shorten test time + inputs = self.basic_inputs + for a, b, c in itertools.product(inputs(), inputs(), inputs()): + expected = pyfunc(a, b, c) + got = cfunc(a, b, c) + self.check_result(got, expected) + + def 
test_errors(self): + # Incompatible shapes + pyfunc = np_nditer2 + cfunc = jit(nopython=True)(pyfunc) + + self.disable_leak_check() + + def check_incompatible(a, b): + with self.assertRaises(ValueError) as raises: + cfunc(a, b) + self.assertIn("operands could not be broadcast together", + str(raises.exception)) + + check_incompatible(np.arange(2), np.arange(3)) + a = np.arange(12).reshape((3, 4)) + b = np.arange(3) + check_incompatible(a, b) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_array_manipulation.py b/venv/lib/python3.10/site-packages/numba/tests/test_array_manipulation.py new file mode 100644 index 0000000000000000000000000000000000000000..2539319fd560a759004484e715c31b9ed076a68e --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_array_manipulation.py @@ -0,0 +1,1588 @@ +from functools import partial +from itertools import permutations + +import numpy as np + +import unittest +from numba import jit, njit, from_dtype, typeof +from numba.core.errors import TypingError +from numba.core import types, errors +from numba.tests.support import TestCase, MemoryLeakMixin + +enable_pyobj_flags = {'forceobj': True} + +no_pyobj_flags = {'_nrt': True, 'nopython': True} + + +def from_generic(pyfuncs_to_use): + """Decorator for generic check functions. + Iterates over 'pyfuncs_to_use', calling 'func' with the iterated + item as first argument. 
Example: + + @from_generic(numpy_array_reshape, array_reshape) + def check_only_shape(pyfunc, arr, shape, expected_shape): + # Only check Numba result to avoid Numpy bugs + self.memory_leak_setup() + got = generic_run(pyfunc, arr, shape) + self.assertEqual(got.shape, expected_shape) + self.assertEqual(got.size, arr.size) + del got + self.memory_leak_teardown() + """ + def decorator(func): + def result(*args, **kwargs): + return [func(pyfunc, *args, **kwargs) for pyfunc in pyfuncs_to_use] + return result + return decorator + + +@njit +def array_reshape(arr, newshape): + return arr.reshape(newshape) + +@njit +def numpy_array_reshape(arr, newshape): + return np.reshape(arr, newshape) + + +def numpy_broadcast_to(arr, shape): + return np.broadcast_to(arr, shape) + + +def numpy_broadcast_shapes(*args): + return np.broadcast_shapes(*args) + + +def numpy_broadcast_arrays(*args): + return np.broadcast_arrays(*args) + + +def numpy_broadcast_to_indexing(arr, shape, idx): + return np.broadcast_to(arr, shape)[idx] + + +def flatten_array(a): + return a.flatten() + + +def ravel_array(a): + return a.ravel() + + +def ravel_array_size(a): + return a.ravel().size + + +def numpy_ravel_array(a): + return np.ravel(a) + + +def transpose_array(a): + return a.transpose() + + +def numpy_transpose_array(a): + return np.transpose(a) + + +@njit +def numpy_transpose_array_axes_kwarg(arr, axes): + return np.transpose(arr, axes=axes) + + +@njit +def numpy_transpose_array_axes_kwarg_copy(arr, axes): + return np.transpose(arr, axes=axes).copy() + + +@njit +def array_transpose_axes(arr, axes): + return arr.transpose(axes) + + +@njit +def array_transpose_axes_copy(arr, axes): + return arr.transpose(axes).copy() + + +@njit +def transpose_issue_4708(m, n): + r1 = np.reshape(np.arange(m * n * 3), (m, 3, n)) + r2 = np.reshape(np.arange(n * 3), (n, 3)) + r_dif = (r1 - r2.T).T + r_dif = np.transpose(r_dif, (2, 0, 1)) + z = r_dif + 1 + return z + + +def squeeze_array(a): + return a.squeeze() + + +def 
expand_dims(a, axis): + return np.expand_dims(a, axis) + + +def atleast_1d(*args): + return np.atleast_1d(*args) + + +def atleast_2d(*args): + return np.atleast_2d(*args) + + +def atleast_3d(*args): + return np.atleast_3d(*args) + + +def as_strided1(a): + # as_strided() with implicit shape + strides = (a.strides[0] // 2,) + a.strides[1:] + return np.lib.stride_tricks.as_strided(a, strides=strides) + + +def as_strided2(a): + # Rolling window example as in https://github.com/numba/numba/issues/1884 + window = 3 + shape = a.shape[:-1] + (a.shape[-1] - window + 1, window) + strides = a.strides + (a.strides[-1],) + return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides) + + +@njit +def sliding_window_view(x, window_shape, axis=None): + return np.lib.stride_tricks.sliding_window_view(x, window_shape, axis=axis) + + +def bad_index(arr, arr2d): + x = arr.x, + y = arr.y + # note that `x` is a tuple, which causes a new axis to be created. + arr2d[x, y] = 1.0 + + +def bad_float_index(arr): + # 2D index required for this function because 1D index + # fails typing + return arr[1, 2.0] + + +def numpy_fill_diagonal(arr, val, wrap=False): + return np.fill_diagonal(arr, val, wrap) + + +def numpy_shape(arr): + return np.shape(arr) + +def numpy_size(arr): + return np.size(arr) + + +def numpy_flatnonzero(a): + return np.flatnonzero(a) + + +def numpy_argwhere(a): + return np.argwhere(a) + +def numpy_resize(a, new_shape): + return np.resize(a, new_shape) + + +class TestArrayManipulation(MemoryLeakMixin, TestCase): + """ + Check shape-changing operations on arrays. 
+ """ + def test_array_reshape(self): + pyfuncs_to_use = [array_reshape, numpy_array_reshape] + + def generic_run(pyfunc, arr, shape): + return pyfunc(arr, shape) + + @from_generic(pyfuncs_to_use) + def check(pyfunc, arr, shape): + expected = pyfunc.py_func(arr, shape) + self.memory_leak_setup() + got = generic_run(pyfunc, arr, shape) + self.assertPreciseEqual(got, expected) + del got + self.memory_leak_teardown() + + @from_generic(pyfuncs_to_use) + def check_only_shape(pyfunc, arr, shape, expected_shape): + # Only check Numba result to avoid Numpy bugs + self.memory_leak_setup() + got = generic_run(pyfunc, arr, shape) + self.assertEqual(got.shape, expected_shape) + self.assertEqual(got.size, arr.size) + del got + self.memory_leak_teardown() + + @from_generic(pyfuncs_to_use) + def check_err_shape(pyfunc, arr, shape): + with self.assertRaises(NotImplementedError) as raises: + generic_run(pyfunc, arr, shape) + self.assertEqual(str(raises.exception), + "incompatible shape for array") + + @from_generic(pyfuncs_to_use) + def check_err_size(pyfunc, arr, shape): + with self.assertRaises(ValueError) as raises: + generic_run(pyfunc, arr, shape) + self.assertEqual(str(raises.exception), + "total size of new array must be unchanged") + + @from_generic(pyfuncs_to_use) + def check_err_multiple_negative(pyfunc, arr, shape): + with self.assertRaises(ValueError) as raises: + generic_run(pyfunc, arr, shape) + self.assertEqual(str(raises.exception), + "multiple negative shape values") + + + # C-contiguous + arr = np.arange(24) + check(arr, (24,)) + check(arr, (4, 6)) + check(arr, (8, 3)) + check(arr, (8, 1, 3)) + check(arr, (1, 8, 1, 1, 3, 1)) + arr = np.arange(24).reshape((2, 3, 4)) + check(arr, (24,)) + check(arr, (4, 6)) + check(arr, (8, 3)) + check(arr, (8, 1, 3)) + check(arr, (1, 8, 1, 1, 3, 1)) + check_err_size(arr, ()) + check_err_size(arr, (25,)) + check_err_size(arr, (8, 4)) + arr = np.arange(24).reshape((1, 8, 1, 1, 3, 1)) + check(arr, (24,)) + check(arr, (4, 6)) + 
check(arr, (8, 3)) + check(arr, (8, 1, 3)) + + # F-contiguous + arr = np.arange(24).reshape((2, 3, 4)).T + check(arr, (4, 3, 2)) + check(arr, (1, 4, 1, 3, 1, 2, 1)) + check_err_shape(arr, (2, 3, 4)) + check_err_shape(arr, (6, 4)) + check_err_shape(arr, (2, 12)) + + # Test negative shape value + arr = np.arange(25).reshape(5,5) + check(arr, -1) + check(arr, (-1,)) + check(arr, (-1, 5)) + check(arr, (5, -1, 5)) + check(arr, (5, 5, -1)) + check_err_size(arr, (-1, 4)) + check_err_multiple_negative(arr, (-1, -2, 5, 5)) + check_err_multiple_negative(arr, (5, 5, -1, -1)) + + # 0-sized arrays + def check_empty(arr): + check(arr, 0) + check(arr, (0,)) + check(arr, (1, 0, 2)) + check(arr, (0, 55, 1, 0, 2)) + # -1 is buggy in Numpy with 0-sized arrays + check_only_shape(arr, -1, (0,)) + check_only_shape(arr, (-1,), (0,)) + check_only_shape(arr, (0, -1), (0, 0)) + check_only_shape(arr, (4, -1), (4, 0)) + check_only_shape(arr, (-1, 0, 4), (0, 0, 4)) + check_err_size(arr, ()) + check_err_size(arr, 1) + check_err_size(arr, (1, 2)) + + arr = np.array([]) + check_empty(arr) + check_empty(arr.reshape((3, 2, 0))) + + # Exceptions leak references + self.disable_leak_check() + + def test_array_transpose_axes(self): + pyfuncs_to_use = [numpy_transpose_array_axes_kwarg, + numpy_transpose_array_axes_kwarg_copy, + array_transpose_axes, + array_transpose_axes_copy] + + @from_generic(pyfuncs_to_use) + def check(pyfunc, arr, axes): + expected = pyfunc.py_func(arr, axes) + got = pyfunc(arr, axes) + self.assertPreciseEqual(got, expected) + self.assertEqual(got.flags.f_contiguous, + expected.flags.f_contiguous) + self.assertEqual(got.flags.c_contiguous, + expected.flags.c_contiguous) + + @from_generic(pyfuncs_to_use) + def check_err_axis_repeated(pyfunc, arr, axes): + with self.assertRaises(ValueError) as raises: + pyfunc(arr, axes) + self.assertEqual(str(raises.exception), + "repeated axis in transpose") + + @from_generic(pyfuncs_to_use) + def check_err_axis_oob(pyfunc, arr, axes): + with 
self.assertRaises(ValueError) as raises: + pyfunc(arr, axes) + self.assertEqual(str(raises.exception), + "axis is out of bounds for array of given dimension") + + @from_generic(pyfuncs_to_use) + def check_err_invalid_args(pyfunc, arr, axes): + with self.assertRaises((TypeError, TypingError)): + pyfunc(arr, axes) + + arrs = [np.arange(24), + np.arange(24).reshape(4, 6), + np.arange(24).reshape(2, 3, 4), + np.arange(24).reshape(1, 2, 3, 4), + np.arange(64).reshape(8, 4, 2)[::3,::2,:]] + + for i in range(len(arrs)): + # First check `None`, the default, which is to reverse dims + check(arrs[i], None) + # Check supplied axis permutations + for axes in permutations(tuple(range(arrs[i].ndim))): + ndim = len(axes) + neg_axes = tuple([x - ndim for x in axes]) + check(arrs[i], axes) + check(arrs[i], neg_axes) + + @from_generic([transpose_issue_4708]) + def check_issue_4708(pyfunc, m, n): + expected = pyfunc.py_func(m, n) + got = pyfunc(m, n) + # values in arrays are equals, + # but stronger assertions not hold (layout and strides equality) + np.testing.assert_equal(got, expected) + + check_issue_4708(3, 2) + check_issue_4708(2, 3) + check_issue_4708(5, 4) + + # Exceptions leak references + self.disable_leak_check() + + check_err_invalid_args(arrs[1], "foo") + check_err_invalid_args(arrs[1], ("foo",)) + check_err_invalid_args(arrs[1], 5.3) + check_err_invalid_args(arrs[2], (1.2, 5)) + + check_err_axis_repeated(arrs[1], (0, 0)) + check_err_axis_repeated(arrs[2], (2, 0, 0)) + check_err_axis_repeated(arrs[3], (3, 2, 1, 1)) + + check_err_axis_oob(arrs[0], (1,)) + check_err_axis_oob(arrs[0], (-2,)) + check_err_axis_oob(arrs[1], (0, 2)) + check_err_axis_oob(arrs[1], (-3, 2)) + check_err_axis_oob(arrs[1], (0, -3)) + check_err_axis_oob(arrs[2], (3, 1, 2)) + check_err_axis_oob(arrs[2], (-4, 1, 2)) + check_err_axis_oob(arrs[3], (3, 1, 2, 5)) + check_err_axis_oob(arrs[3], (3, 1, 2, -5)) + + with self.assertRaises(TypingError) as e: + 
jit(nopython=True)(numpy_transpose_array)((np.array([0, 1]),)) + self.assertIn("np.transpose does not accept tuples", + str(e.exception)) + + def test_numpy_resize_basic(self): + pyfunc = numpy_resize + cfunc = njit(pyfunc) + def inputs(): + # Taken from https://github.com/numpy/numpy/blob/f0b2fca91a1f5f50ff696895072f6fe9e69c1466/numpy/core/tests/test_numeric.py#L24-L64 noqa: E501 + yield np.array([[1, 2], [3, 4]]), (2, 4) + yield np.array([[1, 2], [3, 4]]), (4, 2) + yield np.array([[1, 2], [3, 4]]), (4, 3) + yield np.array([[1, 2], [3, 4]]), (0,) + yield np.array([[1, 2], [3, 4]]), (0, 2) + yield np.array([[1, 2], [3, 4]]), (2, 0) + yield np.zeros(0, dtype = float), (2, 1) + # other + yield np.array([[1, 2], [3, 4]]), (4,) + yield np.array([[1, 2], [3, 4]]), 4 + yield np.zeros((1, 3), dtype = int), (2, 1) + yield np.array([], dtype = float), (4, 2) + yield [0, 1, 2, 3], (2, 3) + yield 4, (2, 3) + + for a, new_shape in inputs(): + self.assertPreciseEqual(pyfunc(a, new_shape), cfunc(a, new_shape)) + + def test_numpy_resize_exception(self): + # Exceptions leak references + self.disable_leak_check() + + cfunc = njit(numpy_resize) + + with self.assertRaises(TypingError) as raises: + cfunc("abc", (2, 3)) + self.assertIn(('The argument "a" must be array-like'), + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc(np.array([[0,1],[2,3]]), "abc") + self.assertIn(('The argument "new_shape" must be an integer or ' + 'a tuple of integers'), + str(raises.exception)) + + with self.assertRaises(ValueError) as raises: + cfunc(np.array([[0,1],[2,3]]), (-2, 3)) + self.assertIn(('All elements of `new_shape` must be non-negative'), + str(raises.exception)) + + with self.assertRaises(ValueError) as raises: + cfunc(np.array([[0,1],[2,3]]), -4) + self.assertIn(('All elements of `new_shape` must be non-negative'), + str(raises.exception)) + + def test_expand_dims(self): + pyfunc = expand_dims + cfunc = njit(pyfunc) + + def check(arr, axis): + expected = 
pyfunc(arr, axis) + self.memory_leak_setup() + got = cfunc(arr, axis) + self.assertPreciseEqual(got, expected) + del got + self.memory_leak_teardown() + + def check_all_axes(arr): + for axis in range(-arr.ndim - 1, arr.ndim + 1): + check(arr, axis) + + # 1d + arr = np.arange(5) + check_all_axes(arr) + # 3d (C, F, A) + arr = np.arange(24).reshape((2, 3, 4)) + check_all_axes(arr) + check_all_axes(arr.T) + check_all_axes(arr[::-1]) + # 0d + arr = np.array(42) + check_all_axes(arr) + + def test_expand_dims_exceptions(self): + pyfunc = expand_dims + cfunc = jit(nopython=True)(pyfunc) + arr = np.arange(5) + + with self.assertTypingError() as raises: + cfunc('hello', 3) + self.assertIn('First argument "a" must be an array', str(raises.exception)) + + with self.assertTypingError() as raises: + cfunc(arr, 'hello') + self.assertIn('Argument "axis" must be an integer', str(raises.exception)) + + + def check_atleast_nd(self, pyfunc, cfunc): + def check_result(got, expected): + # We would like to check the result has the same contiguity, + # but we can't rely on the "flags" attribute when there are + # 1-sized dimensions. 
+ self.assertStridesEqual(got, expected) + self.assertPreciseEqual(got.flatten(), expected.flatten()) + + def check_single(arg): + check_result(cfunc(arg), pyfunc(arg)) + + def check_tuple(*args): + expected_tuple = pyfunc(*args) + got_tuple = cfunc(*args) + self.assertEqual(len(got_tuple), len(expected_tuple)) + for got, expected in zip(got_tuple, expected_tuple): + check_result(got, expected) + + # 0d + a1 = np.array(42) + a2 = np.array(5j) + check_single(a1) + check_tuple(a1, a2) + # 1d + b1 = np.arange(5) + b2 = np.arange(6) + 1j + b3 = b1[::-1] + check_single(b1) + check_tuple(b1, b2, b3) + # 2d + c1 = np.arange(6).reshape((2, 3)) + c2 = c1.T + c3 = c1[::-1] + check_single(c1) + check_tuple(c1, c2, c3) + # 3d + d1 = np.arange(24).reshape((2, 3, 4)) + d2 = d1.T + d3 = d1[::-1] + check_single(d1) + check_tuple(d1, d2, d3) + # 4d + e = np.arange(16).reshape((2, 2, 2, 2)) + check_single(e) + # mixed dimensions + check_tuple(a1, b2, c3, d2) + + def test_atleast_1d(self): + pyfunc = atleast_1d + cfunc = jit(nopython=True)(pyfunc) + self.check_atleast_nd(pyfunc, cfunc) + + def test_atleast_2d(self): + pyfunc = atleast_2d + cfunc = jit(nopython=True)(pyfunc) + self.check_atleast_nd(pyfunc, cfunc) + + def test_atleast_3d(self): + pyfunc = atleast_3d + cfunc = jit(nopython=True)(pyfunc) + self.check_atleast_nd(pyfunc, cfunc) + + def check_as_strided(self, pyfunc): + cfunc = njit(pyfunc) + + def check(arr): + expected = pyfunc(arr) + got = cfunc(arr) + self.assertPreciseEqual(got, expected) + + arr = np.arange(24) + check(arr) + check(arr.reshape((6, 4))) + check(arr.reshape((4, 1, 6))) + + def test_as_strided(self): + self.check_as_strided(as_strided1) + self.check_as_strided(as_strided2) + + def test_as_strided_stride_none(self): + + @jit + def foo(): + arr = np.arange(24).reshape((6, 4)) + return np.lib.stride_tricks.as_strided(arr, strides=None) + + with self.assertRaises(errors.TypingError) as raises: + foo() + + msg = "strides argument cannot be None" + 
self.assertIn(msg, str(raises.exception)) + + def test_sliding_window_view(self): + def check(arr, window_shape, axis): + # Our version is always writeable (NumPy default is False). + expected = np.lib.stride_tricks.sliding_window_view( + arr, window_shape, axis, writeable=True + ) + got = sliding_window_view(arr, window_shape, axis) + self.assertPreciseEqual(got, expected) + + # 1d array, different ways of specifying the axis. + arr1 = np.arange(24) + for axis in [None, 0, -1, (0,)]: + with self.subTest(f"1d array, axis={axis}"): + check(arr1, 5, axis) + + # 2d array, 1d window. + arr2 = np.arange(200).reshape(10, 20) + for axis in [0, -1]: + with self.subTest(f"2d array, axis={axis}"): + check(arr2, 5, axis) + + # 2d array, 2d window. + for axis in [None, (0, 1), (1, 0), (1, -2)]: + with self.subTest(f"2d array, axis={axis}"): + check(arr2, (5, 8), axis) + + # 4d array, 2d window. + arr4 = np.arange(200).reshape(4, 5, 5, 2) + for axis in [(1, 2), (-2, -3)]: + with self.subTest(f"4d array, axis={axis}"): + check(arr4, (3, 2), axis) + + # Repeated axis. + with self.subTest("2d array, repeated axes"): + check(arr2, (5, 3, 3), (0, 1, 0)) + + def test_sliding_window_view_errors(self): + def _raises(msg, *args): + with self.assertRaises(ValueError) as raises: + sliding_window_view(*args) + self.assertIn(msg, str(raises.exception)) + + def _typing_error(msg, *args): + with self.assertRaises(errors.TypingError) as raises: + sliding_window_view(*args) + self.assertIn(msg, str(raises.exception)) + + # Exceptions leak references + self.disable_leak_check() + + arr1 = np.arange(24) + arr2 = np.arange(200).reshape(10, 20) + + # Window shape cannot be larger than dimension or negative. 
+ with self.subTest("1d window shape too large"): + _raises("window_shape cannot be larger", arr1, 25, None) + with self.subTest("2d window shape too large"): + _raises("window_shape cannot be larger", arr2, (4, 21), None) + with self.subTest("1d window negative size"): + _raises("`window_shape` cannot contain negative", arr1, -1, None) + with self.subTest("2d window with a negative size"): + _raises("`window_shape` cannot contain negative", arr2, (4, -3), None) + + # window_shape and axis parameters must be compatible. + with self.subTest("1d array, 2d window shape"): + _raises("matching length window_shape and axis", arr1, (10, 2), None) + with self.subTest("2d window shape, only one axis given"): + _raises("matching length window_shape and axis", arr2, (10, 2), 1) + with self.subTest("1d window shape, 2 axes given"): + _raises("matching length window_shape and axis", arr1, 5, (0, 0)) + + # Axis values out of bounds. + with self.subTest("1d array, second axis"): + _raises("Argument axis out of bounds", arr1, 4, 1) + with self.subTest("1d array, axis -2"): + _raises("Argument axis out of bounds", arr1, 4, -2) + with self.subTest("2d array, fourth axis"): + _raises("Argument axis out of bounds", arr2, (4, 4), (0, 3)) + with self.subTest("2d array, axis -3"): + _raises("Argument axis out of bounds", arr2, (4, 4), (0, -3)) + + # Useful messages for unsupported types. 
+ with self.subTest("window_shape=None"): + _typing_error( + "window_shape must be an integer or tuple of integer", arr1, None + ) + with self.subTest("window_shape=float"): + _typing_error( + "window_shape must be an integer or tuple of integer", arr1, 3.1 + ) + with self.subTest("window_shape=tuple(float)"): + _typing_error( + "window_shape must be an integer or tuple of integer", arr1, (3.1,) + ) + with self.subTest("axis=float"): + _typing_error( + "axis must be None, an integer or tuple of integer", arr1, 4, 3.1 + ) + with self.subTest("axis=tuple(float)"): + _typing_error( + "axis must be None, an integer or tuple of integer", arr1, 4, (3.1,) + ) + + + def test_flatten_array(self, flags=enable_pyobj_flags, layout='C'): + a = np.arange(9).reshape(3, 3) + if layout == 'F': + a = a.T + + pyfunc = flatten_array + arraytype1 = typeof(a) + if layout == 'A': + # Force A layout + arraytype1 = arraytype1.copy(layout='A') + + self.assertEqual(arraytype1.layout, layout) + cfunc = jit((arraytype1,), **flags)(pyfunc) + + expected = pyfunc(a) + got = cfunc(a) + np.testing.assert_equal(expected, got) + + def test_flatten_array_npm(self): + self.test_flatten_array(flags=no_pyobj_flags) + self.test_flatten_array(flags=no_pyobj_flags, layout='F') + self.test_flatten_array(flags=no_pyobj_flags, layout='A') + + def test_ravel_array(self, flags=enable_pyobj_flags): + def generic_check(pyfunc, a, assume_layout): + # compile + arraytype1 = typeof(a) + self.assertEqual(arraytype1.layout, assume_layout) + cfunc = jit((arraytype1,), **flags)(pyfunc) + + expected = pyfunc(a) + got = cfunc(a) + # Check result matches + np.testing.assert_equal(expected, got) + # Check copying behavior + py_copied = (a.ctypes.data != expected.ctypes.data) + nb_copied = (a.ctypes.data != got.ctypes.data) + self.assertEqual(py_copied, assume_layout != 'C') + self.assertEqual(py_copied, nb_copied) + + check_method = partial(generic_check, ravel_array) + check_function = partial(generic_check, 
numpy_ravel_array) + + def check(*args, **kwargs): + check_method(*args, **kwargs) + check_function(*args, **kwargs) + + # Check 2D + check(np.arange(9).reshape(3, 3), assume_layout='C') + check(np.arange(9).reshape(3, 3, order='F'), assume_layout='F') + check(np.arange(18).reshape(3, 3, 2)[:, :, 0], assume_layout='A') + + # Check 3D + check(np.arange(18).reshape(2, 3, 3), assume_layout='C') + check(np.arange(18).reshape(2, 3, 3, order='F'), assume_layout='F') + check(np.arange(36).reshape(2, 3, 3, 2)[:, :, :, 0], assume_layout='A') + + def test_ravel_array_size(self, flags=enable_pyobj_flags): + a = np.arange(9).reshape(3, 3) + + pyfunc = ravel_array_size + arraytype1 = typeof(a) + cfunc = jit((arraytype1,), **flags)(pyfunc) + + expected = pyfunc(a) + got = cfunc(a) + np.testing.assert_equal(expected, got) + + def test_ravel_array_npm(self): + self.test_ravel_array(flags=no_pyobj_flags) + + def test_ravel_array_size_npm(self): + self.test_ravel_array_size(flags=no_pyobj_flags) + + def test_transpose_array(self, flags=enable_pyobj_flags): + @from_generic([transpose_array, numpy_transpose_array]) + def check(pyfunc): + a = np.arange(9).reshape(3, 3) + + arraytype1 = typeof(a) + cfunc = jit((arraytype1,), **flags)(pyfunc) + + expected = pyfunc(a) + got = cfunc(a) + np.testing.assert_equal(expected, got) + + check() + + def test_transpose_array_npm(self): + self.test_transpose_array(flags=no_pyobj_flags) + + def test_squeeze_array(self, flags=enable_pyobj_flags): + a = np.arange(2 * 1 * 3 * 1 * 4).reshape(2, 1, 3, 1, 4) + + pyfunc = squeeze_array + arraytype1 = typeof(a) + cfunc = jit((arraytype1,), **flags)(pyfunc) + + expected = pyfunc(a) + got = cfunc(a) + np.testing.assert_equal(expected, got) + + def test_squeeze_array_npm(self): + with self.assertRaises(errors.TypingError) as raises: + self.test_squeeze_array(flags=no_pyobj_flags) + + self.assertIn("squeeze", str(raises.exception)) + + def test_add_axis(self): + @njit + def np_new_axis_getitem(a, idx): + return 
a[idx] + + @njit + def np_new_axis_setitem(a, idx, item): + a[idx] = item + return a + + a = np.arange(4 * 5 * 6 * 7).reshape((4, 5, 6, 7)) + idx_cases = [ + (slice(None), np.newaxis), + (np.newaxis, slice(None)), + (slice(1), np.newaxis, 1), + (np.newaxis, 2, slice(None)), + (slice(1), Ellipsis, np.newaxis, 1), + (1, np.newaxis, Ellipsis), + (np.newaxis, slice(1), np.newaxis, 1), + (1, Ellipsis, None, np.newaxis), + (np.newaxis, slice(1), Ellipsis, np.newaxis, 1), + (1, np.newaxis, np.newaxis, Ellipsis), + (np.newaxis, 1, np.newaxis, Ellipsis), + (slice(3), 1, np.newaxis, None), + (np.newaxis, 1, Ellipsis, None), + ] + pyfunc_getitem = np_new_axis_getitem.py_func + cfunc_getitem = np_new_axis_getitem + + pyfunc_setitem = np_new_axis_setitem.py_func + cfunc_setitem = np_new_axis_setitem + + for idx in idx_cases: + expected = pyfunc_getitem(a, idx) + got = cfunc_getitem(a, idx) + np.testing.assert_equal(expected, got) + + a_empty = np.zeros_like(a) + item = a[idx] + + expected = pyfunc_setitem(a_empty.copy(), idx, item) + got = cfunc_setitem(a_empty.copy(), idx, item) + np.testing.assert_equal(expected, got) + + def test_bad_index_npm(self): + with self.assertTypingError() as raises: + arraytype1 = from_dtype(np.dtype([('x', np.int32), + ('y', np.int32)])) + arraytype2 = types.Array(types.int32, 2, 'C') + njit((arraytype1, arraytype2))(bad_index) + self.assertIn('Unsupported array index type', str(raises.exception)) + + def test_bad_float_index_npm(self): + with self.assertTypingError() as raises: + njit((types.Array(types.float64, 2, 'C'),))(bad_float_index) + self.assertIn('Unsupported array index type float64', + str(raises.exception)) + + def test_fill_diagonal_basic(self): + pyfunc = numpy_fill_diagonal + cfunc = jit(nopython=True)(pyfunc) + + def _shape_variations(n): + # square + yield (n, n) + # tall and thin + yield (2 * n, n) + # short and fat + yield (n, 2 * n) + # a bit taller than wide; odd numbers of rows and cols + yield ((2 * n + 1), (2 * n - 1)) + # 
4d, all dimensions same + yield (n, n, n, n) + # weird edge case + yield (1, 1, 1) + + def _val_variations(): + yield 1 + yield 3.142 + yield np.nan + yield -np.inf + yield True + yield np.arange(4) + yield (4,) + yield [8, 9] + yield np.arange(54).reshape(9, 3, 2, 1) # contiguous C + yield np.asfortranarray(np.arange(9).reshape(3, 3)) # contiguous F + yield np.arange(9).reshape(3, 3)[::-1] # non-contiguous + + # contiguous arrays + def _multi_dimensional_array_variations(n): + for shape in _shape_variations(n): + yield np.zeros(shape, dtype=np.float64) + yield np.asfortranarray(np.ones(shape, dtype=np.float64)) + + # non-contiguous arrays + def _multi_dimensional_array_variations_strided(n): + for shape in _shape_variations(n): + tmp = np.zeros(tuple([x * 2 for x in shape]), dtype=np.float64) + slicer = tuple(slice(0, x * 2, 2) for x in shape) + yield tmp[slicer] + + def _check_fill_diagonal(arr, val): + for wrap in None, True, False: + a = arr.copy() + b = arr.copy() + + if wrap is None: + params = {} + else: + params = {'wrap': wrap} + + pyfunc(a, val, **params) + cfunc(b, val, **params) + self.assertPreciseEqual(a, b) + + for arr in _multi_dimensional_array_variations(3): + for val in _val_variations(): + _check_fill_diagonal(arr, val) + + for arr in _multi_dimensional_array_variations_strided(3): + for val in _val_variations(): + _check_fill_diagonal(arr, val) + + # non-numeric input arrays + arr = np.array([True] * 9).reshape(3, 3) + _check_fill_diagonal(arr, False) + _check_fill_diagonal(arr, [False, True, False]) + _check_fill_diagonal(arr, np.array([True, False, True])) + + def test_fill_diagonal_exception_cases(self): + pyfunc = numpy_fill_diagonal + cfunc = jit(nopython=True)(pyfunc) + val = 1 + + # Exceptions leak references + self.disable_leak_check() + + # first argument unsupported number of dimensions + for a in np.array([]), np.ones(5): + with self.assertRaises(TypingError) as raises: + cfunc(a, val) + assert "The first argument must be at least 
2-D" in str(raises.exception) + + # multi-dimensional input where dimensions are not all equal + with self.assertRaises(ValueError) as raises: + a = np.zeros((3, 3, 4)) + cfunc(a, val) + self.assertEqual("All dimensions of input must be of equal length", str(raises.exception)) + + # cases where val has incompatible type / value + def _assert_raises(arr, val): + with self.assertRaises(ValueError) as raises: + cfunc(arr, val) + self.assertEqual("Unable to safely conform val to a.dtype", str(raises.exception)) + + arr = np.zeros((3, 3), dtype=np.int32) + val = np.nan + _assert_raises(arr, val) + + val = [3.3, np.inf] + _assert_raises(arr, val) + + val = np.array([1, 2, 1e10], dtype=np.int64) + _assert_raises(arr, val) + + arr = np.zeros((3, 3), dtype=np.float32) + val = [1.4, 2.6, -1e100] + _assert_raises(arr, val) + + val = 1.1e100 + _assert_raises(arr, val) + + val = np.array([-1e100]) + _assert_raises(arr, val) + + def test_broadcast_to(self): + pyfunc = numpy_broadcast_to + cfunc = jit(nopython=True)(pyfunc) + + # Tests taken from + # https://github.com/numpy/numpy/blob/75f852edf94a7293e7982ad516bee314d7187c2d/numpy/lib/tests/test_stride_tricks.py#L234-L257 # noqa: E501 + data = [ + [np.array(0), (0,)], + [np.array(0), (1,)], + [np.array(0), (3,)], + [np.ones(1), (1,)], + [np.ones(1), (2,)], + [np.ones(1), (1, 2, 3)], + [np.arange(3), (3,)], + [np.arange(3), (1, 3)], + [np.arange(3), (2, 3)], + # test if shape is not a tuple + [np.ones(0), 0], + [np.ones(1), 1], + [np.ones(1), 2], + # these cases with size 0 are strange, but they reproduce the behavior + # of broadcasting with ufuncs + [np.ones(1), (0,)], + [np.ones((1, 2)), (0, 2)], + [np.ones((2, 1)), (2, 0)], + # numpy accepts scalar values as first argument to np.broadcast_to + [2, (2, 2)], + # tuple input + [(1, 2), (2, 2)], + ] + for input_array, shape in data: + expected = pyfunc(input_array, shape) + got = cfunc(input_array, shape) + self.assertPreciseEqual(got, expected) + + def 
test_broadcast_to_0d_array(self): + pyfunc = numpy_broadcast_to + cfunc = jit(nopython=True)(pyfunc) + + inputs = [ + np.array(123), + 123, + True, + # can't do np.asarray() on the types below + # 'hello', + # np.timedelta64(10, 'Y'), + # np.datetime64(10, 'Y'), + ] + + shape = () + for arr in inputs: + expected = pyfunc(arr, shape) + got = cfunc(arr, shape) + self.assertPreciseEqual(expected, got) + # ensure that np.broadcast_to returned a read-only array + self.assertFalse(got.flags['WRITEABLE']) + + def test_broadcast_to_raises(self): + pyfunc = numpy_broadcast_to + cfunc = jit(nopython=True)(pyfunc) + + # Tests taken from + # https://github.com/numpy/numpy/blob/75f852edf94a7293e7982ad516bee314d7187c2d/numpy/lib/tests/test_stride_tricks.py#L260-L276 # noqa: E501 + data = [ + [np.zeros((0,)), (), TypingError, + 'Cannot broadcast a non-scalar to a scalar array'], + [np.zeros((1,)), (), TypingError, + 'Cannot broadcast a non-scalar to a scalar array'], + [np.zeros((3,)), (), TypingError, + 'Cannot broadcast a non-scalar to a scalar array'], + [(), (), TypingError, + 'Cannot broadcast a non-scalar to a scalar array'], + [(123,), (), TypingError, + 'Cannot broadcast a non-scalar to a scalar array'], + [np.zeros((3,)), (1,), ValueError, + 'operands could not be broadcast together with remapped shapes'], + [np.zeros((3,)), (2,), ValueError, + 'operands could not be broadcast together with remapped shapes'], + [np.zeros((3,)), (4,), ValueError, + 'operands could not be broadcast together with remapped shapes'], + [np.zeros((1, 2)), (2, 1), ValueError, + 'operands could not be broadcast together with remapped shapes'], + [np.zeros((1, 1)), (1,), ValueError, + 'input operand has more dimensions than allowed by the axis remapping'], + [np.zeros((2, 2)), (3,), ValueError, + 'input operand has more dimensions than allowed by the axis remapping'], + [np.zeros((1,)), -1, ValueError, + 'all elements of broadcast shape must be non-negative'], + [np.zeros((1,)), (-1,), 
ValueError, + 'all elements of broadcast shape must be non-negative'], + [np.zeros((1, 2)), (-1, 2), ValueError, + 'all elements of broadcast shape must be non-negative'], + [np.zeros((1, 2)), (1.1, 2.2), TypingError, + 'The second argument "shape" must be a tuple of integers'], + ['hello', (3,), TypingError, + 'The first argument "array" must be array-like'], + [3, (2, 'a'), TypingError, + 'object cannot be interpreted as an integer'], + ] + self.disable_leak_check() + for arr, target_shape, err, msg in data: + with self.assertRaises(err) as raises: + cfunc(arr, target_shape) + self.assertIn(msg, str(raises.exception)) + + def test_broadcast_to_corner_cases(self): + @njit + def _broadcast_to_1(): + return np.broadcast_to('a', (2, 3)) + + expected = _broadcast_to_1.py_func() + got = _broadcast_to_1() + self.assertPreciseEqual(expected, got) + + def test_broadcast_to_change_view(self): + pyfunc = numpy_broadcast_to + cfunc = jit(nopython=True)(pyfunc) + input_array = np.zeros(2, dtype=np.int32) + shape = (2, 2) + view = cfunc(input_array, shape) + input_array[0] = 10 + + self.assertEqual(input_array.sum(), 10) + self.assertEqual(view.sum(), 20) + + def test_broadcast_to_indexing(self): + pyfunc = numpy_broadcast_to_indexing + cfunc = jit(nopython=True)(pyfunc) + data = [ + [np.ones(2), (2, 2), (1,)], + ] + for input_array, shape, idx in data: + expected = pyfunc(input_array, shape, idx) + got = cfunc(input_array, shape, idx) + self.assertPreciseEqual(got, expected) + + def test_broadcast_to_array_attrs(self): + # See issue #8534. This tests that broadcast array attributes have the + # correct value when accessed. 
+ @njit + def foo(arr): + ret = np.broadcast_to(arr, (2, 3)) + return ret, ret.size, ret.shape, ret.strides + + arr = np.arange(3) + + expected = foo.py_func(arr) + got = foo(arr) + self.assertPreciseEqual(expected, got) + + def test_broadcast_shapes(self): + pyfunc = numpy_broadcast_shapes + cfunc = jit(nopython=True)(pyfunc) + + # Tests taken from + # https://github.com/numpy/numpy/blob/623bc1fae1d47df24e7f1e29321d0c0ba2771ce0/numpy/lib/tests/test_stride_tricks.py#L296-L334 + data = [ + # [[], ()], # cannot compute fingerprint of empty list + [()], + [(), ()], + [(7,)], + [(1, 2),], + [(1, 1)], + [(1, 1), (3, 4)], + [(6, 7), (5, 6, 1), (7,), (5, 1, 7)], + [(5, 6, 1)], + [(1, 3), (3, 1)], + [(1, 0), (0, 0)], + [(0, 1), (0, 0)], + [(1, 0), (0, 1)], + [(1, 1), (0, 0)], + [(1, 1), (1, 0)], + [(1, 1), (0, 1)], + [(), (0,)], + [(0,), (0, 0)], + [(0,), (0, 1)], + [(1,), (0, 0)], + [(), (0, 0)], + [(1, 1), (0,)], + [(1,), (0, 1)], + [(1,), (1, 0)], + [(), (1, 0)], + [(), (0, 1)], + [(1,), (3,)], + [2, (3, 2)], + ] + for input_shape in data: + expected = pyfunc(*input_shape) + got = cfunc(*input_shape) + self.assertIsInstance(got, tuple) + self.assertPreciseEqual(expected, got) + + def test_broadcast_shapes_raises(self): + pyfunc = numpy_broadcast_shapes + cfunc = jit(nopython=True)(pyfunc) + + self.disable_leak_check() + + # Tests taken from + # https://github.com/numpy/numpy/blob/623bc1fae1d47df24e7f1e29321d0c0ba2771ce0/numpy/lib/tests/test_stride_tricks.py#L337-L351 + data = [ + [(3,), (4,)], + [(2, 3), (2,)], + [(3,), (3,), (4,)], + [(1, 3, 4), (2, 3, 3)], + [(1, 2), (3, 1), (3, 2), (10, 5)], + [2, (2, 3)], + ] + for input_shape in data: + with self.assertRaises(ValueError) as raises: + cfunc(*input_shape) + + self.assertIn("shape mismatch: objects cannot be broadcast to a single shape", + str(raises.exception)) + + def test_broadcast_shapes_negative_dimension(self): + pyfunc = numpy_broadcast_shapes + cfunc = jit(nopython=True)(pyfunc) + + self.disable_leak_check() + 
with self.assertRaises(ValueError) as raises: + cfunc((1, 2), (2), (-2)) + + self.assertIn("negative dimensions are not allowed", str(raises.exception)) + + def test_broadcast_shapes_invalid_type(self): + pyfunc = numpy_broadcast_shapes + cfunc = jit(nopython=True)(pyfunc) + + self.disable_leak_check() + + inps = [ + ((1, 2), ('hello',)), + (3.4,), + ('string',), + ((1.2, 'a')), + (1, ((1.2, 'a'))), + ] + + for inp in inps: + with self.assertRaises(TypingError) as raises: + cfunc(*inp) + + self.assertIn("must be either an int or tuple[int]", str(raises.exception)) + + def test_shape(self): + pyfunc = numpy_shape + cfunc = jit(nopython=True)(pyfunc) + + def check(x): + expected = pyfunc(x) + got = cfunc(x) + self.assertPreciseEqual(got, expected) + + # check arrays + for t in [(), (1,), (2, 3,), (4, 5, 6)]: + arr = np.empty(t) + check(arr) + + # check some types that go via asarray + for t in [1, False, [1,], [[1, 2,],[3, 4]], (1,), (1, 2, 3)]: + check(arr) + + with self.assertRaises(TypingError) as raises: + cfunc('a') + + self.assertIn("The argument to np.shape must be array-like", + str(raises.exception)) + + def test_size(self): + pyfunc = numpy_size + cfunc = jit(nopython=True)(pyfunc) + + def check(x): + expected = pyfunc(x) + got = cfunc(x) + self.assertPreciseEqual(got, expected) + + # check arrays + for t in [(), (1,), (2, 3,), (4, 5, 6)]: + arr = np.empty(t) + check(arr) + + # check scalar values + for t in [1, False, 3.14, np.int8(4), np.float32(2.718)]: + check(t) + + with self.assertRaises(TypingError) as raises: + cfunc('a') + + self.assertIn("The argument to np.size must be array-like", + str(raises.exception)) + + def test_flatnonzero_basic(self): + pyfunc = numpy_flatnonzero + cfunc = jit(nopython=True)(pyfunc) + + def a_variations(): + yield np.arange(-5, 5) + yield np.full(5, fill_value=0) + yield np.array([]) + a = self.random.randn(100) + a[np.abs(a) > 0.2] = 0.0 + yield a + yield a.reshape(5, 5, 4) + yield a.reshape(50, 2, order='F') + yield 
a.reshape(25, 4)[1::2] + yield a * 1j + + for a in a_variations(): + expected = pyfunc(a) + got = cfunc(a) + self.assertPreciseEqual(expected, got) + + def test_argwhere_basic(self): + pyfunc = numpy_argwhere + cfunc = jit(nopython=True)(pyfunc) + + def a_variations(): + yield np.arange(-5, 5) > 2 + yield np.full(5, fill_value=0) + yield np.full(5, fill_value=1) + yield np.array([]) + yield np.array([-1.0, 0.0, 1.0]) + a = self.random.randn(100) + yield a > 0.2 + yield a.reshape(5, 5, 4) > 0.5 + yield a.reshape(50, 2, order='F') > 0.5 + yield a.reshape(25, 4)[1::2] > 0.5 + yield a == a - 1 + yield a > -a + + for a in a_variations(): + expected = pyfunc(a) + got = cfunc(a) + self.assertPreciseEqual(expected, got) + + @staticmethod + def array_like_variations(): + yield ((1.1, 2.2), (3.3, 4.4), (5.5, 6.6)) + yield (0.0, 1.0, 0.0, -6.0) + yield ([0, 1], [2, 3]) + yield () + yield np.nan + yield 0 + yield 1 + yield False + yield True + yield (True, False, True) + yield 2 + 1j + # the following are not array-like, but NumPy does not raise + yield None + yield 'a_string' + yield '' + + + def test_flatnonzero_array_like(self): + pyfunc = numpy_flatnonzero + cfunc = jit(nopython=True)(pyfunc) + + for a in self.array_like_variations(): + expected = pyfunc(a) + got = cfunc(a) + self.assertPreciseEqual(expected, got) + + def test_argwhere_array_like(self): + pyfunc = numpy_argwhere + cfunc = jit(nopython=True)(pyfunc) + for a in self.array_like_variations(): + expected = pyfunc(a) + got = cfunc(a) + self.assertPreciseEqual(expected, got) + + def broadcast_arrays_assert_correct_shape(self, input_shapes, expected_shape): + # Broadcast a list of arrays with the given input shapes and check the + # common output shape. 
+ pyfunc = numpy_broadcast_arrays + cfunc = jit(nopython=True)(pyfunc) + + inarrays = [np.zeros(s) for s in input_shapes] + outarrays = cfunc(*inarrays) + expected = [expected_shape] * len(inarrays) + got = [a.shape for a in outarrays] + self.assertPreciseEqual(expected, got) + + def test_broadcast_arrays_same_input_shapes(self): + # Tests taken from + # https://github.com/numpy/numpy/blob/623bc1fae1d47df24e7f1e29321d0c0ba2771ce0/numpy/lib/tests/test_stride_tricks.py#L83-L107 # noqa: E501 + # Check that the final shape is just the input shape. + pyfunc = numpy_broadcast_arrays + cfunc = jit(nopython=True)(pyfunc) + + data = [ + # (), + (1,), + (3,), + (0, 1), + (0, 3), + (1, 0), + (3, 0), + (1, 3), + (3, 1), + (3, 3), + ] + for shape in data: + input_shapes = [shape] + # Single input. + self.broadcast_arrays_assert_correct_shape(input_shapes, shape) + # Double input. + input_shapes2 = [shape, shape] + self.broadcast_arrays_assert_correct_shape(input_shapes2, shape) + # Triple input. + input_shapes3 = [shape, shape, shape] + self.broadcast_arrays_assert_correct_shape(input_shapes3, shape) + + def test_broadcast_arrays_two_compatible_by_ones_input_shapes(self): + # Tests taken from + # https://github.com/numpy/numpy/blob/623bc1fae1d47df24e7f1e29321d0c0ba2771ce0/numpy/lib/tests/test_stride_tricks.py#L110-L132 + # Check that two different input shapes of the same length, but some have + # ones, broadcast to the correct shape. 
+ + data = [ + [[(1,), (3,)], (3,)], + [[(1, 3), (3, 3)], (3, 3)], + [[(3, 1), (3, 3)], (3, 3)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 1), (3, 3)], (3, 3)], + [[(1, 1), (1, 3)], (1, 3)], + [[(1, 1), (3, 1)], (3, 1)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + self.broadcast_arrays_assert_correct_shape(input_shapes, expected_shape) + # Reverse the input shapes since broadcasting should be symmetric. + self.broadcast_arrays_assert_correct_shape(input_shapes[::-1], expected_shape) + + def test_broadcast_arrays_two_compatible_by_prepending_ones_input_shapes(self): + # Tests taken from + # https://github.com/numpy/numpy/blob/623bc1fae1d47df24e7f1e29321d0c0ba2771ce0/numpy/lib/tests/test_stride_tricks.py#L135-L164 + # Check that two different input shapes (of different lengths) broadcast + # to the correct shape. + + data = [ + [[(), (3,)], (3,)], + [[(3,), (3, 3)], (3, 3)], + [[(3,), (3, 1)], (3, 3)], + [[(1,), (3, 3)], (3, 3)], + [[(), (3, 3)], (3, 3)], + [[(1, 1), (3,)], (1, 3)], + [[(1,), (3, 1)], (3, 1)], + [[(1,), (1, 3)], (1, 3)], + [[(), (1, 3)], (1, 3)], + [[(), (3, 1)], (3, 1)], + [[(), (0,)], (0,)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + self.broadcast_arrays_assert_correct_shape(input_shapes, expected_shape) + # Reverse the input shapes since broadcasting should be symmetric. 
+ self.broadcast_arrays_assert_correct_shape(input_shapes[::-1], expected_shape) + + def test_broadcast_arrays_scalar_input(self): + pyfunc = numpy_broadcast_arrays + cfunc = jit(nopython=True)(pyfunc) + data = [ + [[True, False], (1,)], + [[1, 2], (1,)], + [[(1, 2), 2], (2,)], + ] + for inarrays, expected_shape in data: + outarrays = cfunc(*inarrays) + got = [a.shape for a in outarrays] + expected = [expected_shape] * len(inarrays) + self.assertPreciseEqual(expected, got) + + def test_broadcast_arrays_tuple_input(self): + pyfunc = numpy_broadcast_arrays + cfunc = jit(nopython=True)(pyfunc) + outarrays = cfunc((123, 456), (789,)) + expected = [(2,), (2,)] + got = [a.shape for a in outarrays] + self.assertPreciseEqual(expected, got) + + def test_broadcast_arrays_non_array_input(self): + pyfunc = numpy_broadcast_arrays + cfunc = jit(nopython=True)(pyfunc) + outarrays = cfunc(np.intp(2), np.zeros((1, 3), dtype=np.intp)) + expected = [(1, 3), (1, 3)] + got = [a.shape for a in outarrays] + self.assertPreciseEqual(expected, got) + + def test_broadcast_arrays_invalid_mixed_input_types(self): + pyfunc = numpy_broadcast_arrays + cfunc = jit(nopython=True)(pyfunc) + + self.disable_leak_check() + + with self.assertRaises(TypingError) as raises: + arr = np.arange(6).reshape((2, 3)) + b = True + cfunc(arr, b) + self.assertIn('Mismatch of argument types', str(raises.exception)) + + def test_broadcast_arrays_invalid_input(self): + pyfunc = numpy_broadcast_arrays + cfunc = jit(nopython=True)(pyfunc) + + self.disable_leak_check() + + with self.assertRaises(TypingError) as raises: + arr = np.zeros(3, dtype=np.int64) + s = 'hello world' + cfunc(arr, s) + self.assertIn('Argument "1" must be array-like', str(raises.exception)) + + def test_broadcast_arrays_incompatible_shapes_raise_valueerror(self): + # Check that a ValueError is raised for incompatible shapes. 
+ pyfunc = numpy_broadcast_arrays + cfunc = jit(nopython=True)(pyfunc) + + self.disable_leak_check() + + data = [ + [(3,), (4,)], + [(2, 3), (2,)], + [(3,), (3,), (4,)], + [(1, 3, 4), (2, 3, 3)], + ] + for input_shapes in data: + for shape in [input_shapes, input_shapes[::-1]]: + # Reverse the input shapes since broadcasting should be symmetric. + with self.assertRaises(ValueError) as raises: + inarrays = [np.zeros(s) for s in shape] + cfunc(*inarrays) + self.assertIn("shape mismatch: objects cannot be broadcast to a single shape", + str(raises.exception)) + + def test_readonly_after_flatten(self): + # Reproduces Issue #8370 + def unfold_flatten(x, y): + r, c = x.shape + a = np.broadcast_to(x, (y, r, c)) + b = np.swapaxes(a, 0, 1) + cc = b.flatten() + d = np.reshape(cc, (-1, c)) + d[y - 1:, :] = d[: 1 - y] + return d + + pyfunc = unfold_flatten + cfunc = jit(nopython=True)(pyfunc) + + # If issue #8370 is not fixed: This will fail. + res_nb = cfunc(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]), 2) + res_py = pyfunc(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]), 2) + + np.testing.assert_array_equal(res_py, res_nb) + + def test_readonly_after_ravel(self): + # Reproduces another suggested problem in Issue #8370 + def unfold_ravel(x, y): + r, c = x.shape + a = np.broadcast_to(x, (y, r, c)) + b = np.swapaxes(a, 0, 1) + cc = b.ravel() + d = np.reshape(cc, (-1, c)) + d[y - 1:, :] = d[: 1 - y] + return d + + pyfunc = unfold_ravel + cfunc = jit(nopython=True)(pyfunc) + + # If issue #8370 is not fixed: This will fail. + res_nb = cfunc(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]), 2) + res_py = pyfunc(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]), 2) + + np.testing.assert_array_equal(res_py, res_nb) + + def test_mutability_after_ravel(self): + # Reproduces another suggested problem in Issue #8370 + # Namely that ravel should only return a writable array + # if a copy took place... otherwise leave it as it is. 
+ self.disable_leak_check() + a_c = np.arange(9).reshape((3, 3)).copy() + a_f = a_c.copy(order='F') + a_c.flags.writeable = False + a_f.flags.writeable = False + + def try_ravel_w_copy(a): + result = a.ravel() + return result + + pyfunc = try_ravel_w_copy + cfunc = jit(nopython=True)(pyfunc) + + ret_c = cfunc(a_c) + ret_f = cfunc(a_f) + + msg = 'No copy was performed, so the ' \ + 'resulting array must not be writeable' + self.assertTrue(not ret_c.flags.writeable, msg) + + msg = 'A copy was performed, yet the resulting array is not modifiable' + self.assertTrue(ret_f.flags.writeable, msg) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_array_methods.py b/venv/lib/python3.10/site-packages/numba/tests/test_array_methods.py new file mode 100644 index 0000000000000000000000000000000000000000..84074a77f753b38ec4bf42337ac7a9b5605af9bd --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_array_methods.py @@ -0,0 +1,1817 @@ +from itertools import product, cycle +import gc +import sys +import unittest +import warnings + +import numpy as np + +from numba import jit, njit, typeof +from numba.core import types +from numba.core.errors import TypingError, NumbaValueError +from numba.np.numpy_support import as_dtype, numpy_version +from numba.tests.support import (TestCase, MemoryLeakMixin, + needs_blas, skip_if_numpy_2, + expected_failure_np2) + +TIMEDELTA_M = 'timedelta64[M]' +TIMEDELTA_Y = 'timedelta64[Y]' + + +def np_around_array(arr, decimals, out): + np.around(arr, decimals, out) + +def np_around_binary(val, decimals): + return np.around(val, decimals) + +def np_around_unary(val): + return np.around(val) + +def np_round_array(arr, decimals, out): + np.round(arr, decimals, out) + +def np_round__array(arr, decimals, out): + np.round_(arr, decimals, out) + +def np_round_binary(val, decimals): + return np.round(val, decimals) + +def np_round_unary(val): + return np.round(val) + +def 
_fixed_np_round(arr, decimals=0, out=None): + """ + A slightly bugfixed version of np.round(). + """ + if out is not None and arr.dtype.kind == 'c': + # workaround for https://github.com/numpy/numpy/issues/5779 + _fixed_np_round(arr.real, decimals, out.real) + _fixed_np_round(arr.imag, decimals, out.imag) + return out + else: + res = np.round(arr, decimals, out) + if out is None: + # workaround for https://github.com/numpy/numpy/issues/5780 + def fixup_signed_zero(arg, res): + if res == 0.0 and arg < 0: + return -np.abs(res) + else: + return res + if isinstance(arr, (complex, np.complexfloating)): + res = complex(fixup_signed_zero(arr.real, res.real), + fixup_signed_zero(arr.imag, res.imag)) + else: + res = fixup_signed_zero(arr, res) + return res + + +def array_T(arr): + return arr.T + +def array_transpose(arr): + return arr.transpose() + +def array_copy(arr): + return arr.copy() + +def np_copy(arr): + return np.copy(arr) + +def np_asfortranarray(arr): + return np.asfortranarray(arr) + +def np_ascontiguousarray(arr): + return np.ascontiguousarray(arr) + +def array_view(arr, newtype): + return arr.view(newtype) + +def array_take(arr, indices): + return arr.take(indices) + +def array_take_kws(arr, indices, axis): + return arr.take(indices, axis=axis) + +def np_arange_1(arg0): + return np.arange(arg0) + +def np_arange_2(arg0, arg1): + return np.arange(arg0, arg1) + +def np_arange_3(arg0, arg1, arg2): + return np.arange(arg0, arg1, arg2) + +def np_arange_4(arg0, arg1, arg2, arg3): + return np.arange(arg0, arg1, arg2, arg3) + +def np_arange_1_stop(arg0, stop): + return np.arange(arg0, stop=stop) + +def np_arange_1_step(arg0, step): + return np.arange(arg0, step=step) + +def np_arange_1_dtype(arg0, dtype): + return np.arange(arg0, dtype=dtype) + +def np_arange_2_step(arg0, arg1, step): + return np.arange(arg0, arg1, step=step) + +def np_arange_2_dtype(arg0, arg1, dtype): + return np.arange(arg0, arg1, dtype=dtype) + +def np_arange_start_stop(start, stop): + return 
np.arange(start, stop=stop) + +def np_arange_start_stop_step(start, stop, step): + return np.arange(start, stop=stop, step=step) + +def np_arange_start_stop_step_dtype(start, stop, step, dtype): + return np.arange(start, stop=stop, step=step, dtype=dtype) + +def array_fill(arr, val): + return arr.fill(val) + +# XXX Can't pass a dtype as a Dispatcher argument for now +def make_array_view(newtype): + def array_view(arr): + return arr.view(newtype) + return array_view + +def array_sliced_view(arr, ): + return arr[0:4].view(np.float32)[0] + +def make_array_astype(newtype): + def array_astype(arr): + return arr.astype(newtype) + return array_astype + + +def np_frombuffer(b): + """ + np.frombuffer() on a Python-allocated buffer. + """ + return np.frombuffer(b) + +def np_frombuffer_dtype(b): + return np.frombuffer(b, dtype=np.complex64) + +def np_frombuffer_dtype_str(b): + return np.frombuffer(b, dtype='complex64') + +def np_frombuffer_allocated(shape): + """ + np.frombuffer() on a Numba-allocated buffer. 
+ """ + arr = np.ones(shape, dtype=np.int32) + return np.frombuffer(arr) + +def np_frombuffer_allocated_dtype(shape): + arr = np.ones(shape, dtype=np.int32) + return np.frombuffer(arr, dtype=np.complex64) + +def identity_usecase(a, b): + return (a is b), (a is not b) + +def array_nonzero(a): + return a.nonzero() + +def np_nonzero(a): + return np.nonzero(a) + +def np_where_1(c): + return np.where(c) + +def np_where_3(c, x, y): + return np.where(c, x, y) + +def array_item(a): + return a.item() + +def array_itemset(a, v): + a.itemset(v) + +def array_sum(a, *args): + return a.sum(*args) + +def array_sum_axis_kws(a, axis): + return a.sum(axis=axis) + +def array_sum_dtype_kws(a, dtype): + return a.sum(dtype=dtype) + +def array_sum_axis_dtype_kws(a, dtype, axis): + return a.sum(axis=axis, dtype=dtype) + +def array_sum_axis_dtype_pos(a, a1, a2): + return a.sum(a1, a2) + +def array_sum_const_multi(arr, axis): + # use np.sum with different constant args multiple times to check + # for internal compile cache to see if constant-specialization is + # applied properly. 
+ a = np.sum(arr, axis=4) + b = np.sum(arr, 3) + # the last invocation uses runtime-variable + c = np.sum(arr, axis) + # as method + d = arr.sum(axis=5) + # negative const axis + e = np.sum(arr, axis=-1) + return a, b, c, d, e + +def array_sum_const_axis_neg_one(a, axis): + # use .sum with -1 axis, this is for use with 1D arrays where the above + # "const_multi" variant would raise errors + return a.sum(axis=-1) + +def array_cumsum(a, *args): + return a.cumsum(*args) + +def array_cumsum_kws(a, axis): + return a.cumsum(axis=axis) + +def array_real(a): + return np.real(a) + +def array_imag(a): + return np.imag(a) + +def np_clip_no_out(a, a_min, a_max): + return np.clip(a, a_min, a_max) + +def np_clip(a, a_min, a_max, out=None): + return np.clip(a, a_min, a_max, out) + +def np_clip_kwargs(a, a_min, a_max, out=None): + return np.clip(a, a_min, a_max, out=out) + +def array_clip(a, a_min=None, a_max=None, out=None): + return a.clip(a_min, a_max, out) + +def array_clip_kwargs(a, a_min=None, a_max=None, out=None): + return a.clip(a_min, a_max, out=out) + +def array_clip_no_out(a, a_min, a_max): + return a.clip(a_min, a_max) + +def array_conj(a): + return a.conj() + +def array_conjugate(a): + return a.conjugate() + +def np_unique(a): + return np.unique(a) + + +def array_dot(a, b): + return a.dot(b) + +def array_dot_chain(a, b): + return a.dot(b).dot(b) + +def array_ctor(n, dtype): + return np.ones(n, dtype=dtype) + +class TestArrayMethods(MemoryLeakMixin, TestCase): + """ + Test various array methods and array-related functions. 
+ """ + + def setUp(self): + super(TestArrayMethods, self).setUp() + + def check_round_scalar(self, unary_pyfunc, binary_pyfunc): + base_values = [-3.0, -2.5, -2.25, -1.5, 1.5, 2.25, 2.5, 2.75] + complex_values = [x * (1 - 1j) for x in base_values] + int_values = [int(x) for x in base_values] + argtypes = (types.float64, types.float32, types.int32, + types.complex64, types.complex128) + argvalues = [base_values, base_values, int_values, + complex_values, complex_values] + + pyfunc = binary_pyfunc + for ty, values in zip(argtypes, argvalues): + cfunc = njit((ty, types.int32))(pyfunc) + for decimals in (1, 0, -1): + for v in values: + if decimals > 0: + v *= 10 + expected = _fixed_np_round(v, decimals) + got = cfunc(v, decimals) + self.assertPreciseEqual(got, expected) + + pyfunc = unary_pyfunc + for ty, values in zip(argtypes, argvalues): + cfunc = njit((ty,))(pyfunc) + for v in values: + expected = _fixed_np_round(v) + got = cfunc(v) + self.assertPreciseEqual(got, expected) + + def test_round_scalar(self): + self.check_round_scalar(np_round_unary, np_round_binary) + + def test_around_scalar(self): + self.check_round_scalar(np_around_unary, np_around_binary) + + def check_round_array(self, pyfunc): + def check_round(cfunc, values, inty, outty, decimals): + # Create input and output arrays of the right type + arr = values.astype(as_dtype(inty)) + out = np.zeros_like(arr).astype(as_dtype(outty)) + pyout = out.copy() + _fixed_np_round(arr, decimals, pyout) + self.memory_leak_setup() + cfunc(arr, decimals, out) + self.memory_leak_teardown() + np.testing.assert_allclose(out, pyout) + # Output shape mismatch + with self.assertRaises(ValueError) as raises: + cfunc(arr, decimals, out[1:]) + self.assertEqual(str(raises.exception), + "invalid output shape") + + def check_types(argtypes, outtypes, values): + for inty, outty in product(argtypes, outtypes): + argtys = (types.Array(inty, 1, 'A'), types.int32, + types.Array(outty, 1, 'A')) + cfunc = njit(argtys)(pyfunc) + 
check_round(cfunc, values, inty, outty, 0) + check_round(cfunc, values, inty, outty, 1) + if not isinstance(outty, types.Integer): + check_round(cfunc, values * 10, inty, outty, -1) + else: + # Avoid Numpy bug when output is an int: + # https://github.com/numpy/numpy/issues/5777 + pass + + values = np.array([-3.0, -2.5, -2.25, -1.5, 1.5, 2.25, 2.5, 2.75]) + + argtypes = (types.float64, types.float32) + check_types(argtypes, argtypes, values) + + argtypes = (types.complex64, types.complex128) + check_types(argtypes, argtypes, values * (1 - 1j)) + + # Exceptions leak references + self.disable_leak_check() + + def test_round_array(self): + self.check_round_array(np_round_array) + + def test_around_array(self): + self.check_round_array(np_around_array) + + @skip_if_numpy_2 + def test_round__array(self): + self.check_round_array(np_round__array) + + def test_around_bad_array(self): + for pyfunc in (np_round_unary, np_around_unary): + cfunc = jit(nopython=True)(pyfunc) + msg = '.*The argument "a" must be array-like.*' + with self.assertRaisesRegex(TypingError, msg): + cfunc(None) + + def test_around_bad_out(self): + funcs = [np_round_array, np_around_array] + if numpy_version < (2, 0): + funcs.append(np_round__array) + for py_func in funcs: + cfunc = jit(nopython=True)(py_func) + msg = '.*The argument "out" must be an array if it is provided.*' + with self.assertRaisesRegex(TypingError, msg): + cfunc(5, 0, out=6) + + def test_array_view(self): + + def run(arr, dtype): + pyfunc = make_array_view(dtype) + return njit(pyfunc)(arr) + + def check(arr, dtype): + expected = arr.view(dtype) + self.memory_leak_setup() + got = run(arr, dtype) + self.assertPreciseEqual(got, expected) + del got + self.memory_leak_teardown() + + def check_err(arr, dtype): + with self.assertRaises(ValueError) as raises: + run(arr, dtype) + self.assertEqual(str(raises.exception), + "new type not compatible with array") + + def check_err_noncontig_last_axis(arr, dtype): + # check NumPy interpreted 
version raises + msg = ("To change to a dtype of a different size, the last axis " + "must be contiguous") + with self.assertRaises(ValueError) as raises: + make_array_view(dtype)(arr) + self.assertEqual(str(raises.exception), msg) + # check Numba version raises + with self.assertRaises(ValueError) as raises: + run(arr, dtype) + self.assertEqual(str(raises.exception), msg) + + def check_err_0d(arr, dtype): + # check NumPy interpreted version raises + msg = ("Changing the dtype of a 0d array is only supported " + "if the itemsize is unchanged") + with self.assertRaises(ValueError) as raises: + make_array_view(dtype)(arr) + self.assertEqual(str(raises.exception), msg) + # check Numba version raises + with self.assertRaises(ValueError) as raises: + run(arr, dtype) + self.assertEqual(str(raises.exception), msg) + + def check_err_smaller_dtype(arr, dtype): + # check NumPy interpreted version raises + msg = ("When changing to a smaller dtype, its size must be a " + "divisor of the size of original dtype") + with self.assertRaises(ValueError) as raises: + make_array_view(dtype)(arr) + self.assertEqual(str(raises.exception), msg) + # check Numba version raises + with self.assertRaises(ValueError) as raises: + run(arr, dtype) + self.assertEqual(str(raises.exception), msg) + + def check_err_larger_dtype(arr, dtype): + # check NumPy interpreted version raises + msg = ("When changing to a larger dtype, its size must be a " + "divisor of the total size in bytes of the last axis " + "of the array.") + with self.assertRaises(ValueError) as raises: + make_array_view(dtype)(arr) + self.assertEqual(str(raises.exception), msg) + # check Numba version raises + with self.assertRaises(ValueError) as raises: + run(arr, dtype) + self.assertEqual(str(raises.exception), msg) + + dt1 = np.dtype([('a', np.int8), ('b', np.int8)]) + dt2 = np.dtype([('u', np.int16), ('v', np.int8)]) + dt3 = np.dtype([('x', np.int16), ('y', np.int16)]) + + check_error_larger_dt = check_err_larger_dtype + 
check_error_smaller_dt = check_err_smaller_dtype + check_error_noncontig = check_err_noncontig_last_axis + check_error_0d = check_err_0d + + # C-contiguous + arr = np.arange(24, dtype=np.int8) + check(arr, np.dtype('int16')) + check(arr, np.int16) + check(arr, np.int8) + check(arr, np.float32) + check(arr, np.complex64) + check(arr, dt1) + check(arr, dt2) + check_error_larger_dt(arr, np.complex128) + + # Last dimension must have a compatible size + arr = arr.reshape((3, 8)) + check(arr, np.int8) + check(arr, np.float32) + check(arr, np.complex64) + check(arr, dt1) + check_error_larger_dt(arr, dt2) + check_error_larger_dt(arr, np.complex128) + + # F-contiguous + f_arr = np.arange(24, dtype=np.int8).reshape((3, 8)).T + # neither F or C contiguous + not_f_or_c_arr = np.zeros((4, 4)).T[::2, ::2] + + check_maybe_error = check_err_noncontig_last_axis + + check(f_arr, np.int8) + check(not_f_or_c_arr, np.uint64) + check_maybe_error(f_arr, np.float32) + check_maybe_error(f_arr, np.complex64) + check_maybe_error(f_arr, dt1) + + check_error_noncontig(f_arr, dt2) + check_error_noncontig(f_arr, np.complex128) + check_error_noncontig(not_f_or_c_arr, np.int8) + + # Non-contiguous: only a type with the same itemsize can be used + arr = np.arange(16, dtype=np.int32)[::2] + check(arr, np.uint32) + check(arr, np.float32) + check(arr, dt3) + check_error_noncontig(arr, np.int8) + check_error_noncontig(arr, np.int16) + check_error_noncontig(arr, np.int64) + check_error_noncontig(arr, dt1) + check_error_noncontig(arr, dt2) + + ## Zero-dim array: only a type with the same itemsize can be used + arr = np.array([42], dtype=np.int32).reshape(()) + check(arr, np.uint32) + check(arr, np.float32) + check(arr, dt3) + check_error_0d(arr, np.int8) + check_error_0d(arr, np.int16) + check_error_0d(arr, np.int64) + check_error_0d(arr, dt1) + check_error_0d(arr, dt2) + + # Changing to smaller dtype + arr = np.array(['abcdef']) + check_error_smaller_dt(arr, np.complex128) + + # Exceptions leak 
references + self.disable_leak_check() + + def test_array_sliced_view(self): + """ + Test .view() on A layout array but has contiguous innermost dimension. + """ + pyfunc = array_sliced_view + cfunc = njit((types.uint8[:],))(pyfunc) + + orig = np.array([1.5, 2], dtype=np.float32) + byteary = orig.view(np.uint8) + + expect = pyfunc(byteary) + got = cfunc(byteary) + + self.assertEqual(expect, got) + + def test_array_astype(self): + + def run(arr, dtype): + pyfunc = make_array_astype(dtype) + return njit(pyfunc)(arr) + + def check(arr, dtype): + expected = arr.astype(dtype).copy(order='A') + got = run(arr, dtype) + self.assertPreciseEqual(got, expected) + + # C-contiguous + arr = np.arange(24, dtype=np.int8) + check(arr, np.dtype('int16')) + check(arr, np.int32) + check(arr, np.float32) + check(arr, np.complex128) + check(arr, "float32") + + # F-contiguous + arr = np.arange(24, dtype=np.int8).reshape((3, 8)).T + check(arr, np.float32) + + # Non-contiguous + arr = np.arange(16, dtype=np.int32)[::2] + check(arr, np.uint64) + + # check read only attr does not get copied + arr = np.arange(16, dtype=np.int32) + arr.flags.writeable = False + check(arr, np.int32) + + # Invalid conversion + dt = np.dtype([('x', np.int8)]) + with self.assertTypingError() as raises: + check(arr, dt) + self.assertIn('cannot convert from int32 to Record', + str(raises.exception)) + # Check non-Literal string raises + unicode_val = "float32" + with self.assertTypingError() as raises: + @jit(nopython=True) + def foo(dtype): + np.array([1]).astype(dtype) + foo(unicode_val) + self.assertIn('array.astype if dtype is a string it must be constant', + str(raises.exception)) + + def check_np_frombuffer(self, pyfunc): + + cfunc = njit(pyfunc) + + def check(buf): + old_refcnt = sys.getrefcount(buf) + expected = pyfunc(buf) + self.memory_leak_setup() + got = cfunc(buf) + self.assertPreciseEqual(got, expected) + del expected + # Note gc.collect is due to references in `except ... 
as e` that + # aren't immediately cleared + gc.collect() + self.assertEqual(sys.getrefcount(buf), old_refcnt + 1) + del got + gc.collect() + self.assertEqual(sys.getrefcount(buf), old_refcnt) + self.memory_leak_teardown() + + b = bytearray(range(16)) + check(b) + check(bytes(b)) + check(memoryview(b)) + check(np.arange(12)) + b = np.arange(12).reshape((3, 4)) + check(b) + + # Exceptions leak references + self.disable_leak_check() + + with self.assertRaises(ValueError) as raises: + cfunc(bytearray(b"xxx")) + self.assertEqual("buffer size must be a multiple of element size", + str(raises.exception)) + + def test_np_frombuffer(self): + self.check_np_frombuffer(np_frombuffer) + + def test_np_frombuffer_dtype(self): + self.check_np_frombuffer(np_frombuffer_dtype) + + def test_np_frombuffer_dtype_str(self): + self.check_np_frombuffer(np_frombuffer_dtype_str) + + def test_np_frombuffer_dtype_non_const_str(self): + @jit(nopython=True) + def func(buf, dt): + np.frombuffer(buf, dtype=dt) + + with self.assertRaises(TypingError) as raises: + func(bytearray(range(16)), 'int32') + + excstr = str(raises.exception) + msg = ("If np.frombuffer dtype is a string it must be a " + "string constant.") + self.assertIn(msg, excstr) + + def test_np_frombuffer_bad_buffer(self): + @jit(nopython=True) + def func(buf): + return np.frombuffer(buf) + + msg = '.*Argument "buffer" must be buffer-like.*' + with self.assertRaisesRegex(TypingError, msg) as raises: + func(None) + + def check_layout_dependent_func(self, pyfunc, fac=np.arange): + def is_same(a, b): + return a.ctypes.data == b.ctypes.data + def check_arr(arr): + cfunc = njit((typeof(arr),))(pyfunc) + expected = pyfunc(arr) + got = cfunc(arr) + self.assertPreciseEqual(expected, got) + self.assertEqual(is_same(expected, arr), is_same(got, arr)) + arr = fac(24) + check_arr(arr) + check_arr(arr.reshape((3, 8))) + check_arr(arr.reshape((3, 8)).T) + check_arr(arr.reshape((3, 8))[::2]) + check_arr(arr.reshape((2, 3, 4))) + 
check_arr(arr.reshape((2, 3, 4)).T) + check_arr(arr.reshape((2, 3, 4))[::2]) + arr = np.array([0]).reshape(()) + check_arr(arr) + + def test_array_transpose(self): + self.check_layout_dependent_func(array_transpose) + + def test_array_T(self): + self.check_layout_dependent_func(array_T) + + def test_array_copy(self): + self.check_layout_dependent_func(array_copy) + + def test_np_copy(self): + self.check_layout_dependent_func(np_copy) + + def check_ascontiguousarray_scalar(self, pyfunc): + def check_scalar(x): + cfunc = njit((typeof(x),))(pyfunc) + expected = pyfunc(x) + got = cfunc(x) + self.assertPreciseEqual(expected, got) + for x in [42, 42.0, 42j, np.float32(42), np.float64(42), True]: + check_scalar(x) + + def check_bad_array(self, pyfunc): + msg = '.*The argument "a" must be array-like.*' + with self.assertRaisesRegex(TypingError, msg) as raises: + njit((typeof('hello'), ))(pyfunc) + + def test_np_asfortranarray(self): + self.check_layout_dependent_func(np_asfortranarray) + self.check_bad_array(np_asfortranarray) + self.check_ascontiguousarray_scalar(np_asfortranarray) + + def test_np_ascontiguousarray(self): + self.check_layout_dependent_func(np_ascontiguousarray) + self.check_bad_array(np_asfortranarray) + self.check_ascontiguousarray_scalar(np_ascontiguousarray) + + def check_np_frombuffer_allocated(self, pyfunc): + + cfunc = njit(pyfunc) + + def check(shape): + expected = pyfunc(shape) + got = cfunc(shape) + self.assertPreciseEqual(got, expected) + + check((16,)) + check((4, 4)) + check((1, 0, 1)) + + def test_np_frombuffer_allocated(self): + self.check_np_frombuffer_allocated(np_frombuffer_allocated) + + def test_np_frombuffer_allocated2(self): + self.check_np_frombuffer_allocated(np_frombuffer_allocated_dtype) + + def check_nonzero(self, pyfunc): + def fac(N): + np.random.seed(42) + arr = np.random.random(N) + arr[arr < 0.3] = 0.0 + arr[arr > 0.7] = float('nan') + return arr + + def check_arr(arr): + cfunc = njit((typeof(arr),))(pyfunc) + expected = 
pyfunc(arr) + expected = [a.copy() for a in expected] + self.assertPreciseEqual(cfunc(arr), expected) + + arr = np.int16([1, 0, -1, 0]) + check_arr(arr) + arr = np.bool_([1, 0, 1]) + check_arr(arr) + + arr = fac(24) + check_arr(arr) + check_arr(arr.reshape((3, 8))) + check_arr(arr.reshape((3, 8)).T) + check_arr(arr.reshape((3, 8))[::2]) + check_arr(arr.reshape((2, 3, 4))) + check_arr(arr.reshape((2, 3, 4)).T) + check_arr(arr.reshape((2, 3, 4))[::2]) + + arr = np.array(["Hello", "", "world"]) + check_arr(arr) + + for v in (0.0, 1.5, float('nan')): + arr = np.array([v]).reshape(()) + if numpy_version < (2, 1): + check_arr(arr) + else: + with self.assertRaises((ValueError, TypingError)) as raises: + njit((typeof(arr),))(pyfunc) + msg = "Calling nonzero on 0d arrays is not allowed. Use " \ + "np.atleast_1d(scalar).nonzero() instead." + self.assertIn(msg, str(raises.exception)) + + def test_array_nonzero(self): + self.check_nonzero(array_nonzero) + + def test_np_nonzero(self): + self.check_nonzero(np_nonzero) + + def test_np_where_1(self): + self.check_nonzero(np_where_1) + + def test_np_where_3(self): + pyfunc = np_where_3 + def fac(N): + np.random.seed(42) + arr = np.random.random(N) + arr[arr < 0.3] = 0.0 + arr[arr > 0.7] = float('nan') + return arr + + layouts = cycle(['C', 'F', 'A']) + _types = [np.int32, np.int64, np.float32, np.float64, np.complex64, + np.complex128] + + np.random.seed(42) + + def check_arr(arr, layout=False): + np.random.shuffle(_types) + if layout != False: + x = np.zeros_like(arr, dtype=_types[0], order=layout) + y = np.zeros_like(arr, dtype=_types[1], order=layout) + arr = arr.copy(order=layout) + else: + x = np.zeros_like(arr, dtype=_types[0], order=next(layouts)) + y = np.zeros_like(arr, dtype=_types[1], order=next(layouts)) + x.fill(4) + y.fill(9) + cfunc = njit((typeof(arr), typeof(x), typeof(y)))(pyfunc) + expected = pyfunc(arr, x, y) + got = cfunc(arr, x, y) + self.assertPreciseEqual(got, expected) + + def check_scal(scal): + x = 4 + y 
= 5 + np.random.shuffle(_types) + x = _types[0](4) + y = _types[1](5) + cfunc = njit((typeof(scal), typeof(x), typeof(y)))(pyfunc) + expected = pyfunc(scal, x, y) + got = cfunc(scal, x, y) + self.assertPreciseEqual(got, expected) + + arr = np.int16([1, 0, -1, 0]) + check_arr(arr) + arr = np.bool_([1, 0, 1]) + check_arr(arr) + + arr = fac(24) + check_arr(arr) + check_arr(arr.reshape((3, 8))) + check_arr(arr.reshape((3, 8)).T) + check_arr(arr.reshape((3, 8))[::2]) + check_arr(arr.reshape((2, 3, 4))) + check_arr(arr.reshape((2, 3, 4)).T) + check_arr(arr.reshape((2, 3, 4))[::2]) + + check_arr(arr.reshape((2, 3, 4)), layout='F') + check_arr(arr.reshape((2, 3, 4)).T, layout='F') + check_arr(arr.reshape((2, 3, 4))[::2], layout='F') + + for v in (0.0, 1.5, float('nan')): + arr = np.array([v]).reshape(()) + check_arr(arr) + + for x in (0, 1, True, False, 2.5, 0j): + check_scal(x) + + def test_np_where_3_broadcast_x_y_scalar(self): + pyfunc = np_where_3 + cfunc = jit(nopython=True)(pyfunc) + + def check_ok(args): + expected = pyfunc(*args) + got = cfunc(*args) + self.assertPreciseEqual(got, expected) + + def a_variations(): + a = np.linspace(-2, 4, 20) + self.random.shuffle(a) + yield a + yield a.reshape(2, 5, 2) + yield a.reshape(4, 5, order='F') + yield a.reshape(2, 5, 2)[::-1] + + for a in a_variations(): + params = (a > 0, 0, 1) + check_ok(params) + + params = (a < 0, np.nan, 1 + 4j) + check_ok(params) + + params = (a > 1, True, False) + check_ok(params) + + def test_np_where_3_broadcast_x_or_y_scalar(self): + pyfunc = np_where_3 + cfunc = jit(nopython=True)(pyfunc) + + def check_ok(args): + condition, x, y = args + + expected = pyfunc(condition, x, y) + got = cfunc(condition, x, y) + self.assertPreciseEqual(got, expected) + + # swap x and y + expected = pyfunc(condition, y, x) + got = cfunc(condition, y, x) + self.assertPreciseEqual(got, expected) + + def array_permutations(): + x = np.arange(9).reshape(3, 3) + yield x + yield x * 1.1 + yield np.asfortranarray(x) + 
yield x[::-1] + yield np.linspace(-10, 10, 60).reshape(3, 4, 5) * 1j + + def scalar_permutations(): + yield 0 + yield 4.3 + yield np.nan + yield True + yield 8 + 4j + + for x in array_permutations(): + for y in scalar_permutations(): + x_mean = np.mean(x) + condition = x > x_mean + params = (condition, x, y) + check_ok(params) + + def test_np_where_numpy_basic(self): + # https://github.com/numpy/numpy/blob/fe2bb380fd9a084b622ff3f00cb6f245e8c1a10e/numpy/core/tests/test_multiarray.py#L8670-L8694 + pyfunc = np_where_3 + cfunc = jit(nopython=True)(pyfunc) + + # skipping unsupported dtypes: + # np.longdouble, np.clongdouble + dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128] + for dt in dts: + c = np.ones(53, dtype=bool) + np.testing.assert_equal(cfunc( c, dt(0), dt(1)), dt(0)) + np.testing.assert_equal(cfunc(~c, dt(0), dt(1)), dt(1)) + np.testing.assert_equal(cfunc(True, dt(0), dt(1)), dt(0)) + np.testing.assert_equal(cfunc(False, dt(0), dt(1)), dt(1)) + d = np.ones_like(c).astype(dt) + e = np.zeros_like(d) + r = d.astype(dt) + c[7] = False + r[7] = e[7] + np.testing.assert_equal(cfunc(c, e, e), e) + np.testing.assert_equal(cfunc(c, d, e), r) + np.testing.assert_equal(cfunc(c, d, e[0]), r) + np.testing.assert_equal(cfunc(c, d[0], e), r) + np.testing.assert_equal(cfunc(c[::2], d[::2], e[::2]), r[::2]) + np.testing.assert_equal(cfunc(c[1::2], d[1::2], e[1::2]), r[1::2]) + np.testing.assert_equal(cfunc(c[::3], d[::3], e[::3]), r[::3]) + np.testing.assert_equal(cfunc(c[1::3], d[1::3], e[1::3]), r[1::3]) + np.testing.assert_equal(cfunc(c[::-2], d[::-2], e[::-2]), r[::-2]) + np.testing.assert_equal(cfunc(c[::-3], d[::-3], e[::-3]), r[::-3]) + np.testing.assert_equal(cfunc(c[1::-3], d[1::-3], e[1::-3]), r[1::-3]) + + def test_np_where_numpy_ndim(self): + # https://github.com/numpy/numpy/blob/fe2bb380fd9a084b622ff3f00cb6f245e8c1a10e/numpy/core/tests/test_multiarray.py#L8737-L8749 + pyfunc = np_where_3 + cfunc = jit(nopython=True)(pyfunc) + + c = [True, 
False] + a = np.zeros((2, 25)) + b = np.ones((2, 25)) + r = cfunc(np.array(c)[:,np.newaxis], a, b) + np.testing.assert_array_equal(r[0], a[0]) + np.testing.assert_array_equal(r[1], b[0]) + + a = a.T + b = b.T + r = cfunc(c, a, b) + np.testing.assert_array_equal(r[:,0], a[:,0]) + np.testing.assert_array_equal(r[:,1], b[:,0]) + + def test_np_where_numpy_dtype_mix(self): + # https://github.com/numpy/numpy/blob/fe2bb380fd9a084b622ff3f00cb6f245e8c1a10e/numpy/core/tests/test_multiarray.py#L8751-L8773 + pyfunc = np_where_3 + cfunc = jit(nopython=True)(pyfunc) + + c = np.array([False, True, False, False, False, False, True, False, + False, False, True, False]) + a = np.uint32(1) + b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], + dtype=np.float64) + r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], + dtype=np.float64) + np.testing.assert_equal(cfunc(c, a, b), r) + + a = a.astype(np.float32) + b = b.astype(np.int64) + np.testing.assert_equal(cfunc(c, a, b), r) + + # non bool mask + c = c.astype(int) + c[c != 0] = 34242324 + np.testing.assert_equal(cfunc(c, a, b), r) + # invert + tmpmask = c != 0 + c[c == 0] = 41247212 + c[tmpmask] = 0 + np.testing.assert_equal(cfunc(c, b, a), r) + + def test_np_where_numpy_test_error(self): + # https://github.com/numpy/numpy/blob/fe2bb380fd9a084b622ff3f00cb6f245e8c1a10e/numpy/core/tests/test_multiarray.py#L8794-L8799 + pyfunc = np_where_3 + cfunc = jit(nopython=True)(pyfunc) + + c = [True, True] + a = np.ones((4, 5)) + b = np.ones((5, 5)) + + self.disable_leak_check() + with self.assertRaisesRegex(ValueError, "objects cannot be broadcast"): + cfunc(c, a, b) + + with self.assertRaisesRegex(ValueError, "objects cannot be broadcast"): + cfunc(c[0], a, b) + + def test_np_where_invalid_inputs(self): + pyfunc = np_where_3 + cfunc = jit(nopython=True)(pyfunc) + + msg = 'The argument "condition" must be array-like' + with self.assertRaisesRegex(TypingError, msg): + cfunc(None, 2, 3) + + msg = 'The argument "x" 
must be array-like if provided' + with self.assertRaisesRegex(TypingError, msg): + cfunc(1, 'hello', 3) + + msg = 'The argument "y" must be array-like if provided' + with self.assertRaisesRegex(TypingError, msg): + cfunc(1, 2, 'world') + + # None values are not yet supported in np.where + msg = 'Argument "x" or "y" cannot be None' + with self.assertRaisesRegex(TypingError, msg): + cfunc(1, None, None) + + def test_arange_1_arg(self): + + all_pyfuncs = ( + np_arange_1, + lambda x: np.arange(x, 10), + lambda x: np.arange(7, step=max(1, abs(x))) + ) + + for pyfunc in all_pyfuncs: + cfunc = jit(nopython=True)(pyfunc) + + def check_ok(arg0): + expected = pyfunc(arg0) + got = cfunc(arg0) + np.testing.assert_allclose(expected, got) + + check_ok(0) + check_ok(1) + check_ok(4) + check_ok(5.5) + check_ok(-3) + check_ok(complex(4, 4)) + check_ok(np.int8(0)) + + def test_arange_2_arg(self): + def check_ok(arg0, arg1, pyfunc, cfunc): + expected = pyfunc(arg0, arg1) + got = cfunc(arg0, arg1) + np.testing.assert_allclose(expected, got) + + all_pyfuncs = ( + np_arange_2, + np_arange_start_stop, + np_arange_1_stop, + np_arange_1_step, + lambda x, y: np.arange(x, y, 5), + lambda x, y: np.arange(2, y, step=x), + ) + + for pyfunc in all_pyfuncs: + cfunc = jit(nopython=True)(pyfunc) + + check_ok(-1, 5, pyfunc, cfunc) + check_ok(-8, -1, pyfunc, cfunc) + check_ok(4, 0.5, pyfunc, cfunc) + check_ok(0.5, 4, pyfunc, cfunc) + check_ok(3, None, pyfunc, cfunc) + if numpy_version < (2, 0): + check_ok(complex(1, 1), complex(4, 4), pyfunc, cfunc) + check_ok(complex(4, 4), complex(1, 1), pyfunc, cfunc) + + pyfunc = np_arange_1_dtype + cfunc = jit(nopython=True)(pyfunc) + + check_ok(5, np.float32, pyfunc, cfunc) + check_ok(2.0, np.int32, pyfunc, cfunc) + check_ok(7, None, pyfunc, cfunc) + check_ok(np.int8(0), None, pyfunc, cfunc) + + if numpy_version < (2, 0): + check_ok(10, np.complex128, pyfunc, cfunc) + check_ok(np.complex64(10), np.complex128, pyfunc, cfunc) + + def test_arange_3_arg(self): + 
windows64 = sys.platform.startswith('win32') and sys.maxsize > 2 ** 32 + + def check_ok(arg0, arg1, arg2, pyfunc, cfunc, check_dtype=False): + expected = pyfunc(arg0, arg1, arg2) + got = cfunc(arg0, arg1, arg2) + np.testing.assert_allclose(expected, got) + # windows 64 cannot differentiate between a python int and a + # np.int64 which means the result from numba is int64 more often + # than in NumPy. + if not windows64: + self.assertEqual(expected.dtype, got.dtype) + + for pyfunc in (np_arange_3, np_arange_2_step, np_arange_start_stop_step): + cfunc = jit(nopython=True)(pyfunc) + + check_ok(0, 5, 1, pyfunc, cfunc) + check_ok(-8, -1, 3, pyfunc, cfunc) + check_ok(0, -10, -2, pyfunc, cfunc) + check_ok(0.5, 4, 2, pyfunc, cfunc) + check_ok(0, 1, 0.1, pyfunc, cfunc) + check_ok(3, 6, None, pyfunc, cfunc) + check_ok(3, None, None, pyfunc, cfunc) + check_ok(np.int8(0), np.int8(5), np.int8(1), pyfunc, cfunc) + check_ok(np.int8(0), np.int16(5), np.int32(1), pyfunc, cfunc) + # check upcasting logic, this matters most on windows + i8 = np.int8 + check_ok(i8(0), i8(5), i8(1), pyfunc, cfunc, True) # C int + check_ok(np.int64(0), i8(5), i8(1), pyfunc, cfunc, True) # int64 + if numpy_version < (2, 0): + check_ok(0, complex(4, 4), complex(1, 1), pyfunc, cfunc) + + pyfunc = np_arange_2_dtype + cfunc = jit(nopython=True)(pyfunc) + + check_ok(1, 5, np.float32, pyfunc, cfunc) + check_ok(2.0, 8, np.int32, pyfunc, cfunc) + check_ok(1, 7, None, pyfunc, cfunc) + check_ok(np.int8(0), np.int32(5), None, pyfunc, cfunc, True) + if numpy_version < (2, 0): + check_ok(-2, 10, np.complex128, pyfunc, cfunc) + check_ok(3, np.complex64(10), np.complex128, pyfunc, cfunc) + + def test_arange_4_arg(self): + for pyfunc in (np_arange_4, np_arange_start_stop_step_dtype): + cfunc = jit(nopython=True)(pyfunc) + + def check_ok(arg0, arg1, arg2, arg3): + expected = pyfunc(arg0, arg1, arg2, arg3) + got = cfunc(arg0, arg1, arg2, arg3) + np.testing.assert_allclose(expected, got) + + check_ok(0, 5, 1, np.float64) + 
check_ok(-8, -1, 3, np.int32) + check_ok(0, -10, -2, np.float32) + check_ok(0.5, 4, 2, None) + check_ok(3, 6, None, None) + check_ok(3, None, None, None) + if numpy_version < (2, 0): + check_ok(0, 1, 0.1, np.complex128) + check_ok(0, complex(4, 4), complex(1, 1), np.complex128) + + def test_arange_throws(self): + # Exceptions leak references + self.disable_leak_check() + + bad_funcs_1 = [ + lambda x: np.arange(stop=x), + lambda x: np.arange(step=x), + lambda x: np.arange(dtype=x), + ] + bad_funcs_2 = [ + lambda x, y: np.arange(stop=x, step=y), + lambda x, y: np.arange(stop=x, dtype=y), + ] + + for pyfunc in bad_funcs_1: + with self.assertRaises(TypingError) as raises: + cfunc = jit(nopython=True)(pyfunc) + cfunc(2) + for pyfunc in bad_funcs_2: + with self.assertRaises(TypingError) as raises: + cfunc = jit(nopython=True)(pyfunc) + cfunc(2, 6) + + # check step size = 0, this is nonsense + pyfunc = np_arange_3 + cfunc = jit(nopython=True)(pyfunc) + for f in (pyfunc, cfunc,): + for inputs in [(1, np.int16(2), 0), (1, 2, 0)]: + # there's a different error depending on whether any of the + # input values are np scalars + permitted_errors = (ZeroDivisionError, ValueError) + with self.assertRaises(permitted_errors) as raises: + # this will raise RuntimeWarning's about zero division + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + f(*inputs) + self.assertIn("Maximum allowed size exceeded", + str(raises.exception)) + + def test_arange_accuracy(self): + # Checking arange reasonably replicates NumPy's algorithm + # see https://github.com/numba/numba/issues/6768 + @jit(nopython=True) + def foo(step): + return np.arange(0, 1 + step, step) + + x = 0.010101010101010102 + self.assertPreciseEqual(foo(x), foo.py_func(x)) + + def test_item(self): + pyfunc = array_item + cfunc = jit(nopython=True)(pyfunc) + + def check_ok(arg): + expected = pyfunc(arg) + got = cfunc(arg) + self.assertPreciseEqual(got, expected) + + def check_err(arg): + with 
self.assertRaises(ValueError) as raises: + cfunc(arg) + self.assertIn("item(): can only convert an array of size 1 to a Python scalar", + str(raises.exception)) + + # Exceptions leak references + self.disable_leak_check() + + # Test on different kinds of scalars and 1-item arrays + check_ok(np.float32([1.5])) + check_ok(np.complex128([[1.5j]])) + check_ok(np.array(1.5)) + check_ok(np.bool_(True)) + check_ok(np.float32(1.5)) + + check_err(np.array([1, 2])) + check_err(np.array([])) + + @skip_if_numpy_2 + def test_itemset(self): + pyfunc = array_itemset + cfunc = jit(nopython=True)(pyfunc) + + def check_ok(a, v): + expected = a.copy() + got = a.copy() + pyfunc(expected, v) + cfunc(got, v) + self.assertPreciseEqual(got, expected) + + def check_err(a): + with self.assertRaises(ValueError) as raises: + cfunc(a, 42) + self.assertIn("itemset(): can only write to an array of size 1", + str(raises.exception)) + + # Exceptions leak references + self.disable_leak_check() + + # Test on different kinds of 1-item arrays + check_ok(np.float32([1.5]), 42) + check_ok(np.complex128([[1.5j]]), 42) + check_ok(np.array(1.5), 42) + + check_err(np.array([1, 2])) + check_err(np.array([])) + + def test_sum(self): + """ test sum over a whole range of dtypes, no axis or dtype parameter + """ + pyfunc = array_sum + cfunc = jit(nopython=True)(pyfunc) + all_dtypes = [np.float64, np.float32, np.int64, np.int32, + np.complex64, np.complex128, np.timedelta64] + all_test_arrays = [ + [np.ones((7, 6, 5, 4, 3), arr_dtype), + np.ones(1, arr_dtype), + np.ones((7, 3), arr_dtype) * -5] + for arr_dtype in all_dtypes] + + unsigned_dtypes = [np.uint32, np.uint64, np.bool_] + all_test_arrays = [ + [np.ones((7, 6, 5, 4, 3), arr_dtype), + np.ones(1, arr_dtype)] + for arr_dtype in unsigned_dtypes] + + for arr_list in all_test_arrays: + for arr in arr_list: + with self.subTest("Test np.sum with {} input ".format(arr.dtype)): + self.assertPreciseEqual(pyfunc(arr), cfunc(arr)) + + def test_sum_axis_kws1(self): + 
""" test sum with axis parameter over a whole range of dtypes """ + pyfunc = array_sum_axis_kws + cfunc = jit(nopython=True)(pyfunc) + all_dtypes = [np.float64, np.float32, np.int64, np.complex64, + np.complex128, TIMEDELTA_M] + all_test_arrays = [ + [np.ones((7, 6, 5, 4, 3), arr_dtype), + np.ones(1, arr_dtype), + np.ones((7, 3), arr_dtype) * -5] + for arr_dtype in all_dtypes] + + unsigned_dtypes = [np.uint64, np.bool_] + all_test_arrays += [ + [np.ones((7, 6, 5, 4, 3), arr_dtype), + np.ones(1, arr_dtype)] + for arr_dtype in unsigned_dtypes] + + for arr_list in all_test_arrays: + for arr in arr_list: + for axis in (0, 1, 2): + if axis > len(arr.shape)-1: + continue + with self.subTest("Testing np.sum(axis) with {} " + "input ".format(arr.dtype)): + self.assertPreciseEqual(pyfunc(arr, axis=axis), + cfunc(arr, axis=axis)) + + def test_sum_axis_kws2(self): + """ testing uint32 and int32 separately + + uint32 and int32 must be tested separately because Numpy's current + behaviour is different in 64bits Windows (accumulates as int32) + and 64bits Linux (accumulates as int64), while Numba has decided to always + accumulate as int64, when the OS is 64bits. No testing has been done + for behaviours in 32 bits platforms. 
+ """ + pyfunc = array_sum_axis_kws + cfunc = jit(nopython=True)(pyfunc) + all_dtypes = [np.int32] + # expected return dtypes in Numba + out_dtypes = {np.dtype('int32'): np.int64, np.dtype('uint32'): np.uint64, + np.dtype('int64'): np.int64, + np.dtype(TIMEDELTA_M): np.dtype(TIMEDELTA_M)} + all_test_arrays = [ + [np.ones((7, 6, 5, 4, 3), arr_dtype), + np.ones(1, arr_dtype), + np.ones((7, 3), arr_dtype) * -5] + for arr_dtype in all_dtypes] + + unsigned_dtypes = [np.uint32] + all_test_arrays += [ + [np.ones((7, 6, 5, 4, 3), arr_dtype), + np.ones(1, arr_dtype)] + for arr_dtype in unsigned_dtypes] + + for arr_list in all_test_arrays: + for arr in arr_list: + for axis in (0, 1, 2): + if axis > len(arr.shape)-1: + continue + with self.subTest("Testing np.sum(axis) with {} " + "input ".format(arr.dtype)): + npy_res = pyfunc(arr, axis=axis) + numba_res = cfunc(arr, axis=axis) + if isinstance(numba_res, np.ndarray): + self.assertPreciseEqual( + npy_res.astype(out_dtypes[arr.dtype]), + numba_res.astype(out_dtypes[arr.dtype])) + else: + # the results are scalars + self.assertEqual(npy_res, numba_res) + + def test_sum_dtype_kws(self): + """ test sum with dtype parameter over a whole range of dtypes """ + pyfunc = array_sum_dtype_kws + cfunc = jit(nopython=True)(pyfunc) + all_dtypes = [np.float64, np.float32, np.int64, np.int32, + np.complex64, np.complex128] + all_test_arrays = [ + [np.ones((7, 6, 5, 4, 3), arr_dtype), + np.ones(1, arr_dtype), + np.ones((7, 3), arr_dtype) * -5] + for arr_dtype in all_dtypes] + + unsigned_dtypes = [np.uint32, np.uint64, np.bool_] + all_test_arrays = [ + [np.ones((7, 6, 5, 4, 3), arr_dtype), + np.ones(1, arr_dtype)] + for arr_dtype in unsigned_dtypes] + + out_dtypes = {np.dtype('float64'): [np.float64], + np.dtype('float32'): [np.float64, np.float32], + np.dtype('int64'): [np.float64, np.int64, np.float32], + np.dtype('int32'): [np.float64, np.int64, np.float32, np.int32], + np.dtype('uint32'): [np.float64, np.int64, np.float32], + 
np.dtype('uint64'): [np.float64, np.int64], + np.dtype('bool'): [np.float64, np.int64, np.float32, np.int32, np.bool_], + np.dtype('complex64'): [np.complex64, np.complex128], + np.dtype('complex128'): [np.complex128]} + + for arr_list in all_test_arrays: + for arr in arr_list: + for out_dtype in out_dtypes[arr.dtype]: + subtest_str = ("Testing np.sum with {} input and {} output" + .format(arr.dtype, out_dtype)) + with self.subTest(subtest_str): + self.assertPreciseEqual(pyfunc(arr, dtype=out_dtype), + cfunc(arr, dtype=out_dtype)) + + def test_sum_axis_dtype_kws(self): + """ test sum with axis and dtype parameters over a whole range of dtypes """ + pyfunc = array_sum_axis_dtype_kws + cfunc = jit(nopython=True)(pyfunc) + all_dtypes = [np.float64, np.float32, np.int64, np.int32, + np.complex64, np.complex128] + all_test_arrays = [ + [np.ones((7, 6, 5, 4, 3), arr_dtype), + np.ones(1, arr_dtype), + np.ones((7, 3), arr_dtype) * -5] + for arr_dtype in all_dtypes] + + unsigned_dtypes = [np.uint32, np.uint64, np.bool_] + all_test_arrays = [ + [np.ones((7, 6, 5, 4, 3), arr_dtype), + np.ones(1, arr_dtype)] + for arr_dtype in unsigned_dtypes] + + out_dtypes = {np.dtype('float64'): [np.float64], + np.dtype('float32'): [np.float64, np.float32], + np.dtype('int64'): [np.float64, np.int64, np.float32], + np.dtype('int32'): [np.float64, np.int64, np.float32, np.int32], + np.dtype('uint32'): [np.float64, np.int64, np.float32], + np.dtype('uint64'): [np.float64, np.uint64], + np.dtype('bool'): [np.float64, np.int64, np.float32, np.int32, np.bool_], + np.dtype('complex64'): [np.complex64, np.complex128], + np.dtype('complex128'): [np.complex128]} + + for arr_list in all_test_arrays: + for arr in arr_list: + for out_dtype in out_dtypes[arr.dtype]: + for axis in (0, 1, 2): + if axis > len(arr.shape) - 1: + continue + subtest_str = ("Testing np.sum with {} input and {} output " + .format(arr.dtype, out_dtype)) + with self.subTest(subtest_str): + py_res = pyfunc(arr, axis=axis, 
dtype=out_dtype) + nb_res = cfunc(arr, axis=axis, dtype=out_dtype) + self.assertPreciseEqual(py_res, nb_res) + + def test_sum_axis_dtype_pos_arg(self): + """ testing that axis and dtype inputs work when passed as positional """ + pyfunc = array_sum_axis_dtype_pos + cfunc = jit(nopython=True)(pyfunc) + dtype = np.float64 + # OK + a = np.ones((7, 6, 5, 4, 3)) + self.assertPreciseEqual(pyfunc(a, 1, dtype), + cfunc(a, 1, dtype)) + + self.assertPreciseEqual(pyfunc(a, 2, dtype), + cfunc(a, 2, dtype)) + + def test_sum_1d_kws(self): + # check 1d reduces to scalar + pyfunc = array_sum_axis_kws + cfunc = jit(nopython=True)(pyfunc) + a = np.arange(10.) + self.assertPreciseEqual(pyfunc(a, axis=0), cfunc(a, axis=0)) + pyfunc = array_sum_const_axis_neg_one + cfunc = jit(nopython=True)(pyfunc) + a = np.arange(10.) + self.assertPreciseEqual(pyfunc(a, axis=-1), cfunc(a, axis=-1)) + + def test_sum_const(self): + pyfunc = array_sum_const_multi + cfunc = jit(nopython=True)(pyfunc) + + arr = np.ones((3, 4, 5, 6, 7, 8)) + axis = 1 + self.assertPreciseEqual(pyfunc(arr, axis), cfunc(arr, axis)) + axis = 2 + self.assertPreciseEqual(pyfunc(arr, axis), cfunc(arr, axis)) + + def test_sum_exceptions(self): + # Exceptions leak references + self.disable_leak_check() + pyfunc = array_sum + cfunc = jit(nopython=True)(pyfunc) + + a = np.ones((7, 6, 5, 4, 3)) + b = np.ones((4, 3)) + # BAD: axis > dimensions + with self.assertRaises(ValueError): + cfunc(b, 2) + # BAD: negative axis + with self.assertRaises(ValueError): + cfunc(a, -1) + # BAD: axis greater than 3 + with self.assertRaises(ValueError): + cfunc(a, 4) + + def test_sum_const_negative(self): + # Exceptions leak references + self.disable_leak_check() + + @jit(nopython=True) + def foo(arr): + return arr.sum(axis=-3) + + # ndim == 4, axis == -3, OK + a = np.ones((1, 2, 3, 4)) + self.assertPreciseEqual(foo(a), foo.py_func(a)) + # ndim == 3, axis == -3, OK + a = np.ones((1, 2, 3)) + self.assertPreciseEqual(foo(a), foo.py_func(a)) + # ndim == 2, 
axis == -3, BAD + a = np.ones((1, 2)) + with self.assertRaises(NumbaValueError) as raises: + foo(a) + errmsg = "'axis' entry (-1) is out of bounds" + self.assertIn(errmsg, str(raises.exception)) + with self.assertRaises(ValueError) as raises: + foo.py_func(a) + self.assertIn("out of bounds", str(raises.exception)) + + def test_cumsum(self): + pyfunc = array_cumsum + cfunc = jit(nopython=True)(pyfunc) + # OK + a = np.ones((2, 3)) + self.assertPreciseEqual(pyfunc(a), cfunc(a)) + # BAD: with axis + with self.assertRaises(TypingError): + cfunc(a, 1) + # BAD: with kw axis + pyfunc = array_cumsum_kws + cfunc = jit(nopython=True)(pyfunc) + with self.assertRaises(TypingError): + cfunc(a, axis=1) + + def test_take(self): + pyfunc = array_take + cfunc = jit(nopython=True)(pyfunc) + + def check(arr, ind): + expected = pyfunc(arr, ind) + got = cfunc(arr, ind) + self.assertPreciseEqual(expected, got) + if hasattr(expected, 'order'): + self.assertEqual(expected.order == got.order) + + # need to check: + # 1. scalar index + # 2. 1d array index + # 3. nd array index, >2d and F order + # 4. reflected list + # 5. 
tuples + + test_indices = [] + test_indices.append(1) + test_indices.append(5) + test_indices.append(11) + test_indices.append(-2) + test_indices.append(np.array([1, 5, 1, 11, 3])) + test_indices.append(np.array([[1, 5, 1], [11, 3, 0]], order='F')) + test_indices.append(np.array([[[1, 5, 1], [11, 3, 0]]])) + test_indices.append(np.array([[[[1, 5]], [[11, 0]], [[1, 2]]]])) + test_indices.append([1, 5, 1, 11, 3]) + test_indices.append((1, 5, 1)) + test_indices.append(((1, 5, 1), (11, 3, 2))) + test_indices.append((((1,), (5,), (1,)), ((11,), (3,), (2,)))) + + layouts = cycle(['C', 'F', 'A']) + + for dt in [np.float64, np.int64, np.complex128]: + A = np.arange(12, dtype=dt).reshape((4, 3), order=next(layouts)) + for ind in test_indices: + check(A, ind) + + #check illegal access raises + A = np.arange(12, dtype=dt).reshape((4, 3), order=next(layouts)) + szA = A.size + illegal_indices = [szA, -szA - 1, np.array(szA), np.array(-szA - 1), + [szA], [-szA - 1]] + for x in illegal_indices: + with self.assertRaises(IndexError): + cfunc(A, x) # oob raises + + # check float indexing raises + with self.assertRaises(TypingError): + cfunc(A, [1.7]) + + #exceptions leak refs + self.disable_leak_check() + + def test_fill(self): + pyfunc = array_fill + cfunc = jit(nopython=True)(pyfunc) + def check(arr, val): + expected = np.copy(arr) + erv = pyfunc(expected, val) + self.assertTrue(erv is None) + got = np.copy(arr) + grv = cfunc(got, val) + self.assertTrue(grv is None) + # check mutation is the same + self.assertPreciseEqual(expected, got) + + # scalar + A = np.arange(1) + for x in [np.float64, np.bool_]: + check(A, x(10)) + + # 2d + A = np.arange(12).reshape(3, 4) + for x in [np.float64, np.bool_]: + check(A, x(10)) + + # 4d + A = np.arange(48, dtype=np.complex64).reshape(2, 3, 4, 2) + for x in [np.float64, np.complex128, np.bool_]: + check(A, x(10)) + + def test_real(self): + pyfunc = array_real + cfunc = jit(nopython=True)(pyfunc) + + x = np.linspace(-10, 10) + 
np.testing.assert_equal(pyfunc(x), cfunc(x)) + + x, y = np.meshgrid(x, x) + z = x + 1j*y + np.testing.assert_equal(pyfunc(z), cfunc(z)) + + def test_imag(self): + pyfunc = array_imag + cfunc = jit(nopython=True)(pyfunc) + + x = np.linspace(-10, 10) + np.testing.assert_equal(pyfunc(x), cfunc(x)) + + x, y = np.meshgrid(x, x) + z = x + 1j*y + np.testing.assert_equal(pyfunc(z), cfunc(z)) + + def _lower_clip_result_test_util(self, func, a, a_min, a_max): + # verifies that type-inference is working on the return value + # this used to trigger issue #3489 + def lower_clip_result(a): + return np.expm1(func(a, a_min, a_max)) + + np.testing.assert_almost_equal( + lower_clip_result(a), + jit(nopython=True)(lower_clip_result)(a)) + + def test_clip(self): + has_out = (np_clip, np_clip_kwargs, array_clip, array_clip_kwargs) + has_no_out = (np_clip_no_out, array_clip_no_out) + # TODO: scalars are not tested (issue #3469) + for a in (np.linspace(-10, 10, 101), + np.linspace(-10, 10, 40).reshape(5, 2, 4)): + for pyfunc in has_out + has_no_out: + cfunc = jit(nopython=True)(pyfunc) + + msg = "array_clip: must set either max or min" + with self.assertRaisesRegex(ValueError, msg): + cfunc(a, None, None) + + np.testing.assert_equal(pyfunc(a, 0, None), cfunc(a, 0, None)) + np.testing.assert_equal(pyfunc(a, None, 0), cfunc(a, None, 0)) + + np.testing.assert_equal(pyfunc(a, -5, 5), cfunc(a, -5, 5)) + + if pyfunc in has_out: + pyout = np.empty_like(a) + cout = np.empty_like(a) + np.testing.assert_equal(pyfunc(a, -5, 5, pyout), + cfunc(a, -5, 5, cout)) + np.testing.assert_equal(pyout, cout) + + self._lower_clip_result_test_util(cfunc, a, -5, 5) + + def test_clip_array_min_max(self): + has_out = (np_clip, np_clip_kwargs, array_clip, array_clip_kwargs) + has_no_out = (np_clip_no_out, array_clip_no_out) + # TODO: scalars are not tested (issue #3469) + a = np.linspace(-10, 10, 40).reshape(5, 2, 4) + a_min_arr = np.arange(-8, 0).astype(a.dtype).reshape(2, 4) + a_max_arr = np.arange(0, 
8).astype(a.dtype).reshape(2, 4) + mins = [0, -5, a_min_arr, None] + maxs = [0, 5, a_max_arr, None] + for pyfunc in has_out + has_no_out: + cfunc = jit(nopython=True)(pyfunc) + + for a_min in mins: + for a_max in maxs: + + if a_min is None and a_max is None: + msg = "array_clip: must set either max or min" + with self.assertRaisesRegex(ValueError, msg): + cfunc(a, None, None) + continue + + np.testing.assert_equal(pyfunc(a, a_min, a_max), cfunc(a, a_min, a_max)) + + if pyfunc in has_out: + pyout = np.empty_like(a) + cout = np.empty_like(a) + np.testing.assert_equal(pyfunc(a, a_min, a_max, pyout), + cfunc(a, a_min, a_max, cout)) + np.testing.assert_equal(pyout, cout) + + self._lower_clip_result_test_util(cfunc, a, a_min, a_max) + + def test_clip_bad_array(self): + cfunc = jit(nopython=True)(np_clip) + msg = '.*The argument "a" must be array-like.*' + with self.assertRaisesRegex(TypingError, msg): + cfunc(None, 0, 10) + + def test_clip_bad_min(self): + cfunc = jit(nopython=True)(np_clip) + msg = '.*The argument "a_min" must be a number.*' + with self.assertRaisesRegex(TypingError, msg): + cfunc(1, 'a', 10) + + def test_clip_bad_max(self): + cfunc = jit(nopython=True)(np_clip) + msg = '.*The argument "a_max" must be a number.*' + with self.assertRaisesRegex(TypingError, msg): + cfunc(1, 1, 'b') + + def test_clip_bad_out(self): + cfunc = jit(nopython=True)(np_clip) + msg = '.*The argument "out" must be an array if it is provided.*' + with self.assertRaisesRegex(TypingError, msg): + cfunc(5, 1, 10, out=6) + + def test_clip_no_broadcast(self): + self.disable_leak_check() + cfunc = jit(nopython=True)(np_clip) + msg = ".*shape mismatch: objects cannot be broadcast to a single shape.*" + a = np.linspace(-10, 10, 40).reshape(5, 2, 4) + a_min_arr = np.arange(-5, 0).astype(a.dtype).reshape(5, 1) + a_max_arr = np.arange(0, 5).astype(a.dtype).reshape(5, 1) + min_max = [(0, a_max_arr), (-5, a_max_arr), + (a_min_arr, a_max_arr), + (a_min_arr, 0), (a_min_arr, 5)] + for a_min, a_max 
in min_max: + with self.assertRaisesRegex(ValueError, msg): + cfunc(a, a_min, a_max) + + def test_conj(self): + for pyfunc in [array_conj, array_conjugate]: + cfunc = jit(nopython=True)(pyfunc) + + x = np.linspace(-10, 10) + np.testing.assert_equal(pyfunc(x), cfunc(x)) + + x, y = np.meshgrid(x, x) + z = x + 1j*y + np.testing.assert_equal(pyfunc(z), cfunc(z)) + + def test_unique(self): + pyfunc = np_unique + cfunc = jit(nopython=True)(pyfunc) + + def check(a): + np.testing.assert_equal(pyfunc(a), cfunc(a)) + + check(np.array([[1, 1, 3], [3, 4, 5]])) + check(np.array(np.zeros(5))) + check(np.array([[3.1, 3.1], [1.7, 2.29], [3.3, 1.7]])) + check(np.array([])) + + @needs_blas + def test_array_dot(self): + # just ensure that the dot impl dispatches correctly, do + # not test dot itself, this is done in test_linalg. + pyfunc = array_dot + cfunc = jit(nopython=True)(pyfunc) + a = np.arange(20.).reshape(4, 5) + b = np.arange(5.) + np.testing.assert_equal(pyfunc(a, b), cfunc(a, b)) + + # check that chaining works + pyfunc = array_dot_chain + cfunc = jit(nopython=True)(pyfunc) + a = np.arange(16.).reshape(4, 4) + np.testing.assert_equal(pyfunc(a, a), cfunc(a, a)) + + def test_array_ctor_with_dtype_arg(self): + # Test using np.dtype and np.generic (i.e. 
np.dtype.type) has args + pyfunc = array_ctor + cfunc = jit(nopython=True)(pyfunc) + n = 2 + args = n, np.int32 + np.testing.assert_array_equal(pyfunc(*args), cfunc(*args)) + args = n, np.dtype('int32') + np.testing.assert_array_equal(pyfunc(*args), cfunc(*args)) + args = n, np.float32 + np.testing.assert_array_equal(pyfunc(*args), cfunc(*args)) + args = n, np.dtype('f4') + np.testing.assert_array_equal(pyfunc(*args), cfunc(*args)) + +class TestArrayComparisons(TestCase): + + def test_identity(self): + def check(a, b, expected): + cfunc = njit((typeof(a), typeof(b)))(pyfunc) + self.assertPreciseEqual(cfunc(a, b), + (expected, not expected)) + + pyfunc = identity_usecase + + arr = np.zeros(10, dtype=np.int32).reshape((2, 5)) + check(arr, arr, True) + check(arr, arr[:], True) + check(arr, arr.copy(), False) + check(arr, arr.view('uint32'), False) + check(arr, arr.T, False) + check(arr, arr[:-1], False) + + # Other comparison operators ('==', etc.) are tested in test_ufuncs + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_array_reductions.py b/venv/lib/python3.10/site-packages/numba/tests/test_array_reductions.py new file mode 100644 index 0000000000000000000000000000000000000000..e25fb43ce241e06c97e16e22a9fd8a51b74575bb --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_array_reductions.py @@ -0,0 +1,1201 @@ +from itertools import product, combinations_with_replacement + +import numpy as np + +from numba import jit, njit, typeof +from numba.np.numpy_support import numpy_version +from numba.tests.support import TestCase, MemoryLeakMixin, tag, skip_if_numpy_2 +import unittest + + +def array_all(arr): + return arr.all() + +def array_all_global(arr): + return np.all(arr) + +def array_any(arr): + return arr.any() + +def array_any_global(arr): + return np.any(arr) + +def array_cumprod(arr): + return arr.cumprod() + +def array_cumprod_global(arr): + return np.cumprod(arr) + +def 
array_nancumprod(arr): + return np.nancumprod(arr) + +def array_cumsum(arr): + return arr.cumsum() + +def array_cumsum_global(arr): + return np.cumsum(arr) + +def array_nancumsum(arr): + return np.nancumsum(arr) + +def array_sum(arr): + return arr.sum() + +def array_sum_global(arr): + return np.sum(arr) + +def array_prod(arr): + return arr.prod() + +def array_prod_global(arr): + return np.prod(arr) + +def array_mean(arr): + return arr.mean() + +def array_mean_global(arr): + return np.mean(arr) + +def array_var(arr): + return arr.var() + +def array_var_global(arr): + return np.var(arr) + +def array_std(arr): + return arr.std() + +def array_std_global(arr): + return np.std(arr) + +def array_min(arr): + return arr.min() + +def array_min_global(arr): + return np.min(arr) + +def array_amin(arr): + return np.amin(arr) + +def array_max(arr): + return arr.max() + +def array_max_global(arr): + return np.max(arr) + +def array_amax(arr): + return np.amax(arr) + +def array_argmin(arr): + return arr.argmin() + +def array_argmin_global(arr): + return np.argmin(arr) + +def array_argmax(arr): + return arr.argmax() + +def array_argmax_global(arr): + return np.argmax(arr) + +def array_median_global(arr): + return np.median(arr) + +def array_nanmin(arr): + return np.nanmin(arr) + +def array_nanmax(arr): + return np.nanmax(arr) + +def array_nanmean(arr): + return np.nanmean(arr) + +def array_nansum(arr): + return np.nansum(arr) + +def array_nanprod(arr): + return np.nanprod(arr) + +def array_nanstd(arr): + return np.nanstd(arr) + +def array_nanvar(arr): + return np.nanvar(arr) + +def array_nanmedian_global(arr): + return np.nanmedian(arr) + +def array_percentile_global(arr, q): + return np.percentile(arr, q) + +def array_nanpercentile_global(arr, q): + return np.nanpercentile(arr, q) + +def array_ptp_global(a): + return np.ptp(a) + +def array_ptp(a): + return a.ptp() + +def array_quantile_global(arr, q): + return np.quantile(arr, q) + +def array_nanquantile_global(arr, q): + return 
np.nanquantile(arr, q) + +def base_test_arrays(dtype): + if dtype == np.bool_: + def factory(n): + assert n % 2 == 0 + return np.bool_([0, 1] * (n // 2)) + else: + def factory(n): + return np.arange(n, dtype=dtype) + 1 + + a1 = factory(10) + a2 = factory(10).reshape(2, 5) + # The prod() of this array fits in a 32-bit int + a3 = (factory(12))[::-1].reshape((2, 3, 2), order='A') + assert not (a3.flags.c_contiguous or a3.flags.f_contiguous) + + return [a1, a2, a3] + +def full_test_arrays(dtype): + array_list = base_test_arrays(dtype) + + # Add floats with some mantissa + if dtype == np.float32: + array_list += [a / 10 for a in array_list] + + # add imaginary part + if dtype == np.complex64: + acc = [] + for a in array_list: + tmp = a / 10 + 1j * a / 11 + tmp[::2] = np.conj(tmp[::2]) + acc.append(tmp) + array_list.extend(acc) + + for a in array_list: + assert a.dtype == np.dtype(dtype) + return array_list + +def run_comparative(compare_func, test_array): + cfunc = njit(compare_func) + numpy_result = compare_func(test_array) + numba_result = cfunc(test_array) + + return numpy_result, numba_result + + +class TestArrayReductions(MemoryLeakMixin, TestCase): + """ + Test array reduction methods and functions such as .sum(), .max(), etc. 
+ """ + + def setUp(self): + super(TestArrayReductions, self).setUp() + np.random.seed(42) + + def check_reduction_basic(self, pyfunc, **kwargs): + # Basic reduction checks on 1-d float64 arrays + cfunc = jit(nopython=True)(pyfunc) + def check(arr): + self.assertPreciseEqual(pyfunc(arr), cfunc(arr), **kwargs) + + arr = np.float64([1.0, 2.0, 0.0, -0.0, 1.0, -1.5]) + check(arr) + arr = np.float64([-0.0, -1.5]) + check(arr) + arr = np.float64([-1.5, 2.5, 'inf']) + check(arr) + arr = np.float64([-1.5, 2.5, '-inf']) + check(arr) + arr = np.float64([-1.5, 2.5, 'inf', '-inf']) + check(arr) + arr = np.float64(['nan', -1.5, 2.5, 'nan', 3.0]) + check(arr) + arr = np.float64(['nan', -1.5, 2.5, 'nan', 'inf', '-inf', 3.0]) + check(arr) + arr = np.float64([5.0, 'nan', -1.5, 'nan']) + check(arr) + # Only NaNs + arr = np.float64(['nan', 'nan']) + check(arr) + + def test_all_basic(self, pyfunc=array_all): + cfunc = jit(nopython=True)(pyfunc) + def check(arr): + self.assertPreciseEqual(pyfunc(arr), cfunc(arr)) + + arr = np.float64([1.0, 0.0, float('inf'), float('nan')]) + check(arr) + arr[1] = -0.0 + check(arr) + arr[1] = 1.5 + check(arr) + arr = arr.reshape((2, 2)) + check(arr) + check(arr[::-1]) + + def test_any_basic(self, pyfunc=array_any): + cfunc = jit(nopython=True)(pyfunc) + def check(arr): + self.assertPreciseEqual(pyfunc(arr), cfunc(arr)) + + arr = np.float64([0.0, -0.0, 0.0, 0.0]) + check(arr) + arr[2] = float('nan') + check(arr) + arr[2] = float('inf') + check(arr) + arr[2] = 1.5 + check(arr) + arr = arr.reshape((2, 2)) + check(arr) + check(arr[::-1]) + + def test_sum_basic(self): + self.check_reduction_basic(array_sum) + + def test_mean_basic(self): + self.check_reduction_basic(array_mean) + + def test_var_basic(self): + self.check_reduction_basic(array_var, prec='double') + + def test_std_basic(self): + self.check_reduction_basic(array_std) + + def test_min_basic(self): + self.check_reduction_basic(array_min) + + def test_max_basic(self): + 
self.check_reduction_basic(array_max) + + def test_argmin_basic(self): + self.check_reduction_basic(array_argmin) + + def test_argmax_basic(self): + self.check_reduction_basic(array_argmax) + + def test_nanmin_basic(self): + self.check_reduction_basic(array_nanmin) + + def test_nanmax_basic(self): + self.check_reduction_basic(array_nanmax) + + def test_nanmean_basic(self): + self.check_reduction_basic(array_nanmean) + + def test_nansum_basic(self): + self.check_reduction_basic(array_nansum) + + def test_nanprod_basic(self): + self.check_reduction_basic(array_nanprod) + + def test_nanstd_basic(self): + self.check_reduction_basic(array_nanstd) + + def test_nanvar_basic(self): + self.check_reduction_basic(array_nanvar, prec='double') + + def check_median_basic(self, pyfunc, array_variations): + cfunc = jit(nopython=True)(pyfunc) + def check(arr): + expected = pyfunc(arr) + got = cfunc(arr) + self.assertPreciseEqual(got, expected) + + # Odd sizes + def check_odd(a): + check(a) + a = a.reshape((9, 7)) + check(a) + check(a.T) + for a in array_variations(np.arange(63) + 10.5): + check_odd(a) + + # Even sizes + def check_even(a): + check(a) + a = a.reshape((4, 16)) + check(a) + check(a.T) + for a in array_variations(np.arange(64) + 10.5): + check_even(a) + + @staticmethod + def _array_variations(a): + # Sorted, reversed, random, many duplicates, many NaNs, all NaNs + yield a + a = a[::-1].copy() + yield a + np.random.shuffle(a) + yield a + a[a % 4 >= 1] = 3.5 + yield a + a[a % 4 >= 2] = np.nan + yield a + a[:] = np.nan + yield a + + def test_median_basic(self): + pyfunc = array_median_global + + def variations(a): + # Sorted, reversed, random, many duplicates + yield a + a = a[::-1].copy() + yield a + np.random.shuffle(a) + yield a + a[a % 4 >= 1] = 3.5 + yield a + + self.check_median_basic(pyfunc, variations) + + def check_percentile_and_quantile(self, pyfunc, q_upper_bound): + cfunc = jit(nopython=True)(pyfunc) + + def check(a, q, abs_tol=1e-12): + expected = pyfunc(a, 
q) + got = cfunc(a, q) + # NOTE: inf/nan is not checked, seems to be susceptible to upstream + # changes + finite = np.isfinite(expected) + if np.all(finite): + self.assertPreciseEqual(got, expected, abs_tol=abs_tol) + else: + self.assertPreciseEqual(got[finite], expected[finite], + abs_tol=abs_tol) + + a = self.random.randn(27).reshape(3, 3, 3) + q = np.linspace(0, q_upper_bound, 14)[::-1] + check(a, q) + check(a, 0) + check(a, q_upper_bound / 2) + check(a, q_upper_bound) + + not_finite = [np.nan, -np.inf, np.inf] + a.flat[:10] = self.random.choice(not_finite, 10) + self.random.shuffle(a) + self.random.shuffle(q) + check(a, q) + + a = a.flatten().tolist() + q = q.flatten().tolist() + check(a, q) + check(tuple(a), tuple(q)) + + a = self.random.choice([1, 2, 3, 4], 10) + q = np.linspace(0, q_upper_bound, 5) + check(a, q) + + # tests inspired by + # https://github.com/numpy/numpy/blob/345b2f6e/numpy/lib/tests/test_function_base.py + x = np.arange(8) * 0.5 + np.testing.assert_equal(cfunc(x, 0), 0.) 
+ np.testing.assert_equal(cfunc(x, q_upper_bound), 3.5) + np.testing.assert_equal(cfunc(x, q_upper_bound / 2), 1.75) + + x = np.arange(12).reshape(3, 4) + q = np.array((0.25, 0.5, 1.0)) * q_upper_bound + np.testing.assert_equal(cfunc(x, q), [2.75, 5.5, 11.0]) + + x = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) + q = np.array((0.25, 0.50)) * q_upper_bound + np.testing.assert_equal(cfunc(x, q).shape, (2,)) + + q = np.array((0.25, 0.50, 0.75)) * q_upper_bound + np.testing.assert_equal(cfunc(x, q).shape, (3,)) + + x = np.arange(12).reshape(3, 4) + np.testing.assert_equal(cfunc(x, q_upper_bound / 2), 5.5) + self.assertTrue(np.isscalar(cfunc(x, q_upper_bound / 2))) + + np.testing.assert_equal(cfunc([1, 2, 3], 0), 1) + + a = np.array([2, 3, 4, 1]) + cfunc(a, [q_upper_bound / 2]) + np.testing.assert_equal(a, np.array([2, 3, 4, 1])) + + def check_percentile_edge_cases(self, pyfunc, q_upper_bound=100): + cfunc = jit(nopython=True)(pyfunc) + + def check(a, q, abs_tol=1e-14): + expected = pyfunc(a, q) + got = cfunc(a, q) + # NOTE: inf/nan is not checked, seems to be susceptible to upstream + # changes + finite = np.isfinite(expected) + if np.all(finite): + self.assertPreciseEqual(got, expected, abs_tol=abs_tol) + else: + self.assertPreciseEqual(got[finite], expected[finite], + abs_tol=abs_tol) + + def convert_to_float_and_check(a, q, abs_tol=1e-14): + expected = pyfunc(a, q).astype(np.float64) + got = cfunc(a, q) + self.assertPreciseEqual(got, expected, abs_tol=abs_tol) + + def _array_combinations(elements): + for i in range(1, 10): + for comb in combinations_with_replacement(elements, i): + yield np.array(comb) + + # high number of combinations, many including non-finite values + q = (0, 0.1 * q_upper_bound, 0.2 * q_upper_bound, q_upper_bound) + element_pool = (1, -1, np.nan, np.inf, -np.inf) + for a in _array_combinations(element_pool): + check(a, q) + + # edge cases - numpy exhibits behavioural differences across + # platforms, see: 
https://github.com/numpy/numpy/issues/13272 + if q_upper_bound == 1: + _check = convert_to_float_and_check + else: + _check = check + + a = np.array(5) + q = np.array(1) + _check(a, q) + + a = 5 + q = q_upper_bound / 2 + _check(a, q) + + def check_percentile_exceptions(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + + def check_err(a, q): + with self.assertRaises(ValueError) as raises: + cfunc(a, q) + self.assertEqual( + "Percentiles must be in the range [0, 100]", + str(raises.exception) + ) + + # Exceptions leak references + self.disable_leak_check() + + a = np.arange(5) + check_err(a, -5) # q less than 0 + check_err(a, (1, 10, 105)) # q contains value greater than 100 + check_err(a, (1, 10, np.nan)) # q contains nan + + with self.assertTypingError() as e: + a = np.arange(5) * 1j + q = 0.1 + cfunc(a, q) + + self.assertIn('Not supported for complex dtype', str(e.exception)) + + def check_quantile_exceptions(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + + def check_err(a, q): + with self.assertRaises(ValueError) as raises: + cfunc(a, q) + self.assertEqual( + "Quantiles must be in the range [0, 1]", + str(raises.exception) + ) + + # Exceptions leak references + self.disable_leak_check() + + a = np.arange(5) + check_err(a, -0.5) # q less than 0 + check_err(a, (0.1, 0.10, 1.05)) # q contains value greater than 1 + check_err(a, (0.1, 0.10, np.nan)) # q contains nan + + with self.assertTypingError() as e: + a = np.arange(5) * 1j + q = 0.1 + cfunc(a, q) + + self.assertIn('Not supported for complex dtype', str(e.exception)) + + def test_percentile_basic(self): + pyfunc = array_percentile_global + self.check_percentile_and_quantile(pyfunc, q_upper_bound=100) + self.check_percentile_edge_cases(pyfunc, q_upper_bound=100) + self.check_percentile_exceptions(pyfunc) + + def test_nanpercentile_basic(self): + pyfunc = array_nanpercentile_global + self.check_percentile_and_quantile(pyfunc, q_upper_bound=100) + self.check_percentile_edge_cases(pyfunc, 
q_upper_bound=100) + self.check_percentile_exceptions(pyfunc) + + def test_quantile_basic(self): + pyfunc = array_quantile_global + self.check_percentile_and_quantile(pyfunc, q_upper_bound=1) + self.check_percentile_edge_cases(pyfunc, q_upper_bound=1) + self.check_quantile_exceptions(pyfunc) + + def test_nanquantile_basic(self): + pyfunc = array_nanquantile_global + self.check_percentile_and_quantile(pyfunc, q_upper_bound=1) + self.check_percentile_edge_cases(pyfunc, q_upper_bound=1) + self.check_quantile_exceptions(pyfunc) + + def test_nanmedian_basic(self): + pyfunc = array_nanmedian_global + self.check_median_basic(pyfunc, self._array_variations) + + def test_array_sum_global(self): + arr = np.arange(10, dtype=np.int32) + arrty = typeof(arr) + self.assertEqual(arrty.ndim, 1) + self.assertEqual(arrty.layout, 'C') + + cfunc = njit((arrty,),)(array_sum_global) + self.assertEqual(np.sum(arr), cfunc(arr)) + + def test_array_prod_int_1d(self): + arr = np.arange(10, dtype=np.int32) + 1 + arrty = typeof(arr) + self.assertEqual(arrty.ndim, 1) + self.assertEqual(arrty.layout, 'C') + + cfunc = njit((arrty,))(array_prod) + self.assertEqual(arr.prod(), cfunc(arr)) + + def test_array_prod_float_1d(self): + arr = np.arange(10, dtype=np.float32) + 1 / 10 + arrty = typeof(arr) + self.assertEqual(arrty.ndim, 1) + self.assertEqual(arrty.layout, 'C') + + cfunc = njit((arrty,))(array_prod) + np.testing.assert_allclose(arr.prod(), cfunc(arr)) + + def test_array_prod_global(self): + arr = np.arange(10, dtype=np.int32) + arrty = typeof(arr) + self.assertEqual(arrty.ndim, 1) + self.assertEqual(arrty.layout, 'C') + + cfunc = njit((arrty,))(array_prod_global) + np.testing.assert_allclose(np.prod(arr), cfunc(arr)) + + def check_cumulative(self, pyfunc): + arr = np.arange(2, 10, dtype=np.int16) + expected, got = run_comparative(pyfunc, arr) + self.assertPreciseEqual(got, expected) + arr = np.linspace(2, 8, 6) + expected, got = run_comparative(pyfunc, arr) + self.assertPreciseEqual(got, 
expected) + arr = arr.reshape((3, 2)) + expected, got = run_comparative(pyfunc, arr) + self.assertPreciseEqual(got, expected) + + def test_array_cumsum(self): + self.check_cumulative(array_cumsum) + + def test_array_cumsum_global(self): + self.check_cumulative(array_cumsum_global) + + def test_array_cumprod(self): + self.check_cumulative(array_cumprod) + + def test_array_cumprod_global(self): + self.check_cumulative(array_cumprod_global) + + def check_aggregation_magnitude(self, pyfunc, is_prod=False): + """ + Check that integer overflows are avoided (issue #931). + """ + # Overflows are avoided here (ints are cast either to intp + # or float64). + n_items = 2 if is_prod else 10 # avoid overflow on prod() + arr = (np.arange(n_items) + 40000).astype('int16') + npr, nbr = run_comparative(pyfunc, arr) + self.assertPreciseEqual(npr, nbr) + # Overflows are avoided for functions returning floats here. + # Other functions may wrap around. + arr = (np.arange(10) + 2**60).astype('int64') + npr, nbr = run_comparative(pyfunc, arr) + self.assertPreciseEqual(npr, nbr) + arr = arr.astype('uint64') + npr, nbr = run_comparative(pyfunc, arr) + self.assertPreciseEqual(npr, nbr) + + def test_sum_magnitude(self): + self.check_aggregation_magnitude(array_sum) + self.check_aggregation_magnitude(array_sum_global) + + def test_cumsum_magnitude(self): + self.check_aggregation_magnitude(array_cumsum) + self.check_aggregation_magnitude(array_cumsum_global) + + def test_nancumsum_magnitude(self): + self.check_aggregation_magnitude(array_nancumsum, is_prod=True) + + def test_prod_magnitude(self): + self.check_aggregation_magnitude(array_prod, is_prod=True) + self.check_aggregation_magnitude(array_prod_global, is_prod=True) + + def test_cumprod_magnitude(self): + self.check_aggregation_magnitude(array_cumprod, is_prod=True) + self.check_aggregation_magnitude(array_cumprod_global, is_prod=True) + + def test_nancumprod_magnitude(self): + self.check_aggregation_magnitude(array_nancumprod, 
is_prod=True) + + def test_mean_magnitude(self): + self.check_aggregation_magnitude(array_mean) + self.check_aggregation_magnitude(array_mean_global) + + def test_var_magnitude(self): + self.check_aggregation_magnitude(array_var) + self.check_aggregation_magnitude(array_var_global) + + def test_std_magnitude(self): + self.check_aggregation_magnitude(array_std) + self.check_aggregation_magnitude(array_std_global) + + def _do_check_nptimedelta(self, pyfunc, arr): + arrty = typeof(arr) + cfunc = jit(nopython=True)(pyfunc) + + self.assertPreciseEqual(cfunc(arr), pyfunc(arr)) + # Even vs. odd size, for np.median + self.assertPreciseEqual(cfunc(arr[:-1]), pyfunc(arr[:-1])) + # Test with different orders, for np.median + arr = arr[::-1].copy() # Keep 'C' layout + self.assertPreciseEqual(cfunc(arr), pyfunc(arr)) + np.random.shuffle(arr) + self.assertPreciseEqual(cfunc(arr), pyfunc(arr)) + # Test with a NaT + if 'median' not in pyfunc.__name__: + # Test with (val, NaT)^N (and with the random NaT from above) + # use a loop, there's some weird thing/bug with arr[1::2] = 'NaT' + + # Further Numba has bug(s) relating to NaN/NaT handling in anything + # using a partition such as np.median + for x in range(1, len(arr), 2): + arr[x] = 'NaT' + self.assertPreciseEqual(cfunc(arr), pyfunc(arr)) + # Test with all NaTs + arr.fill(arrty.dtype('NaT')) + self.assertPreciseEqual(cfunc(arr), pyfunc(arr)) + + def check_npdatetime(self, pyfunc): + arr = np.arange(10).astype(dtype='M8[Y]') + self._do_check_nptimedelta(pyfunc, arr) + + def check_nptimedelta(self, pyfunc): + arr = np.arange(10).astype(dtype='m8[s]') + self._do_check_nptimedelta(pyfunc, arr) + + def test_min_npdatetime(self): + self.check_npdatetime(array_min) + self.check_nptimedelta(array_min) + + def test_max_npdatetime(self): + self.check_npdatetime(array_max) + self.check_nptimedelta(array_max) + + def test_argmin_npdatetime(self): + self.check_npdatetime(array_argmin) + self.check_nptimedelta(array_argmin) + + def 
test_argmax_npdatetime(self): + self.check_npdatetime(array_argmax) + self.check_nptimedelta(array_argmax) + + def test_median_npdatetime(self): + self.check_nptimedelta(array_median_global) + + def test_sum_npdatetime(self): + self.check_nptimedelta(array_sum) + + def test_cumsum_npdatetime(self): + self.check_nptimedelta(array_cumsum) + + def test_mean_npdatetime(self): + self.check_nptimedelta(array_mean) + + def check_nan_cumulative(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + + def check(a): + expected = pyfunc(a) + got = cfunc(a) + self.assertPreciseEqual(expected, got) + + def _set_some_values_to_nan(a): + p = a.size // 2 # set approx half elements to NaN + np.put(a, np.random.choice(range(a.size), p, replace=False), np.nan) + return a + + def a_variations(): + yield np.linspace(-1, 3, 60).reshape(3, 4, 5) + yield np.array([np.inf, 3, 4]) + yield np.array([True, True, True, False]) + yield np.arange(1, 10) + yield np.asfortranarray(np.arange(1, 64) - 33.3) + yield np.arange(1, 10, dtype=np.float32)[::-1] + + for a in a_variations(): + check(a) # no nans + check(_set_some_values_to_nan(a.astype(np.float64))) # about 50% nans + + # edge cases + check(np.array([])) + check(np.full(10, np.nan)) + + parts = np.array([np.nan, 2, np.nan, 4, 5, 6, 7, 8, 9]) + + a = parts + 1j * parts[::-1] + a = a.reshape(3, 3) + check(a) + + def test_nancumprod_basic(self): + self.check_cumulative(array_nancumprod) + self.check_nan_cumulative(array_nancumprod) + + def test_nancumsum_basic(self): + self.check_cumulative(array_nancumsum) + self.check_nan_cumulative(array_nancumsum) + + def test_ptp_basic(self): + pyfunc = array_ptp_global + cfunc = jit(nopython=True)(pyfunc) + + def check(a): + expected = pyfunc(a) + got = cfunc(a) + self.assertPreciseEqual(expected, got) + + def a_variations(): + yield np.arange(10) + yield np.array([-1.1, np.nan, 2.2]) + yield np.array([-np.inf, 5]) + yield (4, 2, 5) + yield (1,) + yield np.full(5, 5) + yield [2.2, -2.3, 0.1] + a = 
np.linspace(-10, 10, 16).reshape(4, 2, 2) + yield a + yield np.asfortranarray(a) + yield a[::-1] + np.random.RandomState(0).shuffle(a) + yield a + yield 6 + yield 6.5 + yield -np.inf + yield 1 + 4j + yield [2.2, np.nan] + yield [2.2, np.inf] + yield ((4.1, 2.0, -7.6), (4.3, 2.7, 5.2)) + yield np.full(5, np.nan) + yield 1 + np.nan * 1j + yield np.nan + np.nan * 1j + yield np.nan + + for a in a_variations(): + check(a) + + @skip_if_numpy_2 + def test_ptp_method(self): + # checks wiring of np.ndarray.ptp() only, `np.ptp` test above checks + # the actual alg + pyfunc = array_ptp + cfunc = jit(nopython=True)(pyfunc) + + a = np.arange(10) + expected = pyfunc(a) + got = cfunc(a) + self.assertPreciseEqual(expected, got) + + def test_ptp_complex(self): + pyfunc = array_ptp_global + cfunc = jit(nopython=True)(pyfunc) + + def check(a): + expected = pyfunc(a) + got = cfunc(a) + self.assertPreciseEqual(expected, got) + + def make_array(real_nan=False, imag_nan=False): + real = np.linspace(-4, 4, 25) + if real_nan: + real[4:9] = np.nan + imag = np.linspace(-5, 5, 25) + if imag_nan: + imag[7:12] = np.nan + return (real + 1j * imag).reshape(5, 5) + + for real_nan, imag_nan in product([True, False], repeat=2): + comp = make_array(real_nan, imag_nan) + check(comp) + + real = np.ones(8) + imag = np.arange(-4, 4) + comp = real + 1j * imag + check(comp) + comp = real - 1j * imag + check(comp) + + comp = np.full((4, 4), fill_value=(1 - 1j)) + check(comp) + + def test_ptp_exceptions(self): + pyfunc = array_ptp_global + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + with self.assertTypingError() as e: + cfunc(np.array((True, True, False))) + + msg = "Boolean dtype is unsupported (as per NumPy)" + self.assertIn(msg, str(e.exception)) + + with self.assertRaises(ValueError) as e: + cfunc(np.array([])) + + msg = "zero-size array reduction not possible" + self.assertIn(msg, str(e.exception)) + + def test_min_max_complex_basic(self): + 
pyfuncs = array_min_global, array_max_global + + for pyfunc in pyfuncs: + cfunc = jit(nopython=True)(pyfunc) + + def check(a): + expected = pyfunc(a) + got = cfunc(a) + self.assertPreciseEqual(expected, got) + + real = np.linspace(-10, 10, 40) + real[:4] = real[-1] + imag = real * 2 + a = real - imag * 1j + check(a) + + for _ in range(10): + self.random.shuffle(real) + self.random.shuffle(imag) + dtype = self.random.choice([np.complex64, np.complex128]) + a = real - imag * 1j + a[:4] = a[-1] + check(a.astype(dtype)) + + def test_nanmin_nanmax_complex_basic(self): + pyfuncs = array_nanmin, array_nanmax + + for pyfunc in pyfuncs: + cfunc = jit(nopython=True)(pyfunc) + + def check(a): + expected = pyfunc(a) + got = cfunc(a) + self.assertPreciseEqual(expected, got) + + real = np.linspace(-10, 10, 40) + real[:4] = real[-1] + real[5:9] = np.nan + imag = real * 2 + imag[7:12] = np.nan + a = real - imag * 1j + check(a) + + for _ in range(10): + self.random.shuffle(real) + self.random.shuffle(imag) + a = real - imag * 1j + a[:4] = a[-1] + check(a) + + def test_nanmin_nanmax_non_array_inputs(self): + pyfuncs = array_nanmin, array_nanmax + + def check(a): + expected = pyfunc(a) + got = cfunc(a) + self.assertPreciseEqual(expected, got) + + def a_variations(): + yield [1, 6, 4, 2] + yield ((-10, 4, -12), (5, 200, -30)) + yield np.array(3) + yield (2,) + yield 3.142 + yield False + yield (np.nan, 3.142, -5.2, 3.0) + yield [np.inf, np.nan, -np.inf] + yield [(np.nan, 1.1), (-4.4, 8.7)] + + for pyfunc in pyfuncs: + cfunc = jit(nopython=True)(pyfunc) + + for a in a_variations(): + check(a) + + def test_argmax_axis_1d_2d_4d(self): + arr1d = np.array([0, 20, 3, 4]) + arr2d = np.arange(6).reshape(2, 3) + arr2d[0,1] += 100 + + arr4d = np.arange(120).reshape(2, 3, 4, 5) + 10 + arr4d[0, 1, 1, 2] += 100 + arr4d[1, 0, 0, 0] -= 51 + + for arr in [arr1d, arr2d, arr4d]: + axes = list(range(arr.ndim)) + [ + -(i+1) for i in range(arr.ndim) + ] + py_functions = [ + lambda a, _axis=axis: 
np.argmax(a, axis=_axis) + for axis in axes + ] + c_functions = [ + jit(nopython=True)(pyfunc) for pyfunc in py_functions + ] + for cfunc in c_functions: + self.assertPreciseEqual(cfunc.py_func(arr), cfunc(arr)) + + def test_argmax_axis_out_of_range(self): + arr1d = np.arange(6) + arr2d = np.arange(6).reshape(2, 3) + + @jit(nopython=True) + def jitargmax(arr, axis): + return np.argmax(arr, axis) + + def assert_raises(arr, axis): + with self.assertRaisesRegex(ValueError, "axis.*out of bounds"): + jitargmax.py_func(arr, axis) + with self.assertRaisesRegex(ValueError, "axis.*out of bounds"): + jitargmax(arr, axis) + + assert_raises(arr1d, 1) + assert_raises(arr1d, -2) + assert_raises(arr2d, -3) + assert_raises(arr2d, 2) + # Exceptions leak references + self.disable_leak_check() + + def test_argmax_axis_must_be_integer(self): + arr = np.arange(6) + + @jit(nopython=True) + def jitargmax(arr, axis): + return np.argmax(arr, axis) + + with self.assertTypingError() as e: + jitargmax(arr, "foo") + self.assertIn("axis must be an integer", str(e.exception)) + + def test_argmax_method_axis(self): + arr2d = np.arange(6).reshape(2, 3) + + def argmax(arr): + return arr2d.argmax(axis=0) + + self.assertPreciseEqual(argmax(arr2d), + jit(nopython=True)(argmax)(arr2d)) + + def test_argmax_return_type(self): + # See issue #7853, return type should be intp not based on input type + arr2d = np.arange(6, dtype=np.uint8).reshape(2, 3) + + def argmax(arr): + return arr2d.argmax(axis=0) + + self.assertPreciseEqual(argmax(arr2d), + jit(nopython=True)(argmax)(arr2d)) + + def test_argmin_axis_1d_2d_4d(self): + arr1d = np.array([0, 20, 3, 4]) + arr2d = np.arange(6).reshape(2, 3) + arr2d[0,1] += 100 + + arr4d = np.arange(120).reshape(2, 3, 4, 5) + 10 + arr4d[0, 1, 1, 2] += 100 + arr4d[1, 0, 0, 0] -= 51 + + for arr in [arr1d, arr2d, arr4d]: + axes = list(range(arr.ndim)) + [ + -(i+1) for i in range(arr.ndim) + ] + py_functions = [ + lambda a, _axis=axis: np.argmin(a, axis=_axis) + for axis in axes 
+ ] + c_functions = [ + jit(nopython=True)(pyfunc) for pyfunc in py_functions + ] + for cfunc in c_functions: + self.assertPreciseEqual(cfunc.py_func(arr), cfunc(arr)) + + def test_argmin_axis_out_of_range(self): + arr1d = np.arange(6) + arr2d = np.arange(6).reshape(2, 3) + + @jit(nopython=True) + def jitargmin(arr, axis): + return np.argmin(arr, axis) + + def assert_raises(arr, axis): + with self.assertRaisesRegex(ValueError, "axis.*out of bounds"): + jitargmin.py_func(arr, axis) + with self.assertRaisesRegex(ValueError, "axis.*out of bounds"): + jitargmin(arr, axis) + + assert_raises(arr1d, 1) + assert_raises(arr1d, -2) + assert_raises(arr2d, -3) + assert_raises(arr2d, 2) + + # Exceptions leak references + self.disable_leak_check() + + def test_argmin_axis_must_be_integer(self): + arr = np.arange(6) + + @jit(nopython=True) + def jitargmin(arr, axis): + return np.argmin(arr, axis) + + with self.assertTypingError() as e: + jitargmin(arr, "foo") + self.assertIn("axis must be an integer", str(e.exception)) + + def test_argmin_method_axis(self): + arr2d = np.arange(6).reshape(2, 3) + + def argmin(arr): + return arr2d.argmin(axis=0) + + self.assertPreciseEqual(argmin(arr2d), + jit(nopython=True)(argmin)(arr2d)) + + def test_argmin_return_type(self): + # See issue #7853, return type should be intp not based on input type + arr2d = np.arange(6, dtype=np.uint8).reshape(2, 3) + + def argmin(arr): + return arr2d.argmin(axis=0) + + self.assertPreciseEqual(argmin(arr2d), + jit(nopython=True)(argmin)(arr2d)) + + @classmethod + def install_generated_tests(cls): + # These form a testing product where each of the combinations are tested + + # these function are tested in real and complex space + reduction_funcs = [array_sum, array_sum_global, + array_prod, array_prod_global, + array_mean, array_mean_global, + array_var, array_var_global, + array_std, array_std_global, + array_all, array_all_global, + array_any, array_any_global, + array_min, array_min_global, + array_amax, 
array_amin, + array_max, array_max_global, + array_nanmax, array_nanmin, + array_nansum, + ] + + # these functions only work in real space as no complex comparison + # operator is implemented + reduction_funcs_rspace = [array_argmin, array_argmin_global, + array_argmax, array_argmax_global] + + reduction_funcs += [array_nanmean, array_nanstd, array_nanvar] + reduction_funcs += [array_nanprod] + + dtypes_to_test = [np.int32, np.float32, np.bool_, np.complex64] + + def install_tests(dtypes, funcs): + # Install tests on class + for dt in dtypes: + test_arrays = full_test_arrays(dt) + for red_func, test_array in product(funcs, test_arrays): + # Create the name for the test function + test_name = "test_{0}_{1}_{2}d" + test_name = test_name.format(red_func.__name__, + test_array.dtype.name, + test_array.ndim) + + def new_test_function(self, redFunc=red_func, + testArray=test_array, + testName=test_name): + ulps = 1 + if 'prod' in red_func.__name__ and \ + np.iscomplexobj(testArray): + # prod family accumulate slightly more error on + # some architectures (power, 32bit) for complex input + ulps = 3 + npr, nbr = run_comparative(redFunc, testArray) + self.assertPreciseEqual(npr, nbr, msg=testName, + prec="single", ulps=ulps) + + # Install it into the class + setattr(cls, test_name, new_test_function) + + # install tests for reduction functions that only work in real space + install_tests(dtypes_to_test[:-1], reduction_funcs_rspace) + + # install tests for reduction functions + install_tests(dtypes_to_test, reduction_funcs) + + +TestArrayReductions.install_generated_tests() + + +class TestArrayReductionsExceptions(MemoryLeakMixin, TestCase): + + # int64, size 0 + zero_size = np.arange(0) + + def check_exception(self, pyfunc, msg): + cfunc = jit(nopython=True)(pyfunc) + # make sure NumPy raises consistently/no behaviour change + with self.assertRaises(BaseException): + pyfunc(self.zero_size) + # check numba impl raises expected + with self.assertRaises(ValueError) as e: + 
cfunc(self.zero_size) + self.assertIn(msg, str(e.exception)) + + @classmethod + def install(cls): + + fn_to_msg = dict() + empty_seq = "attempt to get {0} of an empty sequence" + op_no_ident = ("zero-size array to reduction operation " + "{0}") + for x in [array_argmax, array_argmax_global, array_argmin, + array_argmin_global]: + fn_to_msg[x] = empty_seq + for x in [array_max, array_max, array_min, array_min]: + fn_to_msg[x] = op_no_ident + + name_template = "test_zero_size_array_{0}" + for fn, msg in fn_to_msg.items(): + test_name = name_template.format(fn.__name__) + + lmsg = msg.format(fn.__name__) + lmsg = lmsg.replace('array_','').replace('_global','') + def test_fn(self, func=fn, message=lmsg): + self.check_exception(func, message) + + setattr(cls, test_name, test_fn) + +TestArrayReductionsExceptions.install() + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_array_return.py b/venv/lib/python3.10/site-packages/numba/tests/test_array_return.py new file mode 100644 index 0000000000000000000000000000000000000000..bab7160404ca27a0160880268610f84f66a71a9f --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_array_return.py @@ -0,0 +1,38 @@ +import numpy as np + +from numba import typeof, njit +from numba.tests.support import MemoryLeakMixin +import unittest + + +def array_return(a, i): + a[i] = 123 + return a + + +def array_return_start_with_loop(a): + for i in range(a.size): + a[i] += 1 + return a + + +class TestArrayReturn(MemoryLeakMixin, unittest.TestCase): + def test_array_return(self): + a = np.arange(10) + i = 2 + at, it = typeof(a), typeof(i) + cfunc = njit((at, it))(array_return) + self.assertIs(a, cfunc(a, i)) + + def test_array_return_start_with_loop(self): + """ + A bug breaks array return if the function starts with a loop + """ + a = np.arange(10) + at = typeof(a) + cfunc = njit((at,))(array_return_start_with_loop) + self.assertIs(a, cfunc(a)) + + +if __name__ == 
'__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_asnumbatype.py b/venv/lib/python3.10/site-packages/numba/tests/test_asnumbatype.py new file mode 100644 index 0000000000000000000000000000000000000000..8684874608ee3c4ec0eb1fcbe0e6a16ce09cee5c --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_asnumbatype.py @@ -0,0 +1,170 @@ +""" +Tests for the as_numba_type() machinery. +""" +import typing as py_typing + + +import unittest + +from numba.core import types +from numba.core.errors import TypingError +from numba.core.typing.typeof import typeof +from numba.core.typing.asnumbatype import as_numba_type, AsNumbaTypeRegistry +from numba.experimental.jitclass import jitclass +from numba.tests.support import TestCase + + +class TestAsNumbaType(TestCase): + + int_nb_type = typeof(0) + float_nb_type = typeof(0.0) + complex_nb_type = typeof(complex(0)) + str_nb_type = typeof("numba") + bool_nb_type = typeof(True) + none_nb_type = typeof(None) + + def test_simple_types(self): + self.assertEqual(as_numba_type(int), self.int_nb_type) + self.assertEqual(as_numba_type(float), self.float_nb_type) + self.assertEqual(as_numba_type(complex), self.complex_nb_type) + self.assertEqual(as_numba_type(str), self.str_nb_type) + self.assertEqual(as_numba_type(bool), self.bool_nb_type) + self.assertEqual(as_numba_type(type(None)), self.none_nb_type) + + def test_numba_types(self): + numba_types = [ + types.intp, + types.boolean, + types.ListType(types.float64), + types.DictType( + types.intp, types.Tuple([types.float32, types.float32]) + ), + ] + + for ty in numba_types: + self.assertEqual(as_numba_type(ty), ty) + + def test_single_containers(self): + self.assertEqual( + as_numba_type(py_typing.List[float]), + types.ListType(self.float_nb_type), + ) + self.assertEqual( + as_numba_type(py_typing.Dict[float, str]), + types.DictType(self.float_nb_type, self.str_nb_type), + ) + self.assertEqual( + as_numba_type(py_typing.Set[complex]), 
+ types.Set(self.complex_nb_type), + ) + self.assertEqual( + as_numba_type(py_typing.Tuple[float, float]), + types.Tuple([self.float_nb_type, self.float_nb_type]), + ) + self.assertEqual( + as_numba_type(py_typing.Tuple[float, complex]), + types.Tuple([self.float_nb_type, self.complex_nb_type]), + ) + + def test_optional(self): + self.assertEqual( + as_numba_type(py_typing.Optional[float]), + types.Optional(self.float_nb_type), + ) + self.assertEqual( + as_numba_type(py_typing.Union[str, None]), + types.Optional(self.str_nb_type), + ) + self.assertEqual( + as_numba_type(py_typing.Union[None, bool]), + types.Optional(self.bool_nb_type), + ) + + # Optional[x] is a special case of Union[x, None]. We raise a + # TypingError if the right type is not NoneType. + with self.assertRaises(TypingError) as raises: + as_numba_type(py_typing.Union[int, float]) + self.assertIn("Cannot type Union that is not an Optional", + str(raises.exception)) + + def test_nested_containers(self): + IntList = py_typing.List[int] + self.assertEqual( + as_numba_type(py_typing.List[IntList]), + types.ListType(types.ListType(self.int_nb_type)), + ) + self.assertEqual( + as_numba_type(py_typing.List[py_typing.Dict[float, bool]]), + types.ListType( + types.DictType(self.float_nb_type, self.bool_nb_type) + ), + ) + self.assertEqual( + as_numba_type( + py_typing.Set[py_typing.Tuple[py_typing.Optional[int], float]]), + types.Set(types.Tuple( + [types.Optional(self.int_nb_type), self.float_nb_type])), + ) + + def test_jitclass_registers(self): + + @jitclass + class MyInt: + x: int + + def __init__(self, value): + self.x = value + + self.assertEqual(as_numba_type(MyInt), MyInt.class_type.instance_type) + + def test_type_alias(self): + Pair = py_typing.Tuple[int, int] + ListOfPairs = py_typing.List[Pair] + + pair_nb_type = types.Tuple((self.int_nb_type, self.int_nb_type)) + self.assertEqual(as_numba_type(Pair), pair_nb_type) + self.assertEqual( + as_numba_type(ListOfPairs), types.ListType(pair_nb_type) + ) 
+ + def test_overwrite_type(self): + as_numba_type = AsNumbaTypeRegistry() + self.assertEqual(as_numba_type(float), self.float_nb_type) + as_numba_type.register(float, types.float32) + self.assertEqual(as_numba_type(float), types.float32) + self.assertNotEqual(as_numba_type(float), self.float_nb_type) + + def test_any_throws(self): + Any = py_typing.Any + + any_types = [ + py_typing.Optional[Any], + py_typing.List[Any], + py_typing.Set[Any], + py_typing.Dict[float, Any], + py_typing.Dict[Any, float], + py_typing.Tuple[int, Any], + ] + + for bad_py_type in any_types: + with self.assertRaises(TypingError) as raises: + as_numba_type(bad_py_type) + self.assertIn( + "Cannot infer numba type of python type", + str(raises.exception), + ) + + def test_bad_union_throws(self): + bad_unions = [ + py_typing.Union[str, int], + py_typing.Union[int, type(None), py_typing.Tuple[bool, bool]], + ] + + for bad_py_type in bad_unions: + with self.assertRaises(TypingError) as raises: + as_numba_type(bad_py_type) + self.assertIn("Cannot type Union", str(raises.exception)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_auto_constants.py b/venv/lib/python3.10/site-packages/numba/tests/test_auto_constants.py new file mode 100644 index 0000000000000000000000000000000000000000..e58c8a1947be711b32e6369002e992ca66d839f5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_auto_constants.py @@ -0,0 +1,38 @@ +import math +import sys + +import numpy as np + +from numba import njit +import numba.tests.usecases as uc +import unittest + + +class TestAutoConstants(unittest.TestCase): + def test_numpy_nan(self): + + @njit + def f(): + return np.nan + + self.assertTrue(math.isnan(f())) + self.assertTrue(math.isnan(f.py_func())) + + def test_sys_constant(self): + + @njit + def f(): + return sys.hexversion + + self.assertEqual(f(), f.py_func()) + + def test_module_string_constant(self): + + @njit + def f(): + return 
uc._GLOBAL_STR + self.assertEqual(f(), f.py_func()) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_blackscholes.py b/venv/lib/python3.10/site-packages/numba/tests/test_blackscholes.py new file mode 100644 index 0000000000000000000000000000000000000000..d12846117ff793d4af3fd833187849747bf44663 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_blackscholes.py @@ -0,0 +1,125 @@ +import math + +import numpy as np + +import unittest +from numba import njit +from numba.extending import register_jitable +from numba.tests.support import TestCase + + +RISKFREE = 0.02 +VOLATILITY = 0.30 + + +A1 = 0.31938153 +A2 = -0.356563782 +A3 = 1.781477937 +A4 = -1.821255978 +A5 = 1.330274429 +RSQRT2PI = 0.39894228040143267793994605993438 + + +@register_jitable +def cnd_array(d): + K = 1.0 / (1.0 + 0.2316419 * np.abs(d)) + ret_val = (RSQRT2PI * np.exp(-0.5 * d * d) * + (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5)))))) + return np.where(d > 0, 1.0 - ret_val, ret_val) + + +@register_jitable +def cnd(d): + K = 1.0 / (1.0 + 0.2316419 * math.fabs(d)) + ret_val = (RSQRT2PI * math.exp(-0.5 * d * d) * + (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5)))))) + if d > 0: + ret_val = 1.0 - ret_val + return ret_val + + +@njit +def blackscholes_arrayexpr(stockPrice, optionStrike, optionYears, Riskfree, + Volatility): + S = stockPrice + X = optionStrike + T = optionYears + R = Riskfree + V = Volatility + sqrtT = np.sqrt(T) + d1 = (np.log(S / X) + (R + 0.5 * V * V) * T) / (V * sqrtT) + d2 = d1 - V * sqrtT + cndd1 = cnd_array(d1) + cndd2 = cnd_array(d2) + + expRT = np.exp(- R * T) + + callResult = (S * cndd1 - X * expRT * cndd2) + putResult = (X * expRT * (1.0 - cndd2) - S * (1.0 - cndd1)) + return callResult, putResult + + +@njit +def blackscholes_scalar(callResult, putResult, stockPrice, optionStrike, + optionYears, Riskfree, Volatility): + S = stockPrice + X = optionStrike + T = optionYears + R = Riskfree + V = 
Volatility + for i in range(len(S)): + sqrtT = math.sqrt(T[i]) + d1 = (math.log(S[i] / X[i]) + (R + 0.5 * V * V) * T[i]) / (V * sqrtT) + d2 = d1 - V * sqrtT + cndd1 = cnd(d1) + cndd2 = cnd(d2) + + expRT = math.exp((-1. * R) * T[i]) + callResult[i] = (S[i] * cndd1 - X[i] * expRT * cndd2) + putResult[i] = (X[i] * expRT * (1.0 - cndd2) - S[i] * (1.0 - cndd1)) + + +def randfloat(rand_var, low, high): + return (1.0 - rand_var) * low + rand_var * high + + +class TestBlackScholes(TestCase): + def test_array_expr(self): + OPT_N = 400 + + stockPrice = randfloat(self.random.random_sample(OPT_N), 5.0, 30.0) + optionStrike = randfloat(self.random.random_sample(OPT_N), 1.0, 100.0) + optionYears = randfloat(self.random.random_sample(OPT_N), 0.25, 10.0) + + args = stockPrice, optionStrike, optionYears, RISKFREE, VOLATILITY + + callResultGold, putResultGold = blackscholes_arrayexpr.py_func(*args) + callResultNumba, putResultNumba = blackscholes_arrayexpr(*args) + + delta = np.abs(callResultGold - callResultNumba) + self.assertAlmostEqual(delta.max(), 0) + + def test_scalar(self): + OPT_N = 400 + + callResultGold = np.zeros(OPT_N) + putResultGold = np.zeros(OPT_N) + + callResultNumba = np.zeros(OPT_N) + putResultNumba = np.zeros(OPT_N) + + stockPrice = randfloat(self.random.random_sample(OPT_N), 5.0, 30.0) + optionStrike = randfloat(self.random.random_sample(OPT_N), 1.0, 100.0) + optionYears = randfloat(self.random.random_sample(OPT_N), 0.25, 10.0) + + args = stockPrice, optionStrike, optionYears, RISKFREE, VOLATILITY + + blackscholes_scalar.py_func(callResultGold, putResultGold, *args) + blackscholes_scalar(callResultNumba, putResultNumba, *args) + + delta = np.abs(callResultGold - callResultNumba) + self.assertAlmostEqual(delta.max(), 0) + + +if __name__ == "__main__": + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_boundscheck.py b/venv/lib/python3.10/site-packages/numba/tests/test_boundscheck.py new file mode 100644 index 
0000000000000000000000000000000000000000..f6f904298d95106d0d6d796764168d049ee74d22 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_boundscheck.py @@ -0,0 +1,268 @@ +import numpy as np + +from numba.cuda.testing import SerialMixin +from numba import typeof, cuda, njit +from numba.core.types import float64 +from numba.tests.support import TestCase, MemoryLeakMixin +from numba.core import config +import unittest + + +def basic_array_access(a): + return a[10] + + +def slice_array_access(a): + # The first index (slice) is not bounds checked + return a[10:, 10] + + +def fancy_array_access(x): + a = np.array([1, 2, 3]) + return x[a] + + +def fancy_array_modify(x): + a = np.array([1, 2, 3]) + x[a] = 0 + return x + + +class TestBoundsCheckNoError(MemoryLeakMixin, TestCase): + + @TestCase.run_test_in_subprocess(envvars={'NUMBA_BOUNDSCHECK': ''}) + def test_basic_array_boundscheck(self): + self.assertIsNone(config.BOUNDSCHECK) + + a = np.arange(5) + # Check the numpy behavior to make sure the test is correct + with self.assertRaises(IndexError): + # TODO: When we raise the same error message as numpy, test that + # they are the same + basic_array_access(a) + + at = typeof(a) + noboundscheck = njit((at,))(basic_array_access) + # Check that the default flag doesn't raise + noboundscheck(a) + # boundscheck(a) is tested in TestBoundsCheckError below + + @TestCase.run_test_in_subprocess(envvars={'NUMBA_BOUNDSCHECK': ''}) + def test_slice_array_boundscheck(self): + self.assertIsNone(config.BOUNDSCHECK) + + a = np.ones((5, 5)) + b = np.ones((5, 20)) + with self.assertRaises(IndexError): + # TODO: When we raise the same error message as numpy, test that + # they are the same + slice_array_access(a) + # Out of bounds on a slice doesn't raise + slice_array_access(b) + + at = typeof(a) + rt = float64[:] + noboundscheck = njit(rt(at))(slice_array_access) + boundscheck = njit(rt(at), boundscheck=True)(slice_array_access) + # Check that the default flag doesn't raise 
+ noboundscheck(a) + noboundscheck(b) + # boundscheck(a) is tested in TestBoundsCheckError below + + # Doesn't raise + boundscheck(b) + + @TestCase.run_test_in_subprocess(envvars={'NUMBA_BOUNDSCHECK': ''}) + def test_fancy_indexing_boundscheck(self): + self.assertIsNone(config.BOUNDSCHECK) + + a = np.arange(3) + b = np.arange(4) + + # Check the numpy behavior to ensure the test is correct. + with self.assertRaises(IndexError): + # TODO: When we raise the same error message as numpy, test that + # they are the same + fancy_array_access(a) + fancy_array_access(b) + + at = typeof(a) + rt = at.dtype[:] + noboundscheck = njit(rt(at))(fancy_array_access) + boundscheck = njit(rt(at), boundscheck=True)(fancy_array_access) + # Check that the default flag doesn't raise + noboundscheck(a) + noboundscheck(b) + # boundscheck(a) is tested in TestBoundsCheckError below + + # Doesn't raise + boundscheck(b) + + +class TestNoCudaBoundsCheck(SerialMixin, TestCase): + @unittest.skipIf(not cuda.is_available(), "NO CUDA") + @TestCase.run_test_in_subprocess(envvars={'NUMBA_BOUNDSCHECK': '1'}) + def test_no_cuda_boundscheck(self): + self.assertTrue(config.BOUNDSCHECK) + with self.assertRaises(NotImplementedError): + @cuda.jit(boundscheck=True) + def func(): + pass + + # Make sure we aren't raising "not supported" error if we aren't + # requesting bounds checking anyway. Related pull request: #5257 + @cuda.jit(boundscheck=False) + def func3(): + pass + + @cuda.jit + def func2(x, a): + a[1] = x[1] + + a = np.ones((1,)) + x = np.zeros((1,)) + # Out of bounds but doesn't raise (it does raise in the simulator, + # so skip there) + if not config.ENABLE_CUDASIM: + func2[1, 1](x, a) + + +# This is a separate test because the jitted functions that raise exceptions +# have memory leaks. 
+class TestBoundsCheckError(TestCase): + @TestCase.run_test_in_subprocess(envvars={'NUMBA_BOUNDSCHECK': ''}) + def test_basic_array_boundscheck(self): + self.assertIsNone(config.BOUNDSCHECK) + + a = np.arange(5) + # Check the numpy behavior to make sure the test is correct + with self.assertRaises(IndexError): + # TODO: When we raise the same error message as numpy, test that + # they are the same + basic_array_access(a) + + at = typeof(a) + boundscheck = njit((at,), boundscheck=True)(basic_array_access) + + with self.assertRaises(IndexError): + boundscheck(a) + + @TestCase.run_test_in_subprocess(envvars={'NUMBA_BOUNDSCHECK': ''}) + def test_slice_array_boundscheck(self): + self.assertIsNone(config.BOUNDSCHECK) + + a = np.ones((5, 5)) + b = np.ones((5, 20)) + with self.assertRaises(IndexError): + # TODO: When we raise the same error message as numpy, test that + # they are the same + slice_array_access(a) + # Out of bounds on a slice doesn't raise + slice_array_access(b) + + at = typeof(a) + rt = float64[:] + boundscheck = njit(rt(at), boundscheck=True)(slice_array_access) + + with self.assertRaises(IndexError): + boundscheck(a) + + @TestCase.run_test_in_subprocess(envvars={'NUMBA_BOUNDSCHECK': ''}) + def test_fancy_indexing_boundscheck(self): + self.assertIsNone(config.BOUNDSCHECK) + + a = np.arange(3) + b = np.arange(4) + + # Check the numpy behavior to ensure the test is correct. 
+ with self.assertRaises(IndexError): + # TODO: When we raise the same error message as numpy, test that + # they are the same + fancy_array_access(a) + fancy_array_access(b) + + at = typeof(a) + rt = at.dtype[:] + boundscheck = njit(rt(at), boundscheck=True)(fancy_array_access) + + with self.assertRaises(IndexError): + boundscheck(a) + + @TestCase.run_test_in_subprocess(envvars={'NUMBA_BOUNDSCHECK': ''}) + def test_fancy_indexing_with_modification_boundscheck(self): + self.assertIsNone(config.BOUNDSCHECK) + + a = np.arange(3) + b = np.arange(4) + + # Check the numpy behavior to ensure the test is correct. + with self.assertRaises(IndexError): + # TODO: When we raise the same error message as numpy, test that + # they are the same + fancy_array_modify(a) + fancy_array_modify(b) + + at = typeof(a) + rt = at.dtype[:] + boundscheck = njit(rt(at), boundscheck=True)(fancy_array_modify) + + with self.assertRaises(IndexError): + boundscheck(a) + + +class TestBoundsEnvironmentVariable(TestCase): + def setUp(self): + @njit + def default(x): + return x[1] + + @njit(boundscheck=False) + def off(x): + return x[1] + + @njit(boundscheck=True) + def on(x): + return x[1] + + self.default = default + self.off = off + self.on = on + + @TestCase.run_test_in_subprocess(envvars={'NUMBA_BOUNDSCHECK': ''}) + def test_boundscheck_unset(self): + self.assertIsNone(config.BOUNDSCHECK) + + a = np.array([1]) + + # Doesn't raise + self.default(a) + self.off(a) + + with self.assertRaises(IndexError): + self.on(a) + + @TestCase.run_test_in_subprocess(envvars={'NUMBA_BOUNDSCHECK': '1'}) + def test_boundscheck_enabled(self): + self.assertTrue(config.BOUNDSCHECK) + + a = np.array([1]) + + with self.assertRaises(IndexError): + self.default(a) + self.off(a) + self.on(a) + + @TestCase.run_test_in_subprocess(envvars={'NUMBA_BOUNDSCHECK': '0'}) + def test_boundscheck_disabled(self): + self.assertFalse(config.BOUNDSCHECK) + + a = np.array([1]) + + # Doesn't raise + self.default(a) + self.off(a) + 
self.on(a) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_buffer_protocol.py b/venv/lib/python3.10/site-packages/numba/tests/test_buffer_protocol.py new file mode 100644 index 0000000000000000000000000000000000000000..b680d819e72e48e983405c3bf969d13544eb73a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_buffer_protocol.py @@ -0,0 +1,289 @@ +import array + +import numpy as np + +from numba import jit +from numba.tests.support import TestCase, compile_function, MemoryLeakMixin +import unittest + + +@jit(nopython=True) +def len_usecase(buf): + return len(buf) + + +@jit(nopython=True) +def getitem_usecase(buf, i): + return buf[i] + + +@jit(nopython=True) +def getslice_usecase(buf, i, j): + s = buf[i:j] + return s[0] + 2 * s[-1] + + +@jit(nopython=True) +def setitem_usecase(buf, i, v): + buf[i] = v + + +@jit(nopython=True) +def iter_usecase(buf): + res = 0.0 + for i, x in enumerate(buf): + res += x + res *= i + 1 + return res + + +def attrgetter(attr): + code = """def func(x): + return x.%(attr)s +""" % locals() + pyfunc = compile_function("func", code, globals()) + return jit(nopython=True)(pyfunc) + + +contiguous_usecase = attrgetter("contiguous") +c_contiguous_usecase = attrgetter("c_contiguous") +f_contiguous_usecase = attrgetter("f_contiguous") +itemsize_usecase = attrgetter("itemsize") +nbytes_usecase = attrgetter("nbytes") +ndim_usecase = attrgetter("ndim") +readonly_usecase = attrgetter("readonly") +shape_usecase = attrgetter("shape") +strides_usecase = attrgetter("strides") + + +class TestBufferProtocol(MemoryLeakMixin, TestCase): + """ + Test operations on buffer-providing objects. 
+ """ + + def _arrays(self): + n = 10 + for letter, offset in [ + ('b', -3), + ('B', 0), + ('h', -5000), + ('H', 40000), + ('i', -100000), + ('I', 1000000), + ('l', -100000), + ('L', 1000000), + ('q', -2**60), + ('Q', 2**63 + 1), + ('f', 1.5), + ('d', -1.5), + ]: + yield array.array(letter, [i + offset for i in range(n)]) + + def _memoryviews(self): + n = 10 + yield memoryview(bytearray(b"abcdefghi")) + yield memoryview(b"abcdefghi") + # Different item types + for dtype, start, stop in [ + ('int8', -10, 10), + ('uint8', 0, 10), + ('int16', -5000, 1000), + ('uint16', 40000, 50000), + ('int32', -100000, 100000), + ('uint32', 0, 1000000), + ('int64', -2**60, 10), + ('uint64', 0, 2**64 - 10), + ('float32', 1.5, 3.5), + ('float64', 1.5, 3.5), + ('complex64', -8j, 12 + 5j), + ('complex128', -8j, 12 + 5j), + ]: + yield memoryview(np.linspace(start, stop, n).astype(dtype)) + # Different layouts + arr = np.arange(12).reshape((3, 4)) + assert arr.flags.c_contiguous and not arr.flags.f_contiguous + yield memoryview(arr) + arr = arr.T + assert arr.flags.f_contiguous and not arr.flags.c_contiguous + yield memoryview(arr) + arr = arr[::2] + assert not arr.flags.f_contiguous and not arr.flags.c_contiguous + yield memoryview(arr) + + def _readonlies(self): + yield b"xyz" + yield memoryview(b"abcdefghi") + arr = np.arange(5) + arr.setflags(write=False) + yield memoryview(arr) + + def _check_unary(self, jitfunc, *args): + pyfunc = jitfunc.py_func + self.assertPreciseEqual(jitfunc(*args), pyfunc(*args)) + + def check_len(self, obj): + self._check_unary(len_usecase, obj) + + def check_iter(self, obj): + self._check_unary(iter_usecase, obj) + + def check_getitem(self, obj): + # Be careful to index all dimensions, since we don't support + # partial indexing yet. + def yield_indices(obj): + try: + shape = obj.shape + except AttributeError: + shape = len(obj), + for tup in np.ndindex(shape): + # Simple 1d buffer-providing objects usually don't support + # tuple indexing. 
+ if len(tup) == 1: + yield tup[0] + else: + yield tup + + for i in yield_indices(obj): + try: + expected = obj[i] + except (NotImplementedError, TypeError): + if isinstance(obj, memoryview): + # The memoryview object doesn't support all codes yet, + # fall back on the underlying object. + expected = obj.obj[i] + else: + raise + self.assertPreciseEqual(getitem_usecase(obj, i), expected) + + def check_setitem(self, obj): + for i in range(len(obj)): + orig = list(obj) + val = obj[i] // 2 + 1 + setitem_usecase(obj, i, val) + self.assertEqual(obj[i], val) + for j, val in enumerate(orig): + if j != i: + self.assertEqual(obj[j], val) + + def check_getslice(self, obj): + self._check_unary(getslice_usecase, obj, 1, len(obj) - 1) + + def test_len(self): + self.check_len(bytearray(5)) + self.check_len(b"xyz") + for mem in self._memoryviews(): + self.check_len(mem) + for arr in self._arrays(): + self.check_len(arr) + for buf in self._readonlies(): + self.check_getitem(buf) + + def test_getitem(self): + self.check_getitem(bytearray(b"abc")) + self.check_getitem(b"xyz") + for mem in self._memoryviews(): + self.check_getitem(mem) + for arr in self._arrays(): + self.check_getitem(arr) + for buf in self._readonlies(): + self.check_getitem(buf) + + def test_getslice(self): + with self.assertTypingError(): + self.check_getslice(bytearray(b"abcde")) + self.check_getslice(b"xyzuvw") + self.check_getslice(memoryview(b"xyzuvw")) + with self.assertTypingError(): + self.check_getslice(array.array('i', range(10))) + for buf in self._readonlies(): + self.check_getitem(buf) + + def test_setitem(self): + self.check_setitem(bytearray(b"abcdefghi")) + for arr in self._arrays(): + self.check_setitem(arr) + for mem in self._memoryviews(): + self.check_getitem(mem) + # Read-only buffers + for buf in self._readonlies(): + with self.assertTypingError(): + self.check_setitem(buf) + + def test_iter(self): + self.check_iter(bytearray(b"abc")) + self.check_iter(b"xyz") + 
self.check_iter(memoryview(b"xyz")) + for arr in self._arrays(): + self.check_iter(arr) + for buf in self._readonlies(): + self.check_getitem(buf) + + +class TestMemoryView(MemoryLeakMixin, TestCase): + """ + Test memoryview-specific attributes and operations. + """ + + def _arrays(self): + arr = np.arange(12) + yield arr + arr = arr.reshape((3, 4)) + yield arr + yield arr.T + yield arr[::2] + arr.setflags(write=False) + yield arr + arr = np.zeros(()) + assert arr.ndim == 0 + yield arr + + def test_ndim(self): + for arr in self._arrays(): + m = memoryview(arr) + self.assertPreciseEqual(ndim_usecase(m), arr.ndim) + + def test_shape(self): + for arr in self._arrays(): + m = memoryview(arr) + self.assertPreciseEqual(shape_usecase(m), arr.shape) + + def test_strides(self): + for arr in self._arrays(): + m = memoryview(arr) + self.assertPreciseEqual(strides_usecase(m), arr.strides) + + def test_itemsize(self): + for arr in self._arrays(): + m = memoryview(arr) + self.assertPreciseEqual(itemsize_usecase(m), arr.itemsize) + + def test_nbytes(self): + for arr in self._arrays(): + m = memoryview(arr) + self.assertPreciseEqual(nbytes_usecase(m), arr.size * arr.itemsize) + + def test_readonly(self): + for arr in self._arrays(): + m = memoryview(arr) + self.assertIs(readonly_usecase(m), not arr.flags.writeable) + m = memoryview(b"xyz") + self.assertIs(readonly_usecase(m), True) + m = memoryview(bytearray(b"xyz")) + self.assertIs(readonly_usecase(m), False) + + def test_contiguous(self): + m = memoryview(bytearray(b"xyz")) + self.assertIs(contiguous_usecase(m), True) + self.assertIs(c_contiguous_usecase(m), True) + self.assertIs(f_contiguous_usecase(m), True) + for arr in self._arrays(): + m = memoryview(arr) + # Note `arr.flags.contiguous` is wrong (it mimics c_contiguous) + self.assertIs(contiguous_usecase(m), + arr.flags.f_contiguous or arr.flags.c_contiguous) + self.assertIs(c_contiguous_usecase(m), arr.flags.c_contiguous) + self.assertIs(f_contiguous_usecase(m), 
arr.flags.f_contiguous) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_builtins.py b/venv/lib/python3.10/site-packages/numba/tests/test_builtins.py new file mode 100644 index 0000000000000000000000000000000000000000..11689b245a13abe4f0c40c71da5e87cd7c937721 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_builtins.py @@ -0,0 +1,1682 @@ +import itertools +import functools +import sys +import operator +from collections import namedtuple + +import numpy as np + +import unittest +import warnings + +from numba import jit, typeof, njit, typed +from numba.core import errors, types, config +from numba.tests.support import (TestCase, tag, ignore_internal_warnings, + MemoryLeakMixin) +from numba.core.extending import overload_method, box, register_jitable + + +forceobj_flags = {'forceobj': True} + +no_pyobj_flags = {'nopython': True, '_nrt': False} + +nrt_no_pyobj_flags = {'nopython': True} + + +def abs_usecase(x): + return abs(x) + +def all_usecase(x, y): + if x == None and y == None: + return all([]) + elif x == None: + return all([y]) + elif y == None: + return all([x]) + else: + return all([x, y]) + +def any_usecase(x, y): + if x == None and y == None: + return any([]) + elif x == None: + return any([y]) + elif y == None: + return any([x]) + else: + return any([x, y]) + +def bool_usecase(x): + return bool(x) + +def complex_usecase(x, y): + return complex(x, y) + +def divmod_usecase(x, y): + return divmod(x, y) + +def enumerate_usecase(): + result = 0 + for i, j in enumerate((1., 2.5, 3.)): + result += i * j + return result + +def enumerate_start_usecase(): + result = 0 + for i, j in enumerate((1., 2.5, 3.), 42): + result += i * j + return result + +def enumerate_invalid_start_usecase(): + result = 0 + for i, j in enumerate((1., 2.5, 3.), 3.14159): + result += i * j + return result + +def filter_usecase(x, filter_func): + return filter(filter_func, x) + +def float_usecase(x): + return 
float(x) + +def float_inf_usecase(x): + d = { + 0: float('inf'), + 1: float('INF'), + 2: float('-inf'), + 3: float('-INF'), + 4: float('\r\nINF\r '), + 5: float(' \r\n\t-INF'), + 6: float('1234.45'), + 7: float('\n-123.4\r'), + } + return d.get(x) + +def format_usecase(x, y): + return x.format(y) + +def globals_usecase(): + return globals() + +# NOTE: hash() is tested in test_hashing + +def hex_usecase(x): + return hex(x) + +def str_usecase(x): + return str(x) + +def int_usecase(x, base): + return int(x, base=base) + +def iter_next_usecase(x): + it = iter(x) + return next(it), next(it) + +def locals_usecase(x): + y = 5 + return locals()['y'] + +def long_usecase(x, base): + return long(x, base=base) + +def map_usecase(x, map_func): + return map(map_func, x) + + +def max_usecase1(x, y): + return max(x, y) + +def max_usecase2(x, y): + return max([x, y]) + +def max_usecase3(x): + return max(x) + +def max_usecase4(): + return max(()) + + +def min_usecase1(x, y): + return min(x, y) + +def min_usecase2(x, y): + return min([x, y]) + +def min_usecase3(x): + return min(x) + +def min_usecase4(): + return min(()) + +def oct_usecase(x): + return oct(x) + +def reduce_usecase(reduce_func, x): + return functools.reduce(reduce_func, x) + +def round_usecase1(x): + return round(x) + +def round_usecase2(x, n): + return round(x, n) + +def sum_usecase(x): + return sum(x) + +def type_unary_usecase(a, b): + return type(a)(b) + +def truth_usecase(p): + return operator.truth(p) + +def unichr_usecase(x): + return unichr(x) + +def zip_usecase(): + result = 0 + for i, j in zip((1, 2, 3), (4.5, 6.7)): + result += i * j + return result + +def zip_0_usecase(): + result = 0 + for i in zip(): + result += 1 + return result + +def zip_1_usecase(): + result = 0 + for i, in zip((1, 2)): + result += i + return result + + +def zip_3_usecase(): + result = 0 + for i, j, k in zip((1, 2), (3, 4, 5), (6.7, 8.9)): + result += i * j * k + return result + + +def zip_first_exhausted(): + iterable = range(7) + n = 
3 + it = iter(iterable) + # 1st iterator is shorter + front = list(zip(range(n), it)) + # Make sure that we didn't skip one in `it` + back = list(it) + return front, back + + +def pow_op_usecase(x, y): + return x ** y + + +def pow_usecase(x, y): + return pow(x, y) + + +def sum_usecase(x): + return sum(x) + + +def sum_kwarg_usecase(x, start=0): + ret = sum(x, start) + return sum(x, start=start), ret + + +def isinstance_usecase(a): + if isinstance(a, (int, float)): + if isinstance(a, int): + return a + 1, 'int' + if isinstance(a, float): + return a + 2.0, 'float' + elif isinstance(a, str): + return a + ", world!", 'str' + elif isinstance(a, complex): + return a.imag, 'complex' + elif isinstance(a, (tuple, list)): + if isinstance(a, tuple): + return 'tuple' + else: + return 'list' + elif isinstance(a, set): + return 'set' + elif isinstance(a, bytes): + return 'bytes' + return 'no match' + + +def isinstance_dict(): + a = {1: 2, 3: 4} + b = {'a': 10, 'b': np.zeros(3)} + if isinstance(a, dict) and isinstance(b, dict): + return 'dict' + else: + return 'not dict' + + +def isinstance_usecase_numba_types(a): + if isinstance(a, typed.List): + return 'typed list' + elif isinstance(a, (types.int32, types.int64)): + if isinstance(a, types.int32): + return 'int32' + else: + return 'int64' + elif isinstance(a, (types.float32, types.float64)): + if isinstance(a, types.float32): + return 'float32' + elif isinstance(a, types.float64): + return 'float64' + elif isinstance(a, typed.Dict): + return 'typed dict' + else: + return 'no match' + + +def isinstance_usecase_numba_types_2(): + # some types cannot be passed as argument to njit functions + a = b'hello' + b = range(1, 2) + c = dict() + c[2] = 3 + if isinstance(a, bytes) and \ + isinstance(b, range) and \ + isinstance(c, dict): + return True + return False + + +def invalid_isinstance_usecase(x): + if isinstance(x, ('foo',)): + return 'true branch' + else: + return 'false branch' + + +def isinstance_usecase_invalid_type(x): + # this 
should be a valid call when x := float + if isinstance(x, (float, 'not a type')): + return True + else: + return False + + +def invalid_isinstance_usecase_phi_nopropagate(x): + if x > 4: + z = 10 + else: + z = 'a' + if isinstance(z, int): + return True + else: + return False + + +def invalid_isinstance_usecase_phi_nopropagate2(a): + # Numba issue #9125 + x = 0 + if isinstance(a, int): + a = (a, a) + + for i in range(len(a)): + x += i + return x + + +def invalid_isinstance_optional_usecase(x): + if x > 4: + z = 10 + else: + z = None + if isinstance(z, int): + return True + else: + return False + +def invalid_isinstance_unsupported_type_usecase(): + ntpl = namedtuple('ntpl', ['a', 'b']) + inst = ntpl(1, 2) + def impl(x): + return isinstance(inst, ntpl) + return impl + +class TestBuiltins(TestCase): + + def run_nullary_func(self, pyfunc, flags): + cfunc = jit((), **flags)(pyfunc) + expected = pyfunc() + self.assertPreciseEqual(cfunc(), expected) + + def test_abs(self, flags=forceobj_flags): + pyfunc = abs_usecase + + cfunc = jit((types.int32,), **flags)(pyfunc) + for x in [-1, 0, 1]: + self.assertPreciseEqual(cfunc(x), pyfunc(x)) + + cfunc = jit((types.float32,), **flags)(pyfunc) + for x in [-1.1, 0.0, 1.1]: + self.assertPreciseEqual(cfunc(x), pyfunc(x), prec='single') + + complex_values = [-1.1 + 0.5j, 0.0 + 0j, 1.1 + 3j, + float('inf') + 1j * float('nan'), + float('nan') - 1j * float('inf')] + cfunc = jit((types.complex64,), **flags)(pyfunc) + for x in complex_values: + self.assertPreciseEqual(cfunc(x), pyfunc(x), prec='single') + cfunc = jit((types.complex128,), **flags)(pyfunc) + for x in complex_values: + self.assertPreciseEqual(cfunc(x), pyfunc(x)) + + for unsigned_type in types.unsigned_domain: + unsigned_values = [0, 10, 2, 2 ** unsigned_type.bitwidth - 1] + cfunc = jit((unsigned_type,), **flags)(pyfunc) + for x in unsigned_values: + self.assertPreciseEqual(cfunc(x), pyfunc(x)) + + def test_abs_npm(self): + self.test_abs(flags=no_pyobj_flags) + + def 
test_all(self, flags=forceobj_flags): + pyfunc = all_usecase + + cfunc = jit((types.int32,types.int32), **flags)(pyfunc) + x_operands = [-1, 0, 1, None] + y_operands = [-1, 0, 1, None] + for x, y in itertools.product(x_operands, y_operands): + self.assertPreciseEqual(cfunc(x, y), pyfunc(x, y)) + + def test_all_npm(self): + with self.assertTypingError(): + self.test_all(flags=no_pyobj_flags) + + def test_any(self, flags=forceobj_flags): + pyfunc = any_usecase + + cfunc = jit((types.int32,types.int32), **flags)(pyfunc) + x_operands = [-1, 0, 1, None] + y_operands = [-1, 0, 1, None] + for x, y in itertools.product(x_operands, y_operands): + self.assertPreciseEqual(cfunc(x, y), pyfunc(x, y)) + + def test_any_npm(self): + with self.assertTypingError(): + self.test_any(flags=no_pyobj_flags) + + def test_bool(self, flags=forceobj_flags): + pyfunc = bool_usecase + + cfunc = jit((types.int32,), **flags)(pyfunc) + for x in [-1, 0, 1]: + self.assertPreciseEqual(cfunc(x), pyfunc(x)) + cfunc = jit((types.float64,), **flags)(pyfunc) + for x in [0.0, -0.0, 1.5, float('inf'), float('nan')]: + self.assertPreciseEqual(cfunc(x), pyfunc(x)) + cfunc = jit((types.complex128,), **flags)(pyfunc) + for x in [complex(0, float('inf')), complex(0, float('nan'))]: + self.assertPreciseEqual(cfunc(x), pyfunc(x)) + + def test_bool_npm(self): + self.test_bool(flags=no_pyobj_flags) + + def test_bool_nonnumber(self, flags=forceobj_flags): + pyfunc = bool_usecase + + cfunc = jit((types.string,), **flags)(pyfunc) + for x in ['x', '']: + self.assertPreciseEqual(cfunc(x), pyfunc(x)) + + cfunc = jit((types.Dummy('list'),), **flags)(pyfunc) + for x in [[1], []]: + self.assertPreciseEqual(cfunc(x), pyfunc(x)) + + def test_bool_nonnumber_npm(self): + with self.assertTypingError(): + self.test_bool_nonnumber(flags=no_pyobj_flags) + + def test_complex(self, flags=forceobj_flags): + pyfunc = complex_usecase + + cfunc = jit((types.int32, types.int32), **flags)(pyfunc) + + x_operands = [-1, 0, 1] + y_operands = 
[-1, 0, 1] + for x, y in itertools.product(x_operands, y_operands): + self.assertPreciseEqual(cfunc(x, y), pyfunc(x, y)) + + def test_complex_npm(self): + self.test_complex(flags=no_pyobj_flags) + + def test_divmod_ints(self, flags=forceobj_flags): + pyfunc = divmod_usecase + + cfunc = jit((types.int64, types.int64), **flags)(pyfunc) + + def truncate_result(x, bits=64): + # Remove any extraneous bits (since Numba will return + # a 64-bit result by definition) + if x >= 0: + x &= (1 << (bits - 1)) - 1 + return x + + denominators = [1, 3, 7, 15, -1, -3, -7, -15, 2**63 - 1, -2**63] + numerators = denominators + [0] + for x, y, in itertools.product(numerators, denominators): + expected_quot, expected_rem = pyfunc(x, y) + quot, rem = cfunc(x, y) + f = truncate_result + self.assertPreciseEqual((f(quot), f(rem)), + (f(expected_quot), f(expected_rem))) + + for x in numerators: + with self.assertRaises(ZeroDivisionError): + cfunc(x, 0) + + def test_divmod_ints_npm(self): + self.test_divmod_ints(flags=no_pyobj_flags) + + def test_divmod_floats(self, flags=forceobj_flags): + pyfunc = divmod_usecase + + cfunc = jit((types.float64, types.float64), **flags)(pyfunc) + + denominators = [1., 3.5, 1e100, -2., -7.5, -1e101, + np.inf, -np.inf, np.nan] + numerators = denominators + [-0.0, 0.0] + for x, y, in itertools.product(numerators, denominators): + expected_quot, expected_rem = pyfunc(x, y) + quot, rem = cfunc(x, y) + self.assertPreciseEqual((quot, rem), (expected_quot, expected_rem)) + + for x in numerators: + with self.assertRaises(ZeroDivisionError): + cfunc(x, 0.0) + + def test_divmod_floats_npm(self): + self.test_divmod_floats(flags=no_pyobj_flags) + + def test_enumerate(self, flags=forceobj_flags): + self.run_nullary_func(enumerate_usecase, flags) + + def test_enumerate_npm(self): + self.test_enumerate(flags=no_pyobj_flags) + + def test_enumerate_start(self, flags=forceobj_flags): + self.run_nullary_func(enumerate_start_usecase, flags) + + def 
test_enumerate_start_npm(self): + self.test_enumerate_start(flags=no_pyobj_flags) + + def test_enumerate_start_invalid_start_type(self): + pyfunc = enumerate_invalid_start_usecase + + cfunc = jit((), **forceobj_flags)(pyfunc) + + with self.assertRaises(TypeError) as raises: + cfunc() + + msg = "'float' object cannot be interpreted as an integer" + self.assertIn(msg, str(raises.exception)) + + def test_enumerate_start_invalid_start_type_npm(self): + pyfunc = enumerate_invalid_start_usecase + with self.assertRaises(errors.TypingError) as raises: + jit((), **no_pyobj_flags)(pyfunc) + msg = "Only integers supported as start value in enumerate" + self.assertIn(msg, str(raises.exception)) + + def test_filter(self, flags=forceobj_flags): + pyfunc = filter_usecase + argtys = (types.Dummy('list'), types.Dummy('function_ptr')) + cfunc = jit(argtys, **flags)(pyfunc) + + filter_func = lambda x: x % 2 + x = [0, 1, 2, 3, 4] + self.assertSequenceEqual(list(cfunc(x, filter_func)), + list(pyfunc(x, filter_func))) + + def test_filter_npm(self): + with self.assertTypingError(): + self.test_filter(flags=no_pyobj_flags) + + def test_float(self, flags=forceobj_flags): + pyfunc = float_usecase + + cfunc = jit((types.int32,), **flags)(pyfunc) + for x in [-1, 0, 1]: + self.assertPreciseEqual(cfunc(x), pyfunc(x)) + + cfunc = jit((types.float32,), **flags)(pyfunc) + for x in [-1.1, 0.0, 1.1]: + self.assertPreciseEqual(cfunc(x), pyfunc(x), prec='single') + + cfunc = jit((types.string,), **flags)(pyfunc) + for x in ['-1.1', '0.0', '1.1', 'inf', '-inf', 'INF', '-INF']: + self.assertPreciseEqual(cfunc(x), pyfunc(x)) + + def test_float_npm(self): + with self.assertTypingError(): + self.test_float(flags=no_pyobj_flags) + + def test_float_string_literal(self): + pyfunc = float_inf_usecase + cfunc = njit(pyfunc) + for x in range(8): + self.assertPreciseEqual(cfunc(x), pyfunc(x)) + + def test_format(self, flags=forceobj_flags): + pyfunc = format_usecase + + cfunc = jit((types.string, types.int32,), 
**flags)(pyfunc) + x = '{0}' + for y in [-1, 0, 1]: + self.assertPreciseEqual(cfunc(x, y), pyfunc(x, y)) + + cfunc = jit((types.string, types.float32,), **flags)(pyfunc) + x = '{0}' + for y in [-1.1, 0.0, 1.1]: + self.assertPreciseEqual(cfunc(x, y), pyfunc(x, y)) + + cfunc = jit((types.string, types.string,), **flags)(pyfunc) + x = '{0}' + for y in ['a', 'b', 'c']: + self.assertPreciseEqual(cfunc(x, y), pyfunc(x, y)) + + def test_format_npm(self): + with self.assertTypingError(): + self.test_format(flags=no_pyobj_flags) + + def test_globals(self, flags=forceobj_flags): + pyfunc = globals_usecase + cfunc = jit((), **flags)(pyfunc) + g = cfunc() + self.assertIs(g, globals()) + + def test_globals_npm(self): + with self.assertTypingError(): + self.test_globals(flags=no_pyobj_flags) + + def test_globals_jit(self, flags=forceobj_flags): + # Issue #416: weird behaviour of globals() in combination with + # the @jit decorator. + pyfunc = globals_usecase + jitted = jit(**flags)(pyfunc) + self.assertIs(jitted(), globals()) + self.assertIs(jitted(), globals()) + + def test_globals_jit_npm(self): + with self.assertTypingError(): + self.test_globals_jit(nopython=True) + + def test_hex(self, flags=forceobj_flags): + pyfunc = hex_usecase + + cfunc = jit((types.int32,), **flags)(pyfunc) + for x in [-1, 0, 1]: + self.assertPreciseEqual(cfunc(x), pyfunc(x)) + + def test_hex_npm(self): + with self.assertTypingError(): + self.test_hex(flags=no_pyobj_flags) + + def test_int_str(self): + pyfunc = str_usecase + + small_inputs = [ + 1234, + 1, + 0, + 10, + 1000, + ] + + large_inputs = [ + 123456789, + 2222222, + 1000000, + ~0x0 + ] + + args = [*small_inputs, *large_inputs] + + typs = [ + types.int8, + types.int16, + types.int32, + types.int64, + types.uint, + types.uint8, + types.uint16, + types.uint32, + types.uint64, + ] + + for typ in typs: + cfunc = jit((typ,), **nrt_no_pyobj_flags)(pyfunc) + for v in args: + tp_info = np.iinfo(typ.key) + if not (tp_info.min <= v <= tp_info.max): + 
continue + self.assertPreciseEqual(cfunc(typ(v)), pyfunc(typ(v))) + + if typ.signed: + self.assertPreciseEqual(cfunc(typ(-v)), pyfunc(typ(-v))) + + def test_int(self, flags=forceobj_flags): + pyfunc = int_usecase + + cfunc = jit((types.string, types.int32,), **flags)(pyfunc) + + x_operands = ['-1', '0', '1', '10'] + y_operands = [2, 8, 10, 16] + for x, y in itertools.product(x_operands, y_operands): + self.assertPreciseEqual(cfunc(x, y), pyfunc(x, y)) + + def test_int_npm(self): + with self.assertTypingError(): + self.test_int(flags=no_pyobj_flags) + + def test_iter_next(self, flags=forceobj_flags): + pyfunc = iter_next_usecase + cfunc = jit((types.UniTuple(types.int32, 3),), **flags)(pyfunc) + self.assertPreciseEqual(cfunc((1, 42, 5)), (1, 42)) + + cfunc = jit((types.UniTuple(types.int32, 1),), **flags)(pyfunc) + with self.assertRaises(StopIteration): + cfunc((1,)) + + def test_iter_next_npm(self): + self.test_iter_next(flags=no_pyobj_flags) + + def test_locals(self, flags=forceobj_flags): + pyfunc = locals_usecase + with self.assertRaises(errors.ForbiddenConstruct): + jit((types.int64,), **flags)(pyfunc) + + def test_locals_forceobj(self): + self.test_locals(flags=forceobj_flags) + + def test_locals_npm(self): + with self.assertTypingError(): + self.test_locals(flags=no_pyobj_flags) + + def test_map(self, flags=forceobj_flags): + pyfunc = map_usecase + argtys = (types.Dummy('list'), types.Dummy('function_ptr')) + cfunc = jit(argtys, **flags)(pyfunc) + + map_func = lambda x: x * 2 + x = [0, 1, 2, 3, 4] + self.assertSequenceEqual(list(cfunc(x, map_func)), + list(pyfunc(x, map_func))) + + def test_map_npm(self): + with self.assertTypingError(): + self.test_map(flags=no_pyobj_flags) + + # + # min() and max() + # + + def check_minmax_1(self, pyfunc, flags): + cfunc = jit((types.int32, types.int32), **flags)(pyfunc) + + x_operands = [-1, 0, 1] + y_operands = [-1, 0, 1] + for x, y in itertools.product(x_operands, y_operands): + self.assertPreciseEqual(cfunc(x, y), 
pyfunc(x, y)) + + def test_max_1(self, flags=forceobj_flags): + """ + max(*args) + """ + self.check_minmax_1(max_usecase1, flags) + + def test_min_1(self, flags=forceobj_flags): + """ + min(*args) + """ + self.check_minmax_1(min_usecase1, flags) + + def test_max_npm_1(self): + self.test_max_1(flags=no_pyobj_flags) + + def test_min_npm_1(self): + self.test_min_1(flags=no_pyobj_flags) + + def check_minmax_2(self, pyfunc, flags): + cfunc = jit((types.int32, types.int32), **flags)(pyfunc) + + x_operands = [-1, 0, 1] + y_operands = [-1, 0, 1] + for x, y in itertools.product(x_operands, y_operands): + self.assertPreciseEqual(cfunc(x, y), pyfunc(x, y)) + + def test_max_2(self, flags=forceobj_flags): + """ + max(list) + """ + self.check_minmax_2(max_usecase2, flags) + + def test_min_2(self, flags=forceobj_flags): + """ + min(list) + """ + self.check_minmax_2(min_usecase2, flags) + + def test_max_npm_2(self): + with self.assertTypingError(): + self.test_max_2(flags=no_pyobj_flags) + + def test_min_npm_2(self): + with self.assertTypingError(): + self.test_min_2(flags=no_pyobj_flags) + + def check_minmax_3(self, pyfunc, flags): + def check(argty): + cfunc = jit((argty,), **flags)(pyfunc) + # Check that the algorithm matches Python's with a non-total order + tup = (1.5, float('nan'), 2.5) + for val in [tup, tup[::-1]]: + self.assertPreciseEqual(cfunc(val), pyfunc(val)) + + check(types.UniTuple(types.float64, 3)) + check(types.Tuple((types.float32, types.float64, types.float32))) + + def test_max_3(self, flags=forceobj_flags): + """ + max(tuple) + """ + self.check_minmax_3(max_usecase3, flags) + + def test_min_3(self, flags=forceobj_flags): + """ + min(tuple) + """ + self.check_minmax_3(min_usecase3, flags) + + def test_max_npm_3(self): + self.test_max_3(flags=no_pyobj_flags) + + def test_min_npm_3(self): + self.test_min_3(flags=no_pyobj_flags) + + def check_min_max_invalid_types(self, pyfunc, flags=forceobj_flags): + cfunc = jit((types.int32, types.Dummy('list'),), 
**flags)(pyfunc) + cfunc(1, [1]) + + def test_max_1_invalid_types(self): + with self.assertRaises(TypeError): + self.check_min_max_invalid_types(max_usecase1) + + def test_max_1_invalid_types_npm(self): + with self.assertTypingError(): + self.check_min_max_invalid_types(max_usecase1, flags=no_pyobj_flags) + + def test_min_1_invalid_types(self): + with self.assertRaises(TypeError): + self.check_min_max_invalid_types(min_usecase1) + + def test_min_1_invalid_types_npm(self): + with self.assertTypingError(): + self.check_min_max_invalid_types(min_usecase1, flags=no_pyobj_flags) + + def check_minmax_bool1(self, pyfunc, flags): + cfunc = jit((types.bool_, types.bool_), **flags)(pyfunc) + + operands = (False, True) + for x, y in itertools.product(operands, operands): + self.assertPreciseEqual(cfunc(x, y), pyfunc(x, y)) + + def test_max_bool1(self, flags=forceobj_flags): + # tests max() + self.check_minmax_bool1(max_usecase1, flags) + + def test_min_bool1(self, flags=forceobj_flags): + # tests min() + self.check_minmax_bool1(min_usecase1, flags) + + # Test that max(1) and min(1) fail + + def check_min_max_unary_non_iterable(self, pyfunc, flags=forceobj_flags): + cfunc = jit((types.int32,), **flags)(pyfunc) + cfunc(1) + + def test_max_unary_non_iterable(self): + with self.assertRaises(TypeError): + self.check_min_max_unary_non_iterable(max_usecase3) + + def test_max_unary_non_iterable_npm(self): + with self.assertTypingError(): + self.check_min_max_unary_non_iterable(max_usecase3) + + def test_min_unary_non_iterable(self): + with self.assertRaises(TypeError): + self.check_min_max_unary_non_iterable(min_usecase3) + + def test_min_unary_non_iterable_npm(self): + with self.assertTypingError(): + self.check_min_max_unary_non_iterable(min_usecase3) + + # Test that max(()) and min(()) fail + + def check_min_max_empty_tuple(self, pyfunc, func_name): + with self.assertTypingError() as raises: + jit((), **no_pyobj_flags)(pyfunc) + self.assertIn("%s() argument is an empty tuple" % 
func_name, + str(raises.exception)) + + def test_max_empty_tuple(self): + self.check_min_max_empty_tuple(max_usecase4, "max") + + def test_min_empty_tuple(self): + self.check_min_max_empty_tuple(min_usecase4, "min") + + + def test_oct(self, flags=forceobj_flags): + pyfunc = oct_usecase + + cfunc = jit((types.int32,), **flags)(pyfunc) + for x in [-8, -1, 0, 1, 8]: + self.assertPreciseEqual(cfunc(x), pyfunc(x)) + + def test_oct_npm(self): + with self.assertTypingError(): + self.test_oct(flags=no_pyobj_flags) + + def test_reduce(self, flags=forceobj_flags): + pyfunc = reduce_usecase + argtys = (types.Dummy('function_ptr'), types.Dummy('list')) + cfunc = jit(argtys, **flags)(pyfunc) + + reduce_func = lambda x, y: x + y + + x = range(10) + self.assertPreciseEqual(cfunc(reduce_func, x), pyfunc(reduce_func, x)) + + x = [x + x/10.0 for x in range(10)] + self.assertPreciseEqual(cfunc(reduce_func, x), pyfunc(reduce_func, x)) + + x = [complex(x, x) for x in range(10)] + self.assertPreciseEqual(cfunc(reduce_func, x), pyfunc(reduce_func, x)) + + def test_reduce_npm(self): + with self.assertTypingError(): + self.test_reduce(flags=no_pyobj_flags) + + def test_round1(self, flags=forceobj_flags): + pyfunc = round_usecase1 + + for tp in (types.float64, types.float32): + cfunc = jit((tp,), **flags)(pyfunc) + values = [-1.6, -1.5, -1.4, -0.5, 0.0, 0.1, 0.5, 0.6, 1.4, 1.5, 5.0] + values += [-0.1, -0.0] + for x in values: + self.assertPreciseEqual(cfunc(x), pyfunc(x)) + + def test_round1_npm(self): + self.test_round1(flags=no_pyobj_flags) + + def test_round2(self, flags=forceobj_flags): + pyfunc = round_usecase2 + + for tp in (types.float64, types.float32): + prec = 'single' if tp is types.float32 else 'exact' + cfunc = jit((tp, types.int32), **flags)(pyfunc) + for x in [0.0, 0.1, 0.125, 0.25, 0.5, 0.75, 1.25, + 1.5, 1.75, 2.25, 2.5, 2.75, 12.5, 15.0, 22.5]: + for n in (-1, 0, 1, 2): + self.assertPreciseEqual(cfunc(x, n), pyfunc(x, n), + prec=prec) + expected = pyfunc(-x, n) + 
self.assertPreciseEqual(cfunc(-x, n), pyfunc(-x, n), + prec=prec) + + def test_round2_npm(self): + self.test_round2(flags=no_pyobj_flags) + + def test_sum_objmode(self, flags=forceobj_flags): + pyfunc = sum_usecase + + cfunc = jit((types.Dummy('list'),), **flags)(pyfunc) + + x = range(10) + self.assertPreciseEqual(cfunc(x), pyfunc(x)) + + x = [x + x/10.0 for x in range(10)] + self.assertPreciseEqual(cfunc(x), pyfunc(x)) + + x = [complex(x, x) for x in range(10)] + self.assertPreciseEqual(cfunc(x), pyfunc(x)) + + def test_sum(self): + # In Python 3.8+ "start" can be specified as a kwarg, so test that too + sum_default = njit(sum_usecase) + sum_kwarg = njit(sum_kwarg_usecase) + + @njit + def sum_range(sz, start=0): + tmp = range(sz) + ret = sum(tmp, start) + return sum(tmp, start=start), ret + + ntpl = namedtuple('ntpl', ['a', 'b']) + + # check call with default kwarg, start=0 + def args(): + yield [*range(10)] + yield [x + x/10.0 for x in range(10)] + yield [x * 1j for x in range(10)] + yield (1, 2, 3) + yield (1, 2, 3j) + # uints will likely end up as floats as `start` is signed, so just + # test mixed signed ints + yield (np.int64(32), np.int32(2), np.int8(3)) + tl = typed.List(range(5)) + yield tl + yield np.ones(5) + yield ntpl(100, 200) + yield ntpl(100, 200j) + + for x in args(): + self.assertPreciseEqual(sum_default(x), sum_default.py_func(x)) + + # Check the uint use case, as start is signed, NumPy will end up with + # a float result whereas Numba will end up with an int (see integer + # typing NBEP). 
+ x = (np.uint64(32), np.uint32(2), np.uint8(3)) + self.assertEqual(sum_default(x), sum_default.py_func(x)) + + # check call with changing default kwarg, start + def args_kws(): + yield [*range(10)], 12 + yield [x + x/10.0 for x in range(10)], 19j + yield [x * 1j for x in range(10)], -2 + yield (1, 2, 3), 9 + yield (1, 2, 3j), -0 + # uints will likely end up as floats as `start` is signed, so just + # test mixed signed ints + yield (np.int64(32), np.int32(2), np.int8(3)), np.uint32(7) + tl = typed.List(range(5)) + yield tl, 100 + yield np.ones((5, 5)), 10 * np.ones((5,)) + yield ntpl(100, 200), -50 + yield ntpl(100, 200j), 9 + + for x, start in args_kws(): + self.assertPreciseEqual(sum_kwarg(x, start=start), + sum_kwarg.py_func(x, start=start)) + + # check call with range() + for start in range(-3, 4): + for sz in range(-3, 4): + self.assertPreciseEqual(sum_range(sz, start=start), + sum_range.py_func(sz, start=start)) + + def test_sum_exceptions(self): + sum_default = njit(sum_usecase) + sum_kwarg = njit(sum_kwarg_usecase) + + # check start as string/bytes/bytearray is error + msg = "sum() can't sum {}" + + with self.assertRaises(errors.TypingError) as raises: + sum_kwarg((1, 2, 3), 'a') + + self.assertIn(msg.format('strings'), str(raises.exception)) + + with self.assertRaises(errors.TypingError) as raises: + sum_kwarg((1, 2, 3), b'123') + + self.assertIn(msg.format('bytes'), str(raises.exception)) + + with self.assertRaises(errors.TypingError) as raises: + sum_kwarg((1, 2, 3), bytearray(b'123')) + + self.assertIn(msg.format('bytearray'), str(raises.exception)) + + # check invalid type has no impl + with self.assertRaises(errors.TypingError) as raises: + sum_default('abcd') + + self.assertIn('No implementation', str(raises.exception)) + + def test_truth(self): + pyfunc = truth_usecase + cfunc = jit(nopython=True)(pyfunc) + + self.assertEqual(pyfunc(True), cfunc(True)) + self.assertEqual(pyfunc(False), cfunc(False)) + + def test_type_unary(self): + # Test type(val) 
and type(val)(other_val) + pyfunc = type_unary_usecase + cfunc = jit(nopython=True)(pyfunc) + + def check(*args): + expected = pyfunc(*args) + self.assertPreciseEqual(cfunc(*args), expected) + + check(1.5, 2) + check(1, 2.5) + check(1.5j, 2) + check(True, 2) + check(2.5j, False) + + def test_zip(self, flags=forceobj_flags): + self.run_nullary_func(zip_usecase, flags) + + def test_zip_npm(self): + self.test_zip(flags=no_pyobj_flags) + + def test_zip_1(self, flags=forceobj_flags): + self.run_nullary_func(zip_1_usecase, flags) + + def test_zip_1_npm(self): + self.test_zip_1(flags=no_pyobj_flags) + + def test_zip_3(self, flags=forceobj_flags): + self.run_nullary_func(zip_3_usecase, flags) + + def test_zip_3_npm(self): + self.test_zip_3(flags=no_pyobj_flags) + + def test_zip_0(self, flags=forceobj_flags): + self.run_nullary_func(zip_0_usecase, flags) + + def test_zip_0_npm(self): + self.test_zip_0(flags=no_pyobj_flags) + + def test_zip_first_exhausted(self, flags=forceobj_flags): + """ + Test side effect to the input iterators when a left iterator has been + exhausted before the ones on the right. 
+ """ + self.run_nullary_func(zip_first_exhausted, flags) + + def test_zip_first_exhausted_npm(self): + self.test_zip_first_exhausted(flags=nrt_no_pyobj_flags) + + def test_pow_op_usecase(self): + args = [ + (2, 3), + (2.0, 3), + (2, 3.0), + (2j, 3.0j), + ] + + for x, y in args: + argtys = (typeof(x), typeof(y)) + cfunc = jit(argtys, **no_pyobj_flags)(pow_op_usecase) + r = cfunc(x, y) + self.assertPreciseEqual(r, pow_op_usecase(x, y)) + + def test_pow_usecase(self): + args = [ + (2, 3), + (2.0, 3), + (2, 3.0), + (2j, 3.0j), + ] + + for x, y in args: + argtys = (typeof(x), typeof(y)) + cfunc = jit(argtys, **no_pyobj_flags)(pow_usecase) + r = cfunc(x, y) + self.assertPreciseEqual(r, pow_usecase(x, y)) + + def _check_min_max(self, pyfunc): + cfunc = njit()(pyfunc) + expected = pyfunc() + got = cfunc() + self.assertPreciseEqual(expected, got) + + def test_min_max_iterable_input(self): + + @njit + def frange(start, stop, step): + i = start + while i < stop: + yield i + i += step + + def sample_functions(op): + yield lambda: op(range(10)) + yield lambda: op(range(4, 12)) + yield lambda: op(range(-4, -15, -1)) + yield lambda: op([6.6, 5.5, 7.7]) + yield lambda: op([(3, 4), (1, 2)]) + yield lambda: op(frange(1.1, 3.3, 0.1)) + yield lambda: op([np.nan, -np.inf, np.inf, np.nan]) + yield lambda: op([(3,), (1,), (2,)]) + + for fn in sample_functions(op=min): + self._check_min_max(fn) + + for fn in sample_functions(op=max): + self._check_min_max(fn) + + +class TestOperatorMixedTypes(TestCase): + + def test_eq_ne(self): + for opstr in ('eq', 'ne'): + op = getattr(operator, opstr) + + @njit + def func(a, b): + return op(a, b) + + # all these things should evaluate to being equal or not, all should + # survive typing. 
+ things = (1, 0, True, False, 1.0, 2.0, 1.1, 1j, None, "", "1") + for x, y in itertools.product(things, things): + self.assertPreciseEqual(func.py_func(x, y), func(x, y)) + + def test_cmp(self): + for opstr in ('gt', 'lt', 'ge', 'le', 'eq', 'ne'): + op = getattr(operator, opstr) + @njit + def func(a, b): + return op(a, b) + + # numerical things should all be comparable + things = (1, 0, True, False, 1.0, 0.0, 1.1) + for x, y in itertools.product(things, things): + expected = func.py_func(x, y) + got = func(x, y) + message = ("%s %s %s does not match between Python and Numba" + % (x, opstr, y)) + self.assertEqual(expected, got, message) + + +class TestIsinstanceBuiltin(TestCase): + def test_isinstance(self): + pyfunc = isinstance_usecase + cfunc = jit(nopython=True)(pyfunc) + + inputs = ( + 3, # int + 5.0, # float + "Hello", # string + b'world', # bytes + 1j, # complex + [1, 2, 3], # list + (1, 3, 3, 3), # UniTuple + set([1, 2]), # set + (1, 'nba', 2), # Heterogeneous Tuple + # {'hello': 2}, # dict - doesn't work as input + None, + ) + + for inpt in inputs: + expected = pyfunc(inpt) + got = cfunc(inpt) + self.assertEqual(expected, got) + + def test_isinstance_dict(self): + # Tests typed.Dict and LiteralStrKeyDict + pyfunc = isinstance_dict + cfunc = jit(nopython=True)(pyfunc) + self.assertEqual(pyfunc(), cfunc()) + + def test_isinstance_issue9125(self): + pyfunc = invalid_isinstance_usecase_phi_nopropagate2 + cfunc = jit(nopython=True)(pyfunc) + self.assertEqual(pyfunc(3), cfunc(3)) + + def test_isinstance_numba_types(self): + # This makes use of type aliasing between python scalars and NumPy + # scalars, see also test_numba_types() + pyfunc = isinstance_usecase_numba_types + cfunc = jit(nopython=True)(pyfunc) + + inputs = ( + (types.int32(1), 'int32'), + (types.int64(2), 'int64'), + (types.float32(3.0), 'float32'), + (types.float64(4.0), 'float64'), + (types.complex64(5j), 'no match'), + (typed.List([1, 2]), 'typed list'), + (typed.Dict.empty(types.int64, 
types.int64), 'typed dict') + ) + + for inpt, expected in inputs: + got = cfunc(inpt) + self.assertEqual(expected, got) + + def test_isinstance_numba_types_2(self): + pyfunc = isinstance_usecase_numba_types_2 + cfunc = jit(nopython=True)(pyfunc) + self.assertEqual(pyfunc(), cfunc()) + + def test_isinstance_invalid_type(self): + pyfunc = isinstance_usecase_invalid_type + cfunc = jit(nopython=True)(pyfunc) + + # valid type + self.assertTrue(cfunc(3.4)) + + # invalid type + msg = 'Cannot infer numba type of python type' + + with self.assertRaises(errors.TypingError) as raises: + cfunc(100) + + self.assertIn(msg, str(raises.exception)) + + def test_isinstance_exceptions(self): + fns = [ + (invalid_isinstance_usecase, + 'Cannot infer numba type of python type'), + (invalid_isinstance_usecase_phi_nopropagate, + ('isinstance() cannot determine the type of variable "z" due to a ' + 'branch.')), + (invalid_isinstance_optional_usecase, + ('isinstance() cannot determine the type of variable "z" due to a ' + 'branch.')), + (invalid_isinstance_unsupported_type_usecase(), + ('isinstance() does not support variables of type "ntpl(')), + ] + + for fn, msg in fns: + fn = njit(fn) + + with self.assertRaises(errors.TypingError) as raises: + fn(100) + + self.assertIn(msg, str(raises.exception)) + + def test_combinations(self): + # Combinatorically test common classes and instances + def gen_w_arg(clazz_type): + def impl(x): + return isinstance(x, clazz_type) + return impl + + clazz_types = (int, float, complex, str, list, tuple, bytes, set, range, + np.int8, np.float32,) + instances = (1, 2.3, 4j, '5', [6,], (7,), b'8', {9,}, None, + (10, 11, 12), (13, 'a', 14j), np.array([15, 16, 17]), + np.int8(18), np.float32(19), + typed.Dict.empty(types.unicode_type, types.float64), + typed.List.empty_list(types.complex128), np.ones(4)) + + for ct in clazz_types: + fn = njit(gen_w_arg(ct)) + for x in instances: + expected = fn.py_func(x) + got = fn(x) + self.assertEqual(got, expected) + + def 
test_numba_types(self): + # Check types which are Numba types, this would break without the jit + # decorator in all cases except numba.typed containers. + def gen_w_arg(clazz_type): + def impl(): + return isinstance(1, clazz_type) + return impl + + clazz_types = (types.Integer, types.Float, types.Array,) + + msg = "Numba type classes.*are not supported" + for ct in clazz_types: + fn = njit(gen_w_arg(ct)) + with self.assertRaises(errors.TypingError) as raises: + fn() + self.assertRegex(str(raises.exception), msg) + + def test_python_numpy_scalar_alias_problem(self): + # There's a problem due to Python and NumPy scalars being aliased in the + # type system. This is because e.g. int scalar values and NumPy np.intp + # type alias to types.intp. This test merely records this fact. + + @njit + def foo(): + return isinstance(np.intp(10), int) + + self.assertEqual(foo(), True) + self.assertEqual(foo.py_func(), False) + + @njit + def bar(): + return isinstance(1, np.intp) + + self.assertEqual(bar(), True) + self.assertEqual(bar.py_func(), False) + + def test_branch_prune(self): + # Check that isinstance branches are pruned allowing otherwise + # impossible type specific specialisation. + + @njit + def foo(x): + if isinstance(x, str): + return x + 'some_string' + elif isinstance(x, complex): + return np.imag(x) + elif isinstance(x, tuple): + return len(x) + else: + assert 0 + + for x in ('string', 1 + 2j, ('a', 3, 4j)): + expected = foo.py_func(x) + got = foo(x) + self.assertEqual(got, expected) + + def test_branch_prune_and_bind_to_sig(self): + # see issue 9795 + @register_jitable + def f(x, y): + return x + y + + @njit + def call_f(x): + if isinstance(x, tuple): + return f(*x) + else: + return f(x) + + # The issue is that without isinstance and branch pruning working + # correctly, an attempt will be made to bind the function `f` with + # argument `x`. If `x` is a Tuple type, this will fail on the `else` + # branch as `f` takes two arguments opposed to one. 
+ x = (1, 2) + self.assertEqual(call_f(x), call_f.py_func(x)) + + + # This should raise as partial type inference and branch pruning will + # remove the `f(*x)` branch and just leave `f(x)`, which then won't + # bind because `f` takes two arguments and only one is supplied. + with self.assertRaises(errors.TypingError) as raises: + call_f(1) + + msg = str(raises.exception) + self.assertIn("Cannot bind", msg) + self.assertIn("TypeError: missing a required argument: 'y'", msg) + + def test_branch_prune_non_tuples_as_star_arg(self): + + # see issue 9795 + @register_jitable + def f(x, y): + return x + y + + @register_jitable + def g(x): + return x + + @njit + def call_f(x): + if isinstance(x, tuple): + return f(*x) + else: + return g(x) + + # The issue is that without isinstance and branch pruning working + # correctly, an attempt will be made to bind the function `f` with + # argument `x`. If `x` is a non-tuple type `*x` will not bind to the + # signature of `f`. + x = 1 + self.assertEqual(call_f(x), call_f.py_func(x)) + + def test_branch_prune_literal_as_star_arg(self): + + # see issue 9795 + @register_jitable + def f(x, y): + return x + y + + @register_jitable + def g(x): + return x + + one = 1 + @njit + def call_f(): + x = one + if isinstance(x, tuple): + return f(*x) + else: + return g(x) + + # The issue is that without isinstance and branch pruning working + # correctly, an attempt will be made to bind the function `f` with + # argument `x`. If `x` is a non-tuple const value type `*x` will not + # bind to the signature of `f`. 
+ self.assertEqual(call_f(), call_f.py_func()) + + +class TestGetattrBuiltin(MemoryLeakMixin, TestCase): + # Tests the getattr() builtin + + def test_getattr_func_retty(self): + + @njit + def foo(x): + attr = getattr(x, '__hash__') + return attr() + + for x in (1, 2.34, (5, 6, 7)): + self.assertPreciseEqual(foo(x), foo.py_func(x)) + + def test_getattr_value_retty(self): + + @njit + def foo(x): + return getattr(x, 'ndim') + + for x in range(3): + tmp = np.empty((1, ) * x) + self.assertPreciseEqual(foo(tmp), foo.py_func(tmp)) + + def test_getattr_module_obj(self): + # Consts on modules work ok + + @njit + def foo(): + return getattr(np, 'pi') + + self.assertPreciseEqual(foo(), foo.py_func()) + + def test_getattr_module_obj_not_implemented(self): + # Functions on modules do not work at present + + @njit + def foo(): + return getattr(np, 'cos')(1) + + with self.assertRaises(errors.TypingError) as raises: + foo() + + msg = "Returning function objects is not implemented" + self.assertIn(msg, str(raises.exception)) + + def test_getattr_raises_attribute_error(self): + + invalid_attr = '__not_a_valid_attr__' + + @njit + def foo(x): + return getattr(x, invalid_attr) + + with self.assertRaises(AttributeError) as raises: + foo(1.23) + + self.assertIn(f"'float64' has no attribute '{invalid_attr}'", + str(raises.exception)) + + def test_getattr_with_default(self): + # Checks returning a default works + + @njit + def foo(x, default): + return getattr(x, '__not_a_valid_attr__', default) + + for x, y in zip((1, 2.34, (5, 6, 7),), (None, 20, 'some_string')): + self.assertPreciseEqual(foo(x, y), foo.py_func(x, y)) + + def test_getattr_non_literal_str(self): + + @njit + def foo(x, nonliteral_str): + return getattr(x, nonliteral_str) + + with self.assertRaises(errors.TypingError) as raises: + foo(1, '__hash__') + + msg = "argument 'name' must be a literal string" + self.assertIn(msg, str(raises.exception)) + + def test_getattr_no_optional_type_generated(self): + + @njit + def 
default_hash(): + return 12345 + + @njit + def foo(): + hash_func = getattr(np.ones(1), "__not_a_valid_attr__", + default_hash) + return hash_func() # Optionals have no call support + + self.assertPreciseEqual(foo(), foo.py_func()) + + +class TestHasattrBuiltin(MemoryLeakMixin, TestCase): + # Tests the hasattr() builtin + + def test_hasattr(self): + + @njit + def foo(x): + return hasattr(x, '__hash__'), hasattr(x, '__not_a_valid_attr__') + + ty = types.int64 + for x in (1, 2.34, (5, 6, 7), typed.Dict.empty(ty, ty), + typed.List.empty_list(ty), np.ones(4), 'ABC'): + self.assertPreciseEqual(foo(x), foo.py_func(x)) + + def test_hasattr_non_const_attr(self): + # This tests that an error is raised in the case that a hasattr() call + # is made on an attribute that cannot be resolved as a compile time + # constant (there's a phi in the way!). + + @njit + def foo(pred): + if pred > 3: + attr = "__hash__" + else: + attr = "__str__" + + hasattr(1, attr) + + with self.assertRaises(errors.NumbaTypeError) as raises: + foo(6) + + msg = ('hasattr() cannot determine the type of variable ' + '"attr" due to a branch.') + self.assertIn(msg, str(raises.exception)) + + +class TestStrAndReprBuiltin(MemoryLeakMixin, TestCase): + + def test_str_default(self): + + @njit + def foo(): + return str() + + self.assertEqual(foo(), foo.py_func()) + + def test_str_object_kwarg(self): + + @njit + def foo(x): + return str(object=x) + + value = "a string" + self.assertEqual(foo(value), foo.py_func(value)) + + def test_str_calls_dunder_str(self): + + @njit + def foo(x): + return str(x) + + Dummy, DummyType = self.make_dummy_type() + dummy = Dummy() + string_repr = "this is the dummy object str" + Dummy.__str__= lambda inst: string_repr + + @overload_method(DummyType, "__str__") + def ol_dummy_string(dummy): + def impl(dummy): + return string_repr + return impl + + @overload_method(DummyType, "__repr__") + def ol_dummy_repr(dummy): + def impl(dummy): + return "SHOULD NOT BE CALLED" + return impl + + 
self.assertEqual(foo(dummy), foo.py_func(dummy)) + + def test_str_falls_back_to_repr(self): + + @njit + def foo(x): + return str(x) + + Dummy, DummyType = self.make_dummy_type() + dummy = Dummy() + string_repr = "this is the dummy object repr" + Dummy.__repr__= lambda inst: string_repr + + @overload_method(DummyType, "__repr__") + def ol_dummy_repr(dummy): + def impl(dummy): + return string_repr + return impl + + self.assertEqual(foo(dummy), foo.py_func(dummy)) + + def test_repr(self): + @njit + def foo(x): + return repr(x), x + + for x in ("abc", False, 123): + self.assertEqual(foo(x), foo.py_func(x)) + + def test_repr_fallback(self): + # checks str/repr fallback, there's no overloaded __str__ or __repr__ + # for the dummy type so it has to use generic '' + # string for the `repr` call. + + Dummy, DummyType = self.make_dummy_type() + dummy = Dummy() + string_repr = f"" + Dummy.__repr__= lambda inst: string_repr + + @box(DummyType) + def box_dummy(typ, obj, c): + clazobj = c.pyapi.unserialize(c.pyapi.serialize_object(Dummy)) + return c.pyapi.call_function_objargs(clazobj, ()) + + @njit + def foo(x): + return str(x) + + self.assertEqual(foo(dummy), foo.py_func(dummy)) + + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_byteflow.py b/venv/lib/python3.10/site-packages/numba/tests/test_byteflow.py new file mode 100644 index 0000000000000000000000000000000000000000..041e1d65b20f7fcdc49857903748727cecc29a16 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_byteflow.py @@ -0,0 +1,94 @@ +""" +Test byteflow.py specific issues +""" +import unittest + +from numba.tests.support import TestCase +from numba.core.compiler import run_frontend + + +class TestByteFlowIssues(TestCase): + def test_issue_5087(self): + # This is an odd issue. The exact number of print below is + # necessary to trigger it. Too many or too few will alter the behavior. 
+ # Also note that the function below will not be executed. The problem + # occurs at compilation. The definition below is invalid for execution. + # The problem occurs in the bytecode analysis. + def udt(): + print + print + print + + for i in range: + print + print + print + print + print + print + print + print + print + print + print + print + print + print + print + print + print + print + + for j in range: + print + print + print + print + print + print + print + for k in range: + for l in range: + print + + print + print + print + print + print + print + print + print + print + if print: + for n in range: + print + else: + print + + run_frontend(udt) + + def test_issue_5097(self): + # Inspired by https://github.com/numba/numba/issues/5097 + def udt(): + for i in range(0): + if i > 0: + pass + a = None # noqa: F841 + + run_frontend(udt) + + def test_issue_5680(self): + # From https://github.com/numba/numba/issues/5680#issuecomment-625351336 + def udt(): + for k in range(0): + if 1 == 1: + ... + if 'a' == 'a': + ... 
+ + run_frontend(udt) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_caching.py b/venv/lib/python3.10/site-packages/numba/tests/test_caching.py new file mode 100644 index 0000000000000000000000000000000000000000..57c8a4dcc591fe2ae58f9e5ae54d58f845d3ec19 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_caching.py @@ -0,0 +1,1166 @@ +import importlib +import inspect +import multiprocessing +import os +import shutil +import stat +import subprocess +import sys +import traceback +import unittest +import warnings +import zipfile +from pathlib import Path + +import llvmlite.binding as ll +import numpy as np + +from numba import njit +from numba.core import codegen +from numba.core.caching import _UserWideCacheLocator, _ZipCacheLocator +from numba.core.errors import NumbaWarning +from numba.parfors import parfor +from numba.tests.support import ( + SerialMixin, + TestCase, + capture_cache_log, + import_dynamic, + override_config, + run_in_new_process_caching, + skip_if_typeguard, + skip_parfors_unsupported, + temp_directory, +) + +try: + import ipykernel +except ImportError: + ipykernel = None + + +def check_access_is_preventable(): + # This exists to check whether it is possible to prevent access to + # a file/directory through the use of `chmod 500`. If a user has + # elevated rights (e.g. root) then writes are likely to be possible + # anyway. Tests that require functioning access prevention are + # therefore skipped based on the result of this check. 
+ tempdir = temp_directory('test_cache') + test_dir = (os.path.join(tempdir, 'writable_test')) + os.mkdir(test_dir) + # check a write is possible + with open(os.path.join(test_dir, 'write_ok'), 'wt') as f: + f.write('check1') + # now forbid access + os.chmod(test_dir, 0o500) + try: + with open(os.path.join(test_dir, 'write_forbidden'), 'wt') as f: + f.write('check2') + # access prevention is not possible + return False + except PermissionError: + # Check that the cause of the exception is due to access/permission + # as per + # https://github.com/conda/conda/blob/4.5.0/conda/gateways/disk/permissions.py#L35-L37 # noqa: E501 + # errno reports access/perm fail so access prevention via + # `chmod 500` works for this user. + return True + finally: + os.chmod(test_dir, 0o775) + shutil.rmtree(test_dir) + + +_access_preventable = check_access_is_preventable() +_access_msg = "Cannot create a directory to which writes are preventable" +skip_bad_access = unittest.skipUnless(_access_preventable, _access_msg) + + +def constant_unicode_cache(): + c = "abcd" + return hash(c), c + + +def check_constant_unicode_cache(): + pyfunc = constant_unicode_cache + cfunc = njit(cache=True)(pyfunc) + exp_hv, exp_str = pyfunc() + got_hv, got_str = cfunc() + assert exp_hv == got_hv + assert exp_str == got_str + + +def dict_cache(): + return {'a': 1, 'b': 2} + + +def check_dict_cache(): + pyfunc = dict_cache + cfunc = njit(cache=True)(pyfunc) + exp = pyfunc() + got = cfunc() + assert exp == got + + +def generator_cache(): + for v in (1, 2, 3): + yield v + + +def check_generator_cache(): + pyfunc = generator_cache + cfunc = njit(cache=True)(pyfunc) + exp = list(pyfunc()) + got = list(cfunc()) + assert exp == got + + +class TestCaching(SerialMixin, TestCase): + def run_test(self, func): + func() + res = run_in_new_process_caching(func) + self.assertEqual(res['exitcode'], 0) + + def test_constant_unicode_cache(self): + self.run_test(check_constant_unicode_cache) + + def test_dict_cache(self): + 
self.run_test(check_dict_cache) + + def test_generator_cache(self): + self.run_test(check_generator_cache) + + def test_omitted(self): + + # Test in a new directory + cache_dir = temp_directory(self.__class__.__name__) + ctx = multiprocessing.get_context() + result_queue = ctx.Queue() + proc = ctx.Process( + target=omitted_child_test_wrapper, + args=(result_queue, cache_dir, False), + ) + proc.start() + proc.join() + success, output = result_queue.get() + + # Ensure the child process is completed before checking its output + if not success: + self.fail(output) + + self.assertEqual( + output, + 1000, + "Omitted function returned an incorrect output" + ) + + proc = ctx.Process( + target=omitted_child_test_wrapper, + args=(result_queue, cache_dir, True) + ) + proc.start() + proc.join() + success, output = result_queue.get() + + # Ensure the child process is completed before checking its output + if not success: + self.fail(output) + + self.assertEqual( + output, + 1000, + "Omitted function returned an incorrect output" + ) + + +def omitted_child_test_wrapper(result_queue, cache_dir, second_call): + with override_config("CACHE_DIR", cache_dir): + @njit(cache=True) + def test(num=1000): + return num + + try: + output = test() + # If we have a second call, we should have a cache hit. + # Otherwise, we expect a cache miss. 
+ if second_call: + assert test._cache_hits[test.signatures[0]] == 1, \ + "Cache did not hit as expected" + assert test._cache_misses[test.signatures[0]] == 0, \ + "Cache has an unexpected miss" + else: + assert test._cache_misses[test.signatures[0]] == 1, \ + "Cache did not miss as expected" + assert test._cache_hits[test.signatures[0]] == 0, \ + "Cache has an unexpected hit" + success = True + # Catch anything raised so it can be propagated + except: # noqa: E722 + output = traceback.format_exc() + success = False + result_queue.put((success, output)) + + +class BaseCacheTest(TestCase): + # The source file that will be copied + usecases_file = None + # Make sure this doesn't conflict with another module + modname = None + + def setUp(self): + self.tempdir = temp_directory('test_cache') + sys.path.insert(0, self.tempdir) + self.modfile = os.path.join(self.tempdir, self.modname + ".py") + self.cache_dir = os.path.join(self.tempdir, "__pycache__") + shutil.copy(self.usecases_file, self.modfile) + os.chmod(self.modfile, stat.S_IREAD | stat.S_IWRITE) + self.maxDiff = None + + def tearDown(self): + sys.modules.pop(self.modname, None) + sys.path.remove(self.tempdir) + + def import_module(self): + # Import a fresh version of the test module. All jitted functions + # in the test module will start anew and load overloads from + # the on-disk cache if possible. 
+ old = sys.modules.pop(self.modname, None) + if old is not None: + # Make sure cached bytecode is removed + cached = [old.__cached__] + for fn in cached: + try: + os.unlink(fn) + except FileNotFoundError: + pass + mod = import_dynamic(self.modname) + self.assertEqual(mod.__file__.rstrip('co'), self.modfile) + return mod + + def cache_contents(self): + try: + return [fn for fn in os.listdir(self.cache_dir) + if not fn.endswith(('.pyc', ".pyo"))] + except FileNotFoundError: + return [] + + def get_cache_mtimes(self): + return dict((fn, os.path.getmtime(os.path.join(self.cache_dir, fn))) + for fn in sorted(self.cache_contents())) + + def check_pycache(self, n): + c = self.cache_contents() + self.assertEqual(len(c), n, c) + + def dummy_test(self): + pass + + +class DispatcherCacheUsecasesTest(BaseCacheTest): + here = os.path.dirname(__file__) + usecases_file = os.path.join(here, "cache_usecases.py") + modname = "dispatcher_caching_test_fodder" + + def run_in_separate_process(self, *, envvars={}): + # Cached functions can be run from a distinct process. + # Also stresses issue #1603: uncached function calling cached function + # shouldn't fail compiling. 
+ code = """if 1: + import sys + + sys.path.insert(0, %(tempdir)r) + mod = __import__(%(modname)r) + mod.self_test() + """ % dict(tempdir=self.tempdir, modname=self.modname) + + subp_env = os.environ.copy() + subp_env.update(envvars) + popen = subprocess.Popen([sys.executable, "-c", code], + stdout=subprocess.PIPE, stderr=subprocess.PIPE, + env=subp_env) + out, err = popen.communicate() + if popen.returncode != 0: + raise AssertionError( + "process failed with code %s: \n" + "stdout follows\n%s\n" + "stderr follows\n%s\n" + % (popen.returncode, out.decode(), err.decode()), + ) + + def check_hits(self, func, hits, misses=None): + st = func.stats + self.assertEqual(sum(st.cache_hits.values()), hits, st.cache_hits) + if misses is not None: + self.assertEqual(sum(st.cache_misses.values()), misses, + st.cache_misses) + + +class TestCache(DispatcherCacheUsecasesTest): + + def test_caching(self): + self.check_pycache(0) + mod = self.import_module() + self.check_pycache(0) + + f = mod.add_usecase + self.assertPreciseEqual(f(2, 3), 6) + self.check_pycache(2) # 1 index, 1 data + self.assertPreciseEqual(f(2.5, 3), 6.5) + self.check_pycache(3) # 1 index, 2 data + self.check_hits(f, 0, 2) + + f = mod.add_objmode_usecase + self.assertPreciseEqual(f(2, 3), 6) + self.check_pycache(5) # 2 index, 3 data + self.assertPreciseEqual(f(2.5, 3), 6.5) + self.check_pycache(6) # 2 index, 4 data + self.check_hits(f, 0, 2) + + f = mod.record_return + rec = f(mod.aligned_arr, 1) + self.assertPreciseEqual(tuple(rec), (2, 43.5)) + rec = f(mod.packed_arr, 1) + self.assertPreciseEqual(tuple(rec), (2, 43.5)) + self.check_pycache(9) # 3 index, 6 data + self.check_hits(f, 0, 2) + + # Check the code runs ok from another process + self.run_in_separate_process() + + def test_caching_nrt_pruned(self): + self.check_pycache(0) + mod = self.import_module() + self.check_pycache(0) + + f = mod.add_usecase + self.assertPreciseEqual(f(2, 3), 6) + self.check_pycache(2) # 1 index, 1 data + # NRT pruning may affect 
cache + self.assertPreciseEqual(f(2, np.arange(3)), 2 + np.arange(3) + 1) + self.check_pycache(3) # 1 index, 2 data + self.check_hits(f, 0, 2) + + def test_inner_then_outer(self): + # Caching inner then outer function is ok + mod = self.import_module() + self.assertPreciseEqual(mod.inner(3, 2), 6) + self.check_pycache(2) # 1 index, 1 data + # Uncached outer function shouldn't fail (issue #1603) + f = mod.outer_uncached + self.assertPreciseEqual(f(3, 2), 2) + self.check_pycache(2) # 1 index, 1 data + mod = self.import_module() + f = mod.outer_uncached + self.assertPreciseEqual(f(3, 2), 2) + self.check_pycache(2) # 1 index, 1 data + # Cached outer will create new cache entries + f = mod.outer + self.assertPreciseEqual(f(3, 2), 2) + self.check_pycache(4) # 2 index, 2 data + self.assertPreciseEqual(f(3.5, 2), 2.5) + self.check_pycache(6) # 2 index, 4 data + + def test_outer_then_inner(self): + # Caching outer then inner function is ok + mod = self.import_module() + self.assertPreciseEqual(mod.outer(3, 2), 2) + self.check_pycache(4) # 2 index, 2 data + self.assertPreciseEqual(mod.outer_uncached(3, 2), 2) + self.check_pycache(4) # same + mod = self.import_module() + f = mod.inner + self.assertPreciseEqual(f(3, 2), 6) + self.check_pycache(4) # same + self.assertPreciseEqual(f(3.5, 2), 6.5) + self.check_pycache(5) # 2 index, 3 data + + def test_no_caching(self): + mod = self.import_module() + + f = mod.add_nocache_usecase + self.assertPreciseEqual(f(2, 3), 6) + self.check_pycache(0) + + def test_looplifted(self): + # Loop-lifted functions can't be cached and raise a warning + mod = self.import_module() + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always', NumbaWarning) + + f = mod.looplifted + self.assertPreciseEqual(f(4), 6) + self.check_pycache(0) + + self.assertEqual(len(w), 1) + self.assertIn('Cannot cache compiled function "looplifted" ' + 'as it uses lifted code', str(w[0].message)) + + def test_big_array(self): + # Code references big 
array globals cannot be cached + mod = self.import_module() + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always', NumbaWarning) + + f = mod.use_big_array + np.testing.assert_equal(f(), mod.biggie) + self.check_pycache(0) + + self.assertEqual(len(w), 1) + self.assertIn('Cannot cache compiled function "use_big_array" ' + 'as it uses dynamic globals', str(w[0].message)) + + def test_ctypes(self): + # Functions using a ctypes pointer can't be cached and raise + # a warning. + mod = self.import_module() + + for f in [mod.use_c_sin, mod.use_c_sin_nest1, mod.use_c_sin_nest2]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always', NumbaWarning) + + self.assertPreciseEqual(f(0.0), 0.0) + self.check_pycache(0) + + self.assertEqual(len(w), 1) + self.assertIn( + 'Cannot cache compiled function "{}"'.format(f.__name__), + str(w[0].message), + ) + + def test_closure(self): + mod = self.import_module() + + with warnings.catch_warnings(): + warnings.simplefilter('error', NumbaWarning) + + f = mod.closure1 + self.assertPreciseEqual(f(3), 6) # 3 + 3 = 6 + f = mod.closure2 + self.assertPreciseEqual(f(3), 8) # 3 + 5 = 8 + f = mod.closure3 + self.assertPreciseEqual(f(3), 10) # 3 + 7 = 10 + f = mod.closure4 + self.assertPreciseEqual(f(3), 12) # 3 + 9 = 12 + self.check_pycache(5) # 1 nbi, 4 nbc + + def test_first_class_function(self): + mod = self.import_module() + f = mod.first_class_function_usecase + self.assertEqual(f(mod.first_class_function_mul, 1), 1) + self.assertEqual(f(mod.first_class_function_mul, 10), 100) + self.assertEqual(f(mod.first_class_function_add, 1), 2) + self.assertEqual(f(mod.first_class_function_add, 10), 20) + # 1 + 1 + 1 nbi, 1 + 1 + 2 nbc - a separate cache for each call to `f` + # with a different callback. 
+ self.check_pycache(7) + + def test_cache_reuse(self): + mod = self.import_module() + mod.add_usecase(2, 3) + mod.add_usecase(2.5, 3.5) + mod.add_objmode_usecase(2, 3) + mod.outer_uncached(2, 3) + mod.outer(2, 3) + mod.record_return(mod.packed_arr, 0) + mod.record_return(mod.aligned_arr, 1) + mtimes = self.get_cache_mtimes() + # Two signatures compiled + self.check_hits(mod.add_usecase, 0, 2) + + mod2 = self.import_module() + self.assertIsNot(mod, mod2) + f = mod2.add_usecase + f(2, 3) + self.check_hits(f, 1, 0) + f(2.5, 3.5) + self.check_hits(f, 2, 0) + f = mod2.add_objmode_usecase + f(2, 3) + self.check_hits(f, 1, 0) + + # The files haven't changed + self.assertEqual(self.get_cache_mtimes(), mtimes) + + self.run_in_separate_process() + self.assertEqual(self.get_cache_mtimes(), mtimes) + + def test_cache_invalidate(self): + mod = self.import_module() + f = mod.add_usecase + self.assertPreciseEqual(f(2, 3), 6) + + # This should change the functions' results + with open(self.modfile, "a") as f: + f.write("\nZ = 10\n") + + mod = self.import_module() + f = mod.add_usecase + self.assertPreciseEqual(f(2, 3), 15) + f = mod.add_objmode_usecase + self.assertPreciseEqual(f(2, 3), 15) + + def test_recompile(self): + # Explicit call to recompile() should overwrite the cache + mod = self.import_module() + f = mod.add_usecase + self.assertPreciseEqual(f(2, 3), 6) + + mod = self.import_module() + f = mod.add_usecase + mod.Z = 10 + self.assertPreciseEqual(f(2, 3), 6) + f.recompile() + self.assertPreciseEqual(f(2, 3), 15) + + # Freshly recompiled version is re-used from other imports + mod = self.import_module() + f = mod.add_usecase + self.assertPreciseEqual(f(2, 3), 15) + + def test_same_names(self): + # Function with the same names should still disambiguate + mod = self.import_module() + f = mod.renamed_function1 + self.assertPreciseEqual(f(2), 4) + f = mod.renamed_function2 + self.assertPreciseEqual(f(2), 8) + + def test_frozen(self): + from .dummy_module import function + 
old_code = function.__code__ + code_obj = compile('pass', 'tests/dummy_module.py', 'exec') + try: + function.__code__ = code_obj + + source = inspect.getfile(function) + # doesn't return anything, since it cannot find the module + # fails unless the executable is frozen + locator = _UserWideCacheLocator.from_function(function, source) + self.assertIsNone(locator) + + sys.frozen = True + # returns a cache locator object, only works when the executable + # is frozen + locator = _UserWideCacheLocator.from_function(function, source) + self.assertIsInstance(locator, _UserWideCacheLocator) + + finally: + function.__code__ = old_code + del sys.frozen + + def _test_pycache_fallback(self): + """ + With a disabled __pycache__, test there is a working fallback + (e.g. on the user-wide cache dir) + """ + mod = self.import_module() + f = mod.add_usecase + # Remove this function's cache files at the end, to avoid accumulation + # across test calls. + self.addCleanup(shutil.rmtree, f.stats.cache_path, ignore_errors=True) + + self.assertPreciseEqual(f(2, 3), 6) + # It's a cache miss since the file was copied to a new temp location + self.check_hits(f, 0, 1) + + # Test re-use + mod2 = self.import_module() + f = mod2.add_usecase + self.assertPreciseEqual(f(2, 3), 6) + self.check_hits(f, 1, 0) + + # The __pycache__ is empty (otherwise the test's preconditions + # wouldn't be met) + self.check_pycache(0) + + @skip_bad_access + @unittest.skipIf(os.name == "nt", + "cannot easily make a directory read-only on Windows") + def test_non_creatable_pycache(self): + # Make it impossible to create the __pycache__ directory + old_perms = os.stat(self.tempdir).st_mode + os.chmod(self.tempdir, 0o500) + self.addCleanup(os.chmod, self.tempdir, old_perms) + + self._test_pycache_fallback() + + @skip_bad_access + @unittest.skipIf(os.name == "nt", + "cannot easily make a directory read-only on Windows") + def test_non_writable_pycache(self): + # Make it impossible to write to the __pycache__ directory + 
pycache = os.path.join(self.tempdir, '__pycache__') + os.mkdir(pycache) + old_perms = os.stat(pycache).st_mode + os.chmod(pycache, 0o500) + self.addCleanup(os.chmod, pycache, old_perms) + + self._test_pycache_fallback() + + def test_ipython(self): + # Test caching in an IPython session + base_cmd = [sys.executable, '-m', 'IPython'] + base_cmd += ['--quiet', '--quick', '--no-banner', '--colors=NoColor'] + try: + ver = subprocess.check_output(base_cmd + ['--version']) + except subprocess.CalledProcessError as e: + self.skipTest("ipython not available: return code %d" + % e.returncode) + ver = ver.strip().decode() + # Create test input + inputfn = os.path.join(self.tempdir, "ipython_cache_usecase.txt") + with open(inputfn, "w") as f: + f.write(r""" + import os + import sys + + from numba import jit + + # IPython 5 does not support multiline input if stdin isn't + # a tty (https://github.com/ipython/ipython/issues/9752) + f = jit(cache=True)(lambda: 42) + + res = f() + # IPython writes on stdout, so use stderr instead + sys.stderr.write(u"cache hits = %d\n" % f.stats.cache_hits[()]) + + # IPython hijacks sys.exit(), bypass it + sys.stdout.flush() + sys.stderr.flush() + os._exit(res) + """) + + def execute_with_input(): + # Feed the test input as stdin, to execute it in REPL context + with open(inputfn, "rb") as stdin: + p = subprocess.Popen(base_cmd, stdin=stdin, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True) + out, err = p.communicate() + if p.returncode != 42: + self.fail("unexpected return code %d\n" + "-- stdout:\n%s\n" + "-- stderr:\n%s\n" + % (p.returncode, out, err)) + return err + + execute_with_input() + # Run a second time and check caching + err = execute_with_input() + self.assertIn("cache hits = 1", err.strip()) + + @unittest.skipIf((ipykernel is None) or (ipykernel.version_info[0] < 6), + "requires ipykernel >= 6") + def test_ipykernel(self): + # Test caching in an IPython session using ipykernel + + base_cmd = 
[sys.executable, '-m', 'IPython'] + base_cmd += ['--quiet', '--quick', '--no-banner', '--colors=NoColor'] + try: + ver = subprocess.check_output(base_cmd + ['--version']) + except subprocess.CalledProcessError as e: + self.skipTest("ipython not available: return code %d" + % e.returncode) + ver = ver.strip().decode() + # Create test input + from ipykernel import compiler + inputfn = compiler.get_tmp_directory() + with open(inputfn, "w") as f: + f.write(r""" + import os + import sys + + from numba import jit + + # IPython 5 does not support multiline input if stdin isn't + # a tty (https://github.com/ipython/ipython/issues/9752) + f = jit(cache=True)(lambda: 42) + + res = f() + # IPython writes on stdout, so use stderr instead + sys.stderr.write(u"cache hits = %d\n" % f.stats.cache_hits[()]) + + # IPython hijacks sys.exit(), bypass it + sys.stdout.flush() + sys.stderr.flush() + os._exit(res) + """) + + def execute_with_input(): + # Feed the test input as stdin, to execute it in REPL context + with open(inputfn, "rb") as stdin: + p = subprocess.Popen(base_cmd, stdin=stdin, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True) + out, err = p.communicate() + if p.returncode != 42: + self.fail("unexpected return code %d\n" + "-- stdout:\n%s\n" + "-- stderr:\n%s\n" + % (p.returncode, out, err)) + return err + + execute_with_input() + # Run a second time and check caching + err = execute_with_input() + self.assertIn("cache hits = 1", err.strip()) + + +class TestCacheZip(DispatcherCacheUsecasesTest): + + def setUp(self): + super().setUp() + + # Create a simple Python module to be zipped + mod_content = """ +from numba import jit + +@jit(cache=True) +def add(x, y): + return x + y +""" + mod_filename = "test_module.py" + zip_filename = "test_archive.zip" + + # Create a zip file containing the module + zip_path = os.path.join(self.tempdir, zip_filename) + with zipfile.ZipFile(zip_path, "w") as zf: + zf.writestr(mod_filename, mod_content) + + # Add the 
zip file to sys.path + sys.path.insert(0, zip_path) + self.modname = "test_module" + + def tearDown(self): + # Clean up: remove the zip file from sys.path + sys.path.pop(0) + # Remove the module from sys.modules to clean up + sys.modules.pop("test_module", None) + + def test_zip_caching(self): + # (note that `self.import_module()` fails because its checks are + # incompatible + # with the zip file, so we just use normal imports here) + + # First import and call + import test_module # type: ignore + + result1 = test_module.add(2, 3) + self.assertEqual(result1, 5) + self.check_hits(test_module.add, 0, 1) + + # Record the initial cache hits + self.check_hits(test_module.add, 0) + + # Remove the module and reimport + del sys.modules["test_module"] + importlib.invalidate_caches() + import test_module # type: ignore + + # Second call: should use the cache + result2 = test_module.add(2, 3) + self.assertEqual(result2, 5) + + # Check if the cache was hit + self.check_hits(test_module.add, 1) + + +class TestCacheZipLib(DispatcherCacheUsecasesTest): + """ + ZipCache tests that don't require the setup/teardown from `TestCacheZip` + """ + def test_zip_locator_creation(self): + + def mock_func(): + pass + + zip_path = "/path/to/archive.zip/module.py" + + locator = _ZipCacheLocator.from_function(mock_func, zip_path) + self.assertIsNotNone(locator) + self.assertEqual(locator._zip_path, str(Path("/path/to/archive.zip"))) + self.assertEqual(locator._internal_path, "module.py") + + def test_zip_locator_non_zip_path(self): + + def mock_func(): + pass + + non_zip_path = "/path/to/module.py" + + locator = _ZipCacheLocator.from_function(mock_func, non_zip_path) + self.assertIsNone(locator) + + +@skip_parfors_unsupported +class TestSequentialParForsCache(DispatcherCacheUsecasesTest): + def setUp(self): + super(TestSequentialParForsCache, self).setUp() + # Turn on sequential parfor lowering + parfor.sequential_parfor_lowering = True + + def tearDown(self): + 
super(TestSequentialParForsCache, self).tearDown() + # Turn off sequential parfor lowering + parfor.sequential_parfor_lowering = False + + def test_caching(self): + mod = self.import_module() + self.check_pycache(0) + f = mod.parfor_usecase + ary = np.ones(10) + self.assertPreciseEqual(f(ary), ary * ary + ary) + dynamic_globals = [cres.library.has_dynamic_globals + for cres in f.overloads.values()] + self.assertEqual(dynamic_globals, [False]) + self.check_pycache(2) # 1 index, 1 data + + +class TestCacheWithCpuSetting(DispatcherCacheUsecasesTest): + # Disable parallel testing due to envvars modification + _numba_parallel_test_ = False + + def check_later_mtimes(self, mtimes_old): + match_count = 0 + for k, v in self.get_cache_mtimes().items(): + if k in mtimes_old: + self.assertGreaterEqual(v, mtimes_old[k]) + match_count += 1 + self.assertGreater(match_count, 0, + msg='nothing to compare') + + def test_user_set_cpu_name(self): + self.check_pycache(0) + mod = self.import_module() + mod.self_test() + cache_size = len(self.cache_contents()) + + mtimes = self.get_cache_mtimes() + # Change CPU name to generic + self.run_in_separate_process(envvars={'NUMBA_CPU_NAME': 'generic'}) + + self.check_later_mtimes(mtimes) + self.assertGreater(len(self.cache_contents()), cache_size) + # Check cache index + cache = mod.add_usecase._cache + cache_file = cache._cache_file + cache_index = cache_file._load_index() + self.assertEqual(len(cache_index), 2) + [key_a, key_b] = cache_index.keys() + if key_a[1][1] == ll.get_host_cpu_name(): + key_host, key_generic = key_a, key_b + else: + key_host, key_generic = key_b, key_a + self.assertEqual(key_host[1][1], ll.get_host_cpu_name()) + self.assertEqual(key_host[1][2], codegen.get_host_cpu_features()) + self.assertEqual(key_generic[1][1], 'generic') + self.assertEqual(key_generic[1][2], '') + + def test_user_set_cpu_features(self): + self.check_pycache(0) + mod = self.import_module() + mod.self_test() + cache_size = len(self.cache_contents()) 
+ + mtimes = self.get_cache_mtimes() + # Change CPU feature + my_cpu_features = '-sse;-avx' + + system_features = codegen.get_host_cpu_features() + + self.assertNotEqual(system_features, my_cpu_features) + self.run_in_separate_process( + envvars={'NUMBA_CPU_FEATURES': my_cpu_features}, + ) + self.check_later_mtimes(mtimes) + self.assertGreater(len(self.cache_contents()), cache_size) + # Check cache index + cache = mod.add_usecase._cache + cache_file = cache._cache_file + cache_index = cache_file._load_index() + self.assertEqual(len(cache_index), 2) + [key_a, key_b] = cache_index.keys() + + if key_a[1][2] == system_features: + key_host, key_generic = key_a, key_b + else: + key_host, key_generic = key_b, key_a + + self.assertEqual(key_host[1][1], ll.get_host_cpu_name()) + self.assertEqual(key_host[1][2], system_features) + self.assertEqual(key_generic[1][1], ll.get_host_cpu_name()) + self.assertEqual(key_generic[1][2], my_cpu_features) + + +class TestMultiprocessCache(BaseCacheTest): + + # Nested multiprocessing.Pool raises AssertionError: + # "daemonic processes are not allowed to have children" + _numba_parallel_test_ = False + + here = os.path.dirname(__file__) + usecases_file = os.path.join(here, "cache_usecases.py") + modname = "dispatcher_caching_test_fodder" + + def test_multiprocessing(self): + # Check caching works from multiple processes at once (#2028) + mod = self.import_module() + # Calling a pure Python caller of the JIT-compiled function is + # necessary to reproduce the issue. 
+ f = mod.simple_usecase_caller + n = 3 + try: + ctx = multiprocessing.get_context('spawn') + except AttributeError: + ctx = multiprocessing + pool = ctx.Pool(n) + try: + res = sum(pool.imap(f, range(n))) + finally: + pool.close() + self.assertEqual(res, n * (n - 1) // 2) + + +@skip_if_typeguard +class TestCacheFileCollision(unittest.TestCase): + _numba_parallel_test_ = False + + here = os.path.dirname(__file__) + usecases_file = os.path.join(here, "cache_usecases.py") + modname = "caching_file_loc_fodder" + source_text_1 = """ +from numba import njit +@njit(cache=True) +def bar(): + return 123 +""" + source_text_2 = """ +from numba import njit +@njit(cache=True) +def bar(): + return 321 +""" + + def setUp(self): + self.tempdir = temp_directory('test_cache_file_loc') + sys.path.insert(0, self.tempdir) + self.modname = 'module_name_that_is_unlikely' + self.assertNotIn(self.modname, sys.modules) + self.modname_bar1 = self.modname + self.modname_bar2 = '.'.join([self.modname, 'foo']) + foomod = os.path.join(self.tempdir, self.modname) + os.mkdir(foomod) + with open(os.path.join(foomod, '__init__.py'), 'w') as fout: + print(self.source_text_1, file=fout) + with open(os.path.join(foomod, 'foo.py'), 'w') as fout: + print(self.source_text_2, file=fout) + + def tearDown(self): + sys.modules.pop(self.modname_bar1, None) + sys.modules.pop(self.modname_bar2, None) + sys.path.remove(self.tempdir) + + def import_bar1(self): + return import_dynamic(self.modname_bar1).bar + + def import_bar2(self): + return import_dynamic(self.modname_bar2).bar + + def test_file_location(self): + bar1 = self.import_bar1() + bar2 = self.import_bar2() + # Check that the cache file is named correctly + idxname1 = bar1._cache._cache_file._index_name + idxname2 = bar2._cache._cache_file._index_name + self.assertNotEqual(idxname1, idxname2) + self.assertTrue(idxname1.startswith("__init__.bar-3.py")) + self.assertTrue(idxname2.startswith("foo.bar-3.py")) + + @unittest.skipUnless(hasattr(multiprocessing, 
'get_context'), + 'Test requires multiprocessing.get_context') + def test_no_collision(self): + bar1 = self.import_bar1() + bar2 = self.import_bar2() + with capture_cache_log() as buf: + res1 = bar1() + cachelog = buf.getvalue() + # bar1 should save new index and data + self.assertEqual(cachelog.count('index saved'), 1) + self.assertEqual(cachelog.count('data saved'), 1) + self.assertEqual(cachelog.count('index loaded'), 0) + self.assertEqual(cachelog.count('data loaded'), 0) + with capture_cache_log() as buf: + res2 = bar2() + cachelog = buf.getvalue() + # bar2 should save new index and data + self.assertEqual(cachelog.count('index saved'), 1) + self.assertEqual(cachelog.count('data saved'), 1) + self.assertEqual(cachelog.count('index loaded'), 0) + self.assertEqual(cachelog.count('data loaded'), 0) + self.assertNotEqual(res1, res2) + + try: + # Make sure we can spawn new process without inheriting + # the parent context. + mp = multiprocessing.get_context('spawn') + except ValueError: + print("missing spawn context") + + q = mp.Queue() + # Start new process that calls `cache_file_collision_tester` + proc = mp.Process(target=cache_file_collision_tester, + args=(q, self.tempdir, + self.modname_bar1, + self.modname_bar2)) + proc.start() + # Get results from the process + log1 = q.get() + got1 = q.get() + log2 = q.get() + got2 = q.get() + proc.join() + + # The remote execution result of bar1() and bar2() should match + # the one executed locally. 
+ self.assertEqual(got1, res1) + self.assertEqual(got2, res2) + + # The remote should have loaded bar1 from cache + self.assertEqual(log1.count('index saved'), 0) + self.assertEqual(log1.count('data saved'), 0) + self.assertEqual(log1.count('index loaded'), 1) + self.assertEqual(log1.count('data loaded'), 1) + + # The remote should have loaded bar2 from cache + self.assertEqual(log2.count('index saved'), 0) + self.assertEqual(log2.count('data saved'), 0) + self.assertEqual(log2.count('index loaded'), 1) + self.assertEqual(log2.count('data loaded'), 1) + + +def cache_file_collision_tester(q, tempdir, modname_bar1, modname_bar2): + sys.path.insert(0, tempdir) + bar1 = import_dynamic(modname_bar1).bar + bar2 = import_dynamic(modname_bar2).bar + with capture_cache_log() as buf: + r1 = bar1() + q.put(buf.getvalue()) + q.put(r1) + with capture_cache_log() as buf: + r2 = bar2() + q.put(buf.getvalue()) + q.put(r2) + + +class TestCacheMultipleFilesWithSignature(unittest.TestCase): + # Regression test for https://github.com/numba/numba/issues/3658 + + _numba_parallel_test_ = False + + source_text_file1 = """ +from file2 import function2 +""" + source_text_file2 = """ +from numba import njit + +@njit('float64(float64)', cache=True) +def function1(x): + return x + +@njit('float64(float64)', cache=True) +def function2(x): + return x +""" + + def setUp(self): + self.tempdir = temp_directory('test_cache_file_loc') + + self.file1 = os.path.join(self.tempdir, 'file1.py') + with open(self.file1, 'w') as fout: + print(self.source_text_file1, file=fout) + + self.file2 = os.path.join(self.tempdir, 'file2.py') + with open(self.file2, 'w') as fout: + print(self.source_text_file2, file=fout) + + def tearDown(self): + shutil.rmtree(self.tempdir) + + def test_caching_mutliple_files_with_signature(self): + # Execute file1.py + popen = subprocess.Popen([sys.executable, self.file1], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + out, err = popen.communicate() + msg = 
f"stdout:\n{out.decode()}\n\nstderr:\n{err.decode()}" + self.assertEqual(popen.returncode, 0, msg=msg) + + # Execute file2.py + popen = subprocess.Popen([sys.executable, self.file2], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + out, err = popen.communicate() + msg = f"stdout:\n{out.decode()}\n\nstderr:\n{err.decode()}" + self.assertEqual(popen.returncode, 0, msg) + + +class TestCFuncCache(BaseCacheTest): + + here = os.path.dirname(__file__) + usecases_file = os.path.join(here, "cfunc_cache_usecases.py") + modname = "cfunc_caching_test_fodder" + + def run_in_separate_process(self): + # Cached functions can be run from a distinct process. + code = """if 1: + import sys + + sys.path.insert(0, %(tempdir)r) + mod = __import__(%(modname)r) + mod.self_test() + + f = mod.add_usecase + assert f.cache_hits == 1 + f = mod.outer + assert f.cache_hits == 1 + f = mod.div_usecase + assert f.cache_hits == 1 + """ % dict(tempdir=self.tempdir, modname=self.modname) + + popen = subprocess.Popen([sys.executable, "-c", code], + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = popen.communicate() + if popen.returncode != 0: + raise AssertionError(f"process failed with code {popen.returncode}:" + f"stderr follows\n{err.decode()}\n") + + def check_module(self, mod): + mod.self_test() + + def test_caching(self): + self.check_pycache(0) + mod = self.import_module() + self.check_pycache(6) # 3 index, 3 data + + self.assertEqual(mod.add_usecase.cache_hits, 0) + self.assertEqual(mod.outer.cache_hits, 0) + self.assertEqual(mod.add_nocache_usecase.cache_hits, 0) + self.assertEqual(mod.div_usecase.cache_hits, 0) + self.check_module(mod) + + # Reload module to hit the cache + mod = self.import_module() + self.check_pycache(6) # 3 index, 3 data + + self.assertEqual(mod.add_usecase.cache_hits, 1) + self.assertEqual(mod.outer.cache_hits, 1) + self.assertEqual(mod.add_nocache_usecase.cache_hits, 0) + self.assertEqual(mod.div_usecase.cache_hits, 1) + self.check_module(mod) + + 
self.run_in_separate_process() + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_casting.py b/venv/lib/python3.10/site-packages/numba/tests/test_casting.py new file mode 100644 index 0000000000000000000000000000000000000000..bd1225347e4ada483861317c8d8c3b406b03837d --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_casting.py @@ -0,0 +1,137 @@ +import numpy as np +from numba.core.errors import TypingError +from numba import njit +from numba.core import types +import struct +import unittest + + +def float_to_int(x): + return types.int32(x) + + +def int_to_float(x): + return types.float64(x) / 2 + + +def float_to_unsigned(x): + return types.uint32(x) + + +def float_to_complex(x): + return types.complex128(x) + + +def numpy_scalar_cast_error(): + np.int32(np.zeros((4,))) + +class TestCasting(unittest.TestCase): + def test_float_to_int(self): + pyfunc = float_to_int + cfunc = njit((types.float32,))(pyfunc) + + self.assertEqual(cfunc.nopython_signatures[0].return_type, types.int32) + self.assertEqual(cfunc(12.3), pyfunc(12.3)) + self.assertEqual(cfunc(12.3), int(12.3)) + self.assertEqual(cfunc(-12.3), pyfunc(-12.3)) + self.assertEqual(cfunc(-12.3), int(-12.3)) + + def test_int_to_float(self): + pyfunc = int_to_float + cfunc = njit((types.int64,))(pyfunc) + + self.assertEqual(cfunc.nopython_signatures[0].return_type, + types.float64) + self.assertEqual(cfunc(321), pyfunc(321)) + self.assertEqual(cfunc(321), 321. 
/ 2) + + def test_float_to_unsigned(self): + pyfunc = float_to_unsigned + cfunc = njit((types.float32,))(pyfunc) + + self.assertEqual(cfunc.nopython_signatures[0].return_type, types.uint32) + self.assertEqual(cfunc(3.21), pyfunc(3.21)) + self.assertEqual(cfunc(3.21), struct.unpack('I', struct.pack('i', + 3))[0]) + + def test_float_to_complex(self): + pyfunc = float_to_complex + cfunc = njit((types.float64,))(pyfunc) + self.assertEqual(cfunc.nopython_signatures[0].return_type, + types.complex128) + self.assertEqual(cfunc(-3.21), pyfunc(-3.21)) + self.assertEqual(cfunc(-3.21), -3.21 + 0j) + + def test_array_to_array(self): + """Make sure this compiles. + + Cast C to A array + """ + @njit("f8(f8[:])") + def inner(x): + return x[0] + + inner.disable_compile() + + @njit("f8(f8[::1])") + def driver(x): + return inner(x) + + x = np.array([1234], dtype=np.float64) + self.assertEqual(driver(x), x[0]) + self.assertEqual(len(inner.overloads), 1) + + def test_0darrayT_to_T(self): + @njit + def inner(x): + return x.dtype.type(x) + + inputs = [ + (np.bool_, True), + (np.float32, 12.3), + (np.float64, 12.3), + (np.int64, 12), + (np.complex64, 2j+3), + (np.complex128, 2j+3), + (np.timedelta64, np.timedelta64(3, 'h')), + (np.datetime64, np.datetime64('2016-01-01')), + (' 3 else y for y in x] + return z + + def list25(x): + # See issue #6260. Old style inline_closure_call uses get_ir_of_code + # for the closure->IR transform, without SSA there's multiply + # defined labels, the unary negation is self referent and DCE runs + # eliminating the duplicated labels. 
+ included = np.array([1, 2, 6, 8]) + not_included = [i for i in range(10) if i not in list(included)] + return not_included + + # functions to test that are expected to pass + f = [list1, list2, list3, list4, + list6, list7, list8, list9, list10, list11, + list12, list13, list14, list15, + list16, list17, list18, list19, list20, + list21, list22, list23, list24, list25] + + var = [1, 2, 3, 4, 5] + for ref in f: + try: + cfunc = jit(nopython=True)(ref) + self.assertEqual(cfunc(var), ref(var)) + except ValueError: # likely np array returned + try: + np.testing.assert_allclose(cfunc(var), ref(var)) + except Exception: + raise + + # test functions that are expected to fail + with self.assertRaises(TypingError) as raises: + cfunc = jit(nopython=True)(list5) + cfunc(var) + # TODO: we can't really assert the error message for the above + # Also, test_nested_array is a similar case (but without list) that works. + + if sys.maxsize > 2 ** 32: + bits = 64 + else: + bits = 32 + + def test_objmode_inlining(self): + def objmode_func(y): + z = object() + inlined = [x for x in y] + return inlined + + cfunc = jit(forceobj=True)(objmode_func) + t = [1, 2, 3] + expected = objmode_func(t) + got = cfunc(t) + self.assertPreciseEqual(expected, got) + + +class TestArrayComprehension(unittest.TestCase): + + _numba_parallel_test_ = False + + def check(self, pyfunc, *args, **kwargs): + """A generic check function that run both pyfunc, and jitted pyfunc, + and compare results.""" + run_parallel = kwargs.get('run_parallel', False) + assert_allocate_list = kwargs.get('assert_allocate_list', False) + assert_dtype = kwargs.get('assert_dtype', False) + cfunc = jit(nopython=True,parallel=run_parallel)(pyfunc) + pyres = pyfunc(*args) + cres = cfunc(*args) + np.testing.assert_array_equal(pyres, cres) + if assert_dtype: + self.assertEqual(cres[1].dtype, assert_dtype) + if assert_allocate_list: + self.assertIn('allocate list', cfunc.inspect_llvm(cfunc.signatures[0])) + else: + 
self.assertNotIn('allocate list', cfunc.inspect_llvm(cfunc.signatures[0])) + if run_parallel: + self.assertIn('@do_scheduling', cfunc.inspect_llvm(cfunc.signatures[0])) + + def test_comp_with_array_1(self): + def comp_with_array_1(n): + m = n * 2 + l = np.array([i + m for i in range(n)]) + return l + + self.check(comp_with_array_1, 5) + if PARALLEL_SUPPORTED: + self.check(comp_with_array_1, 5, run_parallel=True) + + def test_comp_with_array_2(self): + def comp_with_array_2(n, threshold): + A = np.arange(-n, n) + return np.array([ x * x if x < threshold else x * 2 for x in A ]) + + self.check(comp_with_array_2, 5, 0) + + def test_comp_with_array_noinline(self): + def comp_with_array_noinline(n): + m = n * 2 + l = np.array([i + m for i in range(n)]) + return l + + import numba.core.inline_closurecall as ic + try: + ic.enable_inline_arraycall = False + self.check(comp_with_array_noinline, 5, assert_allocate_list=True) + finally: + ic.enable_inline_arraycall = True + + def test_comp_with_array_noinline_issue_6053(self): + def comp_with_array_noinline(n): + lst = [0] + for i in range(n): + lst.append(i) + l = np.array(lst) + return l + + self.check(comp_with_array_noinline, 5, assert_allocate_list=True) + + def test_comp_nest_with_array(self): + def comp_nest_with_array(n): + l = np.array([[i * j for j in range(n)] for i in range(n)]) + return l + + self.check(comp_nest_with_array, 5) + if PARALLEL_SUPPORTED: + self.check(comp_nest_with_array, 5, run_parallel=True) + + def test_comp_nest_with_array_3(self): + def comp_nest_with_array_3(n): + l = np.array([[[i * j * k for k in range(n)] for j in range(n)] for i in range(n)]) + return l + + self.check(comp_nest_with_array_3, 5) + if PARALLEL_SUPPORTED: + self.check(comp_nest_with_array_3, 5, run_parallel=True) + + def test_comp_nest_with_array_noinline(self): + def comp_nest_with_array_noinline(n): + l = np.array([[i * j for j in range(n)] for i in range(n)]) + return l + + import numba.core.inline_closurecall as ic + 
try: + ic.enable_inline_arraycall = False + self.check(comp_nest_with_array_noinline, 5, + assert_allocate_list=True) + finally: + ic.enable_inline_arraycall = True + + def test_comp_with_array_range(self): + def comp_with_array_range(m, n): + l = np.array([i for i in range(m, n)]) + return l + + self.check(comp_with_array_range, 5, 10) + + def test_comp_with_array_range_and_step(self): + def comp_with_array_range_and_step(m, n): + l = np.array([i for i in range(m, n, 2)]) + return l + + self.check(comp_with_array_range_and_step, 5, 10) + + def test_comp_with_array_conditional(self): + def comp_with_array_conditional(n): + l = np.array([i for i in range(n) if i % 2 == 1]) + return l + # arraycall inline would not happen when conditional is present + self.check(comp_with_array_conditional, 10, assert_allocate_list=True) + + def test_comp_nest_with_array_conditional(self): + def comp_nest_with_array_conditional(n): + l = np.array([[i * j for j in range(n)] for i in range(n) if i % 2 == 1]) + return l + self.check(comp_nest_with_array_conditional, 5, + assert_allocate_list=True) + + def test_comp_unsupported_iter(self): + def comp_unsupported_iter(): + val = zip([1, 2, 3], [4, 5, 6]) + return np.array([a for a, b in val]) + with self.assertRaises(TypingError) as raises: + self.check(comp_unsupported_iter) + self.assertIn(_header_lead, str(raises.exception)) + self.assertIn('Unsupported iterator found in array comprehension', + str(raises.exception)) + + def test_no_array_comp(self): + def no_array_comp1(n): + l = [1,2,3,4] + a = np.array(l) + return a + # const 1D array is actually inlined + self.check(no_array_comp1, 10, assert_allocate_list=False) + def no_array_comp2(n): + l = [1,2,3,4] + a = np.array(l) + l.append(5) + return a + self.check(no_array_comp2, 10, assert_allocate_list=True) + + def test_nested_array(self): + def nested_array(n): + l = np.array([ np.array([x for x in range(n)]) for y in range(n)]) + return l + + self.check(nested_array, 10) + + def 
test_nested_array_with_const(self): + def nested_array(n): + l = np.array([ np.array([x for x in range(3)]) for y in range(4)]) + return l + + self.check(nested_array, 0) + + def test_array_comp_with_iter(self): + def array_comp(a): + l = np.array([ x * x for x in a ]) + return l + # with list iterator + l = [1,2,3,4,5] + self.check(array_comp, l) + # with array iterator + self.check(array_comp, np.array(l)) + # with tuple iterator (issue #7394) + self.check(array_comp, tuple(l)) + # with typed.List iterator (issue #6550) + self.check(array_comp, typed.List(l)) + + def test_array_comp_with_dtype(self): + def array_comp(n): + l = np.array([i for i in range(n)], dtype=np.complex64) + return l + + self.check(array_comp, 10, assert_dtype=np.complex64) + + def test_array_comp_inferred_dtype(self): + def array_comp(n): + l = np.array([i * 1j for i in range(n)]) + return l + + self.check(array_comp, 10) + + def test_array_comp_inferred_dtype_nested(self): + def array_comp(n): + l = np.array([[i * j for j in range(n)] for i in range(n)]) + return l + + self.check(array_comp, 10) + + def test_array_comp_inferred_dtype_nested_sum(self): + def array_comp(n): + l = np.array([[i * j for j in range(n)] for i in range(n)]) + # checks that operations on the inferred array + return l + + self.check(array_comp, 10) + + def test_array_comp_inferred_dtype_outside_setitem(self): + def array_comp(n, v): + arr = np.array([i for i in range(n)]) + # the following should not change the dtype + arr[0] = v + return arr + + # float to int cast is valid + v = 1.2 + self.check(array_comp, 10, v, assert_dtype=np.intp) + # complex to int cast is invalid + with self.assertRaises(TypingError) as raises: + cfunc = jit(nopython=True)(array_comp) + cfunc(10, 2.3j) + self.assertIn( + _header_lead + " Function({})".format(operator.setitem), + str(raises.exception), + ) + self.assertIn( + "(array({}, 1d, C), Literal[int](0), complex128)".format(types.intp), + str(raises.exception), + ) + + def 
test_array_comp_shuffle_sideeffect(self): + nelem = 100 + + @jit(nopython=True) + def foo(): + numbers = np.array([i for i in range(nelem)]) + np.random.shuffle(numbers) + print(numbers) + + with captured_stdout() as gotbuf: + foo() + got = gotbuf.getvalue().strip() + + with captured_stdout() as expectbuf: + print(np.array([i for i in range(nelem)])) + expect = expectbuf.getvalue().strip() + + # For a large enough array, the chances of shuffle to not move any + # element is tiny enough. + self.assertNotEqual(got, expect) + self.assertRegex(got, r'\[(\s*\d+)+\]') + + def test_empty_list_not_removed(self): + # see issue #3724 + def f(x): + t = [] + myList = np.array([1]) + a = np.random.choice(myList, 1) + t.append(x + a) + return a + self.check(f, 5, assert_allocate_list=True) + + def test_reuse_of_array_var(self): + """ Test issue 3742 """ + # redefinition of z breaks array comp as there's multiple defn + def foo(n): + # doesn't matter where this is in the code, it's just to ensure a + # `make_function` opcode exists + [i for i in range(1)] + z = np.empty(n) + for i in range(n): + z = np.zeros(n) + z[i] = i # write is required to trip the bug + + return z + + self.check(foo, 10, assert_allocate_list=True) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_conditions_as_predicates.py b/venv/lib/python3.10/site-packages/numba/tests/test_conditions_as_predicates.py new file mode 100644 index 0000000000000000000000000000000000000000..030b48a04ac1df7689ce3b8b9fd64cfc441a426c --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_conditions_as_predicates.py @@ -0,0 +1,200 @@ +from numba.tests.support import TestCase, numpy_support +from numba import njit, types +from numba.typed import List, Dict +import numpy as np + + +class TestConditionsAsPredicates(TestCase): + + def test_scalars(self): + # checks that scalar types can be used as predicates + dts = [np.int8, np.uint16, np.int64, 
np.float32, np.float64, + np.complex128, int, float, complex, str, bool] + for dt in dts: + for c in 1, 0: + x = dt(c) + + @njit + def foo(): + if x: + return 10 + else: + return 20 + self.assertEqual(foo(), foo.py_func()) + self.assertEqual(foo(), 10 if c == 1 or dt is str else 20) + + # empty string + @njit + def foo(x): + if x: + return 10 + else: + return 20 + s = "" + self.assertEqual(foo(s), foo.py_func(s)) + + def test_typed_list(self): + @njit + def foo(x): + if x: + return 10 + else: + return 20 + + # empty list + z = List.empty_list(types.int64) + self.assertEqual(foo(z), foo.py_func(z)) + self.assertEqual(foo.py_func(z), 20) + + # non-empty list + z.append(1) + self.assertEqual(foo(z), foo.py_func(z)) + self.assertEqual(foo.py_func(z), 10) + + def test_reflected_list(self): + # non-empty + @njit + def foo(x): + if x: + return 10 + else: + return 20 + + z = [1] + self.assertEqual(foo(z), foo.py_func(z)) + self.assertEqual(foo.py_func(z), 10) + + # non-empty local + @njit + def foo(): + y = [1, 2] + if y: + return 10 + else: + return 20 + + self.assertEqual(foo(), foo.py_func()) + self.assertEqual(foo.py_func(), 10) + + # empty local + @njit + def foo(): + y = [1, 2] + y.pop() + y.pop() + assert len(y) == 0 + if y: + return 10 + else: + return 20 + + self.assertEqual(foo(), foo.py_func()) + self.assertEqual(foo.py_func(), 20) + + def test_reflected_set(self): + # non-empty + @njit + def foo(x): + if x: + return 10 + else: + return 20 + + z = {1} + self.assertEqual(foo(z), foo.py_func(z)) + self.assertEqual(foo.py_func(z), 10) + + # non-empty local + @njit + def foo(): + y = {1, 2} + if y: + return 10 + else: + return 20 + + self.assertEqual(foo(), foo.py_func()) + self.assertEqual(foo.py_func(), 10) + + # empty local + @njit + def foo(): + y = {1, 2} + y.pop() + y.pop() + assert len(y) == 0 + if y: + return 10 + else: + return 20 + + self.assertEqual(foo(), foo.py_func()) + self.assertEqual(foo.py_func(), 20) + + def test_typed_dict(self): + @njit + def 
foo(x): + if x: + return 10 + else: + return 20 + + # empty + z = Dict.empty(types.int64, types.int64) + self.assertEqual(foo(z), foo.py_func(z)) + self.assertEqual(foo.py_func(z), 20) + + # non-empty + z[2] = 3 + self.assertEqual(foo(z), foo.py_func(z)) + self.assertEqual(foo.py_func(z), 10) + + def test_arrays(self): + @njit + def foo(x): + if x: + return 10 + else: + return 20 + + # non-empty 0d, True + z = np.array(1) + self.assertEqual(foo(z), foo.py_func(z)) + self.assertEqual(foo.py_func(z), 10) + + # non-empty 0d, False + z = np.array(0) + self.assertEqual(foo(z), foo.py_func(z)) + self.assertEqual(foo.py_func(z), 20) + # non-empty nd True + z = np.array([[[1]]]) + self.assertEqual(foo(z), foo.py_func(z)) + self.assertEqual(foo.py_func(z), 10) + + # non-empty nd False + z = np.array([[[0]]]) + self.assertEqual(foo(z), foo.py_func(z)) + self.assertEqual(foo.py_func(z), 20) + + # various problems: + + # empty, NumPy warns or raises if NumPy >= 2.2 + z = np.empty(0) + if numpy_support.numpy_version >= (2, 2): + with self.assertRaises(ValueError) as raises: + foo(z) + msg = ("The truth value of an empty array is ambiguous." + " Use `array.size > 0` to check that an array is not empty.") + self.assertIn(msg, str(raises.exception)) + else: + self.assertEqual(foo(z), foo.py_func(z)) + self.assertEqual(foo.py_func(z), 20) + + # nd, NumPy raises + z = np.array([1, 2]) + with self.assertRaises(ValueError) as raises: + foo(z) + + msg = ("The truth value of an array with more than one element " + "is ambiguous. 
Use a.any() or a.all()") + self.assertIn(msg, str(raises.exception)) diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_config.py b/venv/lib/python3.10/site-packages/numba/tests/test_config.py new file mode 100644 index 0000000000000000000000000000000000000000..d4efb07a9d9f77bad5c4f2428d361f24cae011c3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_config.py @@ -0,0 +1,218 @@ +import os +import tempfile +from textwrap import dedent +import unittest +from unittest import mock +from numba.tests.support import (TestCase, temp_directory, override_env_config, + run_in_subprocess) +from numba.core import config + +try: + import yaml + _HAVE_YAML = True +except ImportError: + _HAVE_YAML = False + +_skip_msg = "pyyaml needed for configuration file tests" +needs_yaml = unittest.skipIf(not _HAVE_YAML, _skip_msg) + + +@needs_yaml +class TestConfig(TestCase): + + # Disable parallel testing due to envvars modification + _numba_parallel_test_ = False + + def setUp(self): + # use support.temp_directory, it can do the clean up + self.tmppath = temp_directory('config_tmp') + self.maxDiff = 2500 + super(TestConfig, self).setUp() + + def mock_cfg_location(self): + """ + Creates a mock launch location. + Returns the location path. + """ + return tempfile.mkdtemp(dir=self.tmppath) + + def inject_mock_cfg(self, location, cfg): + """ + Injects a mock configuration at 'location' + """ + tmpcfg = os.path.join(location, config._config_fname) + with open(tmpcfg, 'wt') as f: + yaml.dump(cfg, f, default_flow_style=False) + + def get_settings(self): + """ + Gets the current numba config settings + """ + store = dict() + for x in dir(config): + if x.isupper(): + store[x] = getattr(config, x) + return store + + def create_config_effect(self, cfg): + """ + Returns a config "original" from a location with no config file + and then the impact of applying the supplied cfg dictionary as + a config file at a location in the returned "current". 
+ """ + + # store original cwd + original_cwd = os.getcwd() + + # create mock launch location + launch_dir = self.mock_cfg_location() + + # switch cwd to the mock launch location, get and store settings + os.chdir(launch_dir) + # use override to ensure that the config is zero'd out with respect + # to any existing settings + with override_env_config('_', '_'): + original = self.get_settings() + + # inject new config into a file in the mock launch location + self.inject_mock_cfg(launch_dir, cfg) + + try: + # override something but don't change the value, this is to refresh + # the config and make sure the injected config file is read + with override_env_config('_', '_'): + current = self.get_settings() + finally: + # switch back to original dir with no new config + os.chdir(original_cwd) + return original, current + + def test_config(self): + # ensure a non empty settings file does impact config and that the + # case of the key makes no difference + key = 'COLOR_SCHEME' + for case in [str.upper, str.lower]: + orig, curr = self.create_config_effect({case(key): 'light_bg'}) + self.assertTrue(orig != curr) + self.assertTrue(orig[key] != curr[key]) + self.assertEqual(curr[key], 'light_bg') + # check that just the color scheme is the cause of difference + orig.pop(key) + curr.pop(key) + self.assertEqual(orig, curr) + + def test_empty_config(self): + # ensure an empty settings file does not impact config + orig, curr = self.create_config_effect({}) + self.assertEqual(orig, curr) + + @unittest.skipUnless(config.ENABLE_AVX, + "test expects NUMBA_ENABLE_AVX==True") + def test_nocona_disables_avx(self): + # test with nocona + new_env = os.environ.copy() + new_env.pop('NUMBA_ENABLE_AVX', None) # clear NUMBA_ENABLE_AVX + + new_env['NUMBA_CPU_NAME'] = 'nocona' + code = ("from numba.core import config\n" + "print('---->', bool(config.ENABLE_AVX))\n" + "assert not config.ENABLE_AVX") + out, err = run_in_subprocess(dedent(code), env=new_env) + err_msg = err.decode('utf-8') + 
out_msg = out.decode('utf-8') + ex_expected = "----> False" + self.assertIn(ex_expected, out_msg, msg=err_msg) + + # test with skylake-avx512 + new_env['NUMBA_CPU_NAME'] = 'skylake-avx512' + code = ("from numba.core import config\n" + "print('---->', bool(config.ENABLE_AVX))\n" + "assert config.ENABLE_AVX") + out, err = run_in_subprocess(dedent(code), env=new_env) + err_msg = err.decode('utf-8') + out_msg = out.decode('utf-8') + ex_expected = "----> True" + self.assertIn(ex_expected, out_msg, msg=err_msg) + + +class TestNumbaOptLevel(TestCase): + # Tests that the setting of NUMBA_OPT influences the "cheap" module pass. + # Spot checks NUMBA_OPT={'max', '3', '0'} + + def check(self, expected, opt_value, raw_value): + # local imports for state-safety + from numba import config, njit + + # check opt value and its raw_value + self.assertEqual(config.OPT, opt_value) + self.assertEqual(config.OPT._raw_value, raw_value) + + # Patch the CPUCodegen to make capture calls to the + # `_module_pass_manager` through a `side_effect` function that asserts + # that the kwargs being passed are as expected per the "NUMBA_OPT" + # level. The `side_effect` function immediately raises with a knwon + # message to abort further stages compilation once the check is + # complete. 
+ from numba.core.codegen import CPUCodegen + side_effect_message = "expected side effect" + + def side_effect(*args, **kwargs): + self.assertEqual(kwargs, expected) + raise RuntimeError(side_effect_message) + + with mock.patch.object(CPUCodegen, '_module_pass_manager', + side_effect=side_effect): + with self.assertRaises(RuntimeError) as raises: + njit(lambda : ...)() + + self.assertIn(side_effect_message, str(raises.exception)) + + @TestCase.run_test_in_subprocess(envvars={'NUMBA_OPT': 'max'}) + def test_opt_max(self): + # NUMBA_OPT='max' should set opt to 3 and enable loop_vectorize + expected = {'loop_vectorize': True, + 'slp_vectorize': False, + 'opt': 3, + 'cost': 'cheap'} + self.check(expected, 3, 'max') + + @TestCase.run_test_in_subprocess(envvars={'NUMBA_OPT': '3'}) + def test_opt_3(self): + # NUMBA_OPT='3' should not impact opt or loop_vectorize + expected = {'loop_vectorize': False, + 'slp_vectorize': False, + 'opt': 0, + 'cost': 'cheap'} + self.check(expected, 3, 3) + + @TestCase.run_test_in_subprocess(envvars={'NUMBA_OPT': '0'}) + def test_opt_0(self): + # NUMBA_OPT='0' should not impact opt or loop_vectorize + expected = {'loop_vectorize': False, + 'slp_vectorize': False, + 'opt': 0, + 'cost': 'cheap'} + self.check(expected, 0, 0) + + @TestCase.run_test_in_subprocess() + def test_opt_default(self): + # NUMBA_OPT is not set, the default should not impact opt or + # loop_vectorize + expected = {'loop_vectorize': False, + 'slp_vectorize': False, + 'opt': 0, + 'cost': 'cheap'} + self.check(expected, 3, 3) + + @TestCase.run_test_in_subprocess(envvars={'NUMBA_OPT': 'invalid'}) + def test_opt_invalid(self): + # NUMBA_OPT='invalid' should just proceed as default case + expected = {'loop_vectorize': False, + 'slp_vectorize': False, + 'opt': 0, + 'cost': 'cheap'} + self.check(expected, 3, 3) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_conversion.py 
b/venv/lib/python3.10/site-packages/numba/tests/test_conversion.py new file mode 100644 index 0000000000000000000000000000000000000000..54bb94ca78f66c00ebc9fe0b8f535cf2b0cd8f98 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_conversion.py @@ -0,0 +1,223 @@ +import gc +import itertools + +import numpy as np + +import unittest +from numba import jit, njit +from numba.core import types +from numba.tests.support import TestCase +from numba.np import numpy_support + + +def identity(x): + return x + +def addition(x, y): + return x + y + +def equality(x, y): + return x == y + +def foobar(x, y, z): + return x + + +class TestConversion(TestCase): + """ + Testing Python to Native conversion + """ + + def test_complex_identity(self): + pyfunc = identity + cfunc = njit(types.complex64(types.complex64))(pyfunc) + + xs = [1.0j, (1+1j), (-1-1j), (1+0j)] + for x in xs: + self.assertEqual(cfunc(x), x) + for x in np.complex64(xs): + self.assertEqual(cfunc(x), x) + + cfunc = njit(types.complex128(types.complex128))(pyfunc) + + xs = [1.0j, (1+1j), (-1-1j), (1+0j)] + for x in xs: + self.assertEqual(cfunc(x), x) + for x in np.complex128(xs): + self.assertEqual(cfunc(x), x) + + def test_complex_addition(self): + pyfunc = addition + cfunc = njit(types.complex64(types.complex64, types.complex64))(pyfunc) + + xs = [1.0j, (1+1j), (-1-1j), (1+0j)] + for x in xs: + y = x + self.assertEqual(cfunc(x, y), x + y) + for x in np.complex64(xs): + y = x + self.assertEqual(cfunc(x, y), x + y) + + + cfunc = njit(types.complex128(types.complex128, + types.complex128))(pyfunc) + + xs = [1.0j, (1+1j), (-1-1j), (1+0j)] + for x in xs: + y = x + self.assertEqual(cfunc(x, y), x + y) + for x in np.complex128(xs): + y = x + self.assertEqual(cfunc(x, y), x + y) + + def test_boolean_as_int(self): + pyfunc = equality + cfunc = njit((types.boolean, types.intp))(pyfunc) + + xs = True, False + ys = -1, 0, 1 + + for xs, ys in itertools.product(xs, ys): + self.assertEqual(pyfunc(xs, ys), cfunc(xs, 
ys)) + + def test_boolean_as_float(self): + pyfunc = equality + cfunc = njit((types.boolean, types.float64))(pyfunc) + + xs = True, False + ys = -1, 0, 1 + + for xs, ys in itertools.product(xs, ys): + self.assertEqual(pyfunc(xs, ys), cfunc(xs, ys)) + + def test_boolean_eq_boolean(self): + pyfunc = equality + cfunc = njit((types.boolean, types.boolean))(pyfunc) + + xs = True, False + ys = True, False + + for xs, ys in itertools.product(xs, ys): + self.assertEqual(pyfunc(xs, ys), cfunc(xs, ys)) + + # test when a function parameters are jitted as unsigned types + # the function is called with negative parameters the Python error + # that it generates is correctly handled -- a Python error is returned to the user + # For more info, see the comment in Include/longobject.h for _PyArray_AsByteArray + # which PyLong_AsUnsignedLongLong calls + def test_negative_to_unsigned(self): + def f(x): + return x + with self.assertRaises(OverflowError): + jit('uintp(uintp)', nopython=True)(f)(-5) + + # test the switch logic in callwraper.py:build_wrapper() works for more than one argument + # and where the error occurs + def test_multiple_args_negative_to_unsigned(self): + pyfunc = foobar + cfunc = njit(types.uint64(types.uint64, types.uint64, + types.uint64),)(pyfunc) + + test_fail_args = ((-1, 0, 1), (0, -1, 1), (0, 1, -1)) + with self.assertRaises(OverflowError): + for a, b, c in test_fail_args: + cfunc(a, b, c) + + # test switch logic of callwraper.py:build_wrapper() with records as function parameters + def test_multiple_args_records(self): + pyfunc = foobar + + mystruct_dt = np.dtype([('p', np.float64), + ('row', np.float64), + ('col', np.float64)]) + mystruct = numpy_support.from_dtype(mystruct_dt) + + cfunc = njit(mystruct[:](mystruct[:], types.uint64, + types.uint64),)(pyfunc) + + st1 = np.recarray(3, dtype=mystruct_dt) + + st1.p = np.arange(st1.size) + 1 + st1.row = np.arange(st1.size) + 1 + st1.col = np.arange(st1.size) + 1 + + with self.assertRefCount(st1): + 
test_fail_args = ((st1, -1, 1), (st1, 1, -1)) + + for a, b, c in test_fail_args: + with self.assertRaises(OverflowError): + cfunc(a, b, c) + + del test_fail_args, a, b, c + gc.collect() + + # test switch logic of callwraper.py:build_wrapper() with no function parameters + def test_with_no_parameters(self): + def f(): + pass + self.assertEqual(f(), jit('()', nopython=True)(f)()) + + def check_argument_cleanup(self, typ, obj): + """ + Check that argument cleanup doesn't leak references. + """ + def f(x, y): + pass + + def _objects(obj): + objs = [obj] + if isinstance(obj, tuple): + for v in obj: + objs += _objects(v) + return objs + + objects = _objects(obj) + + cfunc = njit((typ, types.uint32))(f) + with self.assertRefCount(*objects): + cfunc(obj, 1) + with self.assertRefCount(*objects): + with self.assertRaises(OverflowError): + cfunc(obj, -1) + + cfunc = njit((types.uint32, typ))(f) + with self.assertRefCount(*objects): + cfunc(1, obj) + with self.assertRefCount(*objects): + with self.assertRaises(OverflowError): + cfunc(-1, obj) + + def test_cleanup_buffer(self): + mem = memoryview(bytearray(b"xyz")) + self.check_argument_cleanup(types.MemoryView(types.byte, 1, 'C'), mem) + + def test_cleanup_record(self): + dtype = np.dtype([('x', np.float64), ('y', np.float64)]) + recarr = np.zeros(1, dtype=dtype) + self.check_argument_cleanup(numpy_support.from_dtype(dtype), recarr[0]) + + def test_cleanup_tuple(self): + mem = memoryview(bytearray(b"xyz")) + tp = types.UniTuple(types.MemoryView(types.byte, 1, 'C'), 2) + self.check_argument_cleanup(tp, (mem, mem)) + + def test_cleanup_optional(self): + mem = memoryview(bytearray(b"xyz")) + tp = types.Optional(types.MemoryView(types.byte, 1, 'C')) + self.check_argument_cleanup(tp, mem) + + def test_stringliteral_to_unicode(self): + # See issue #6907, explicit signature on bar() takes a unicode_type but + # the call to bar() in foo() is with a StringLiteral + + @jit(types.void(types.unicode_type), nopython=True) + def 
bar(string): + pass + + @jit(types.void(), nopython=True) + def foo2(): + bar("literal string") + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_copy_propagate.py b/venv/lib/python3.10/site-packages/numba/tests/test_copy_propagate.py new file mode 100644 index 0000000000000000000000000000000000000000..ca410fedaf4c8f40f26bee8143457f5e596c3f14 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_copy_propagate.py @@ -0,0 +1,169 @@ +# +# Copyright (c) 2017 Intel Corporation +# SPDX-License-Identifier: BSD-2-Clause +# + +from numba import jit, njit +from numba.core import types, ir, config, compiler +from numba.core.registry import cpu_target +from numba.core.annotations import type_annotations +from numba.core.ir_utils import (copy_propagate, apply_copy_propagate, + get_name_var_table) +from numba.core.typed_passes import type_inference_stage +from numba.tests.support import IRPreservingTestPipeline +import numpy as np +import unittest + + +def test_will_propagate(b, z, w): + x = 3 + x1 = x + if b > 0: + y = z + w + else: + y = 0 + a = 2 * x1 + return a < b + + +def test_wont_propagate(b, z, w): + x = 3 + if b > 0: + y = z + w + x = 1 + else: + y = 0 + a = 2 * x + return a < b + + +def null_func(a,b,c,d): + False + + +def inListVar(list_var, var): + for i in list_var: + if i.name == var: + return True + return False + + +def findAssign(func_ir, var): + for label, block in func_ir.blocks.items(): + for i, inst in enumerate(block.body): + if isinstance(inst, ir.Assign) and inst.target.name!=var: + all_var = inst.list_vars() + if inListVar(all_var, var): + return True + + return False + + +class TestCopyPropagate(unittest.TestCase): + def test1(self): + typingctx = cpu_target.typing_context + targetctx = cpu_target.target_context + test_ir = compiler.run_frontend(test_will_propagate) + typingctx.refresh() + targetctx.refresh() + args = (types.int64, types.int64, types.int64) + typemap, 
return_type, calltypes, _ = type_inference_stage(typingctx, + targetctx, + test_ir, + args, + None) + type_annotation = type_annotations.TypeAnnotation( + func_ir=test_ir, + typemap=typemap, + calltypes=calltypes, + lifted=(), + lifted_from=None, + args=args, + return_type=return_type, + html_output=config.HTML) + in_cps, out_cps = copy_propagate(test_ir.blocks, typemap) + apply_copy_propagate(test_ir.blocks, in_cps, + get_name_var_table(test_ir.blocks), typemap, + calltypes) + + self.assertFalse(findAssign(test_ir, "x1")) + + def test2(self): + typingctx = cpu_target.typing_context + targetctx = cpu_target.target_context + test_ir = compiler.run_frontend(test_wont_propagate) + typingctx.refresh() + targetctx.refresh() + args = (types.int64, types.int64, types.int64) + typemap, return_type, calltypes, _ = type_inference_stage(typingctx, + targetctx, + test_ir, + args, + None) + type_annotation = type_annotations.TypeAnnotation( + func_ir=test_ir, + typemap=typemap, + calltypes=calltypes, + lifted=(), + lifted_from=None, + args=args, + return_type=return_type, + html_output=config.HTML) + in_cps, out_cps = copy_propagate(test_ir.blocks, typemap) + apply_copy_propagate(test_ir.blocks, in_cps, get_name_var_table(test_ir.blocks), typemap, calltypes) + + self.assertTrue(findAssign(test_ir, "x")) + + def test_input_ir_extra_copies(self): + """make sure Interpreter._remove_unused_temporaries() has removed extra copies + in the IR in simple cases so copy propagation is faster + """ + def test_impl(a): + b = a + 3 + return b + + j_func = njit(pipeline_class=IRPreservingTestPipeline)(test_impl) + self.assertEqual(test_impl(5), j_func(5)) + + # make sure b is the target of the expression assignment, not a temporary + fir = j_func.overloads[j_func.signatures[0]].metadata['preserved_ir'] + self.assertTrue(len(fir.blocks) == 1) + block = next(iter(fir.blocks.values())) + b_found = False + for stmt in block.body: + if isinstance(stmt, ir.Assign) and stmt.target.name == "b": + 
b_found = True + self.assertTrue(isinstance(stmt.value, ir.Expr) + and stmt.value.op == "binop" and stmt.value.lhs.name == "a") + + self.assertTrue(b_found) + + def test_input_ir_copy_remove_transform(self): + """make sure Interpreter._remove_unused_temporaries() does not generate + invalid code for rare chained assignment cases + """ + # regular chained assignment + def impl1(a): + b = c = a + 1 + return (b, c) + + # chained assignment with setitem + def impl2(A, i, a): + b = A[i] = a + 1 + return b, A[i] + 2 + + # chained assignment with setattr + def impl3(A, a): + b = A.a = a + 1 + return b, A.a + 2 + + class C: + pass + + self.assertEqual(impl1(5), njit(impl1)(5)) + self.assertEqual(impl2(np.ones(3), 0, 5), njit(impl2)(np.ones(3), 0, 5)) + self.assertEqual(impl3(C(), 5), jit(forceobj=True)(impl3)(C(), 5)) + + +if __name__ == "__main__": + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_ctypes.py b/venv/lib/python3.10/site-packages/numba/tests/test_ctypes.py new file mode 100644 index 0000000000000000000000000000000000000000..c1a7edf5099f6415576dda54faa52f8d18f150e5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_ctypes.py @@ -0,0 +1,255 @@ +from ctypes import * +import sys +import threading + +import numpy as np + + +from numba import jit, njit +from numba.core import types, errors +from numba.core.typing import ctypes_utils +from numba.tests.support import MemoryLeakMixin, tag, TestCase +from numba.tests.ctypes_usecases import * +import unittest + + +class TestCTypesTypes(TestCase): + + def _conversion_tests(self, check): + check(c_double, types.float64) + check(c_int, types.intc) + check(c_uint16, types.uint16) + check(c_size_t, types.size_t) + check(c_ssize_t, types.ssize_t) + + check(c_void_p, types.voidptr) + check(POINTER(c_float), types.CPointer(types.float32)) + check(POINTER(POINTER(c_float)), + types.CPointer(types.CPointer(types.float32))) + + check(None, types.void) + + def 
test_from_ctypes(self): + """ + Test converting a ctypes type to a Numba type. + """ + def check(cty, ty): + got = ctypes_utils.from_ctypes(cty) + self.assertEqual(got, ty) + + self._conversion_tests(check) + + # An unsupported type + with self.assertRaises(TypeError) as raises: + ctypes_utils.from_ctypes(c_wchar_p) + self.assertIn("Unsupported ctypes type", str(raises.exception)) + + def test_to_ctypes(self): + """ + Test converting a Numba type to a ctypes type. + """ + def check(cty, ty): + got = ctypes_utils.to_ctypes(ty) + self.assertEqual(got, cty) + + self._conversion_tests(check) + + # An unsupported type + with self.assertRaises(TypeError) as raises: + ctypes_utils.to_ctypes(types.ellipsis) + self.assertIn("Cannot convert Numba type '...' to ctypes type", + str(raises.exception)) + + +class TestCTypesUseCases(MemoryLeakMixin, TestCase): + + def test_c_sin(self): + pyfunc = use_c_sin + cfunc = njit((types.double,))(pyfunc) + x = 3.14 + self.assertEqual(pyfunc(x), cfunc(x)) + + def test_two_funcs(self): + # Check that two constant functions don't get mixed up. 
+ pyfunc = use_two_funcs + cfunc = njit((types.double,))(pyfunc) + x = 3.14 + self.assertEqual(pyfunc(x), cfunc(x)) + + @unittest.skipUnless(is_windows, "Windows-specific test") + def test_stdcall(self): + # Just check that it doesn't crash + cfunc = njit((types.uintc,))(use_c_sleep) + + cfunc(1) + + def test_ctype_wrapping(self): + pyfunc = use_ctype_wrapping + cfunc = njit((types.double,))(pyfunc) + x = 3.14 + self.assertEqual(pyfunc(x), cfunc(x)) + + def test_ctype_voidptr(self): + pyfunc = use_c_pointer + # pyfunc will segfault if called + cfunc = njit((types.int32,))(pyfunc) + x = 123 + self.assertEqual(cfunc(x), x + 1) + + def test_function_pointer(self): + pyfunc = use_func_pointer + cfunc = jit(nopython=True)(pyfunc) + for (fa, fb, x) in [ + (c_sin, c_cos, 1.0), + (c_sin, c_cos, -1.0), + (c_cos, c_sin, 1.0), + (c_cos, c_sin, -1.0)]: + expected = pyfunc(fa, fb, x) + got = cfunc(fa, fb, x) + self.assertEqual(got, expected) + # A single specialization was compiled for all calls + self.assertEqual(len(cfunc.overloads), 1, cfunc.overloads) + + def test_untyped_function(self): + with self.assertRaises(TypeError) as raises: + njit((types.double,))(use_c_untyped) + self.assertIn("ctypes function '_numba_test_exp' doesn't define its argument types", + str(raises.exception)) + + def test_python_call_back(self): + mydct = {'what': 1232121} + + def call_me_maybe(arr): + return mydct[arr[0].decode('ascii')] + + # Create a callback into the python interpreter + py_call_back = CFUNCTYPE(c_int, py_object)(call_me_maybe) + + def pyfunc(a): + what = py_call_back(a) + return what + + cfunc = jit(nopython=True, nogil=True)(pyfunc) + arr = np.array(["what"], dtype='S10') + self.assertEqual(pyfunc(arr), cfunc(arr)) + + def test_python_call_back_threaded(self): + def pyfunc(a, repeat): + out = 0 + for _ in range(repeat): + out += py_call_back(a) + return out + + cfunc = jit(nopython=True, nogil=True)(pyfunc) + + arr = np.array(["what"], dtype='S10') + repeat = 1000 + + expected = 
pyfunc(arr, repeat) + outputs = [] + + # Warm up + cfunc(arr, repeat) + + # Test the function in multiple threads to exercise the + # GIL ensure/release code + + def run(func, arr, repeat): + outputs.append(func(arr, repeat)) + + threads = [threading.Thread(target=run, args=(cfunc, arr, repeat)) + for _ in range(10)] + + # Start threads + for th in threads: + th.start() + + # End threads + for th in threads: + th.join() + + # Check results + for got in outputs: + self.assertEqual(expected, got) + + def test_passing_array_ctypes_data(self): + """ + Test the ".ctypes.data" attribute of an array can be passed + as a "void *" parameter. + """ + def pyfunc(arr): + return c_take_array_ptr(arr.ctypes.data) + + cfunc = jit(nopython=True, nogil=True)(pyfunc) + + arr = np.arange(5) + + expected = pyfunc(arr) + got = cfunc(arr) + + self.assertEqual(expected, got) + + def check_array_ctypes(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + + arr = np.linspace(0, 10, 5) + expected = arr ** 2.0 + got = cfunc(arr) + self.assertPreciseEqual(expected, got) + return cfunc + + def test_passing_array_ctypes_voidptr(self): + """ + Test the ".ctypes" attribute of an array can be passed + as a "void *" parameter. + """ + self.check_array_ctypes(use_c_vsquare) + + def test_passing_array_ctypes_voidptr_pass_ptr(self): + """ + Test the ".ctypes" attribute of an array can be passed + as a pointer parameter of the right type. + """ + cfunc = self.check_array_ctypes(use_c_vcube) + + # Non-compatible pointers are not accepted (here float32* vs. 
float64*) + with self.assertRaises(errors.TypingError) as raises: + cfunc(np.float32([0.0])) + + self.assertIn("No implementation of function ExternalFunctionPointer", + str(raises.exception)) + + def test_storing_voidptr_to_int_array(self): + # Make C callback that returns a void* + cproto = CFUNCTYPE(c_void_p) + + @cproto + def get_voidstar(): + return 0xdeadbeef + + # Make python functions that use the C callback + def pyfunc(a): + ptr = get_voidstar() + a[0] = ptr + return ptr + + # Compile it + cfunc = njit((types.uintp[::1],))(pyfunc) + + # Setup inputs + arr_got = np.zeros(1, dtype=np.uintp) + arr_expect = arr_got.copy() + + # Run functions + ret_got = cfunc(arr_got) + ret_expect = pyfunc(arr_expect) + + # Check + self.assertEqual(ret_expect, 0xdeadbeef) + self.assertPreciseEqual(ret_got, ret_expect) + self.assertPreciseEqual(arr_got, arr_expect) + + +if __name__ == '__main__': + unittest.main() + diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_dataflow.py b/venv/lib/python3.10/site-packages/numba/tests/test_dataflow.py new file mode 100644 index 0000000000000000000000000000000000000000..1659ad2bda25df82a2d1704b26837b38a34ea359 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_dataflow.py @@ -0,0 +1,170 @@ +import unittest +from numba import jit, njit +from numba.core import types +from numba.tests.support import TestCase + + +force_pyobj_jit_opt = {'forceobj': True} +no_pyobj_jit_opt = {'nopython': True} + + +def assignments(a): + b = c = str(a) + return b + c + + +def assignments2(a): + b = c = d = str(a) + return b + c + d + + +# Use cases for issue #503 + +def var_propagate1(a, b): + c = (a if a > b else b) + 5 + return c + + +def var_propagate2(a, b): + c = 5 + (a if a > b else b + 12) / 2.0 + return c + + +def var_propagate3(a, b): + c = 5 + (a > b and a or b) + return c + + +def var_propagate4(a, b): + c = 5 + (a - 1 and b + 1) or (a + 1 and b - 1) + return c + + +# Issue #480 +def chained_compare(a): + return 1 < 
a < 3 + + +# Issue #591 +def stack_effect_error(x): + i = 2 + c = 1 + if i == x: + for i in range(3): + c = i + return i + c + +# Some more issues with stack effect and blocks +def for_break(n, x): + for i in range(n): + n = 0 + if i == x: + break + else: + n = i + return i, n + +# Issue #571 +def var_swapping(a, b, c, d, e): + a, b = b, a + c, d, e = e, c, d + a, b, c, d = b, c, d, a + return a + b + c + d +e + +class TestDataFlow(TestCase): + + def test_assignments(self, flags=force_pyobj_jit_opt): + pyfunc = assignments + cfunc = jit((types.int32,), **flags)(pyfunc) + for x in [-1, 0, 1]: + self.assertPreciseEqual(pyfunc(x), cfunc(x)) + + def test_assignments2(self, flags=force_pyobj_jit_opt): + pyfunc = assignments2 + cfunc = jit((types.int32,), **flags)(pyfunc) + for x in [-1, 0, 1]: + self.assertPreciseEqual(pyfunc(x), cfunc(x)) + + if flags is force_pyobj_jit_opt: + cfunc("a") + + # The dataflow analysis must be good enough for native mode + # compilation to succeed, hence the use of njit in the following tests. 
+ + def run_propagate_func(self, func, args): + self.assertPreciseEqual(func(*args), func.py_func(*args)) + + def test_var_propagate1(self): + cfunc = njit((types.intp, types.intp))(var_propagate1) + self.run_propagate_func(cfunc, (2, 3)) + self.run_propagate_func(cfunc, (3, 2)) + + def test_var_propagate2(self): + cfunc = njit((types.intp, types.intp))(var_propagate2) + self.run_propagate_func(cfunc, (2, 3)) + self.run_propagate_func(cfunc, (3, 2)) + + def test_var_propagate3(self): + cfunc = njit((types.intp, types.intp))(var_propagate3) + self.run_propagate_func(cfunc, (2, 3)) + self.run_propagate_func(cfunc, (3, 2)) + self.run_propagate_func(cfunc, (2, 0)) + self.run_propagate_func(cfunc, (-1, 0)) + self.run_propagate_func(cfunc, (0, 2)) + self.run_propagate_func(cfunc, (0, -1)) + + def test_var_propagate4(self): + cfunc = njit((types.intp, types.intp))(var_propagate4) + self.run_propagate_func(cfunc, (1, 1)) + self.run_propagate_func(cfunc, (1, 0)) + self.run_propagate_func(cfunc, (1, -1)) + self.run_propagate_func(cfunc, (0, 1)) + self.run_propagate_func(cfunc, (0, 0)) + self.run_propagate_func(cfunc, (0, -1)) + self.run_propagate_func(cfunc, (-1, 1)) + self.run_propagate_func(cfunc, (-1, 0)) + self.run_propagate_func(cfunc, (-1, -1)) + + def test_chained_compare(self, flags=force_pyobj_jit_opt): + pyfunc = chained_compare + cfunc = jit((types.int32,), **flags)(pyfunc) + for x in [0, 1, 2, 3, 4]: + self.assertPreciseEqual(pyfunc(x), cfunc(x)) + + def test_chained_compare_npm(self): + self.test_chained_compare(no_pyobj_jit_opt) + + def test_stack_effect_error(self, flags=force_pyobj_jit_opt): + # Issue #591: POP_BLOCK must undo all stack pushes done inside + # the block. 
+ pyfunc = stack_effect_error + cfunc = jit((types.int32,), **flags)(pyfunc) + for x in (0, 1, 2, 3): + self.assertPreciseEqual(pyfunc(x), cfunc(x)) + + def test_stack_effect_error_npm(self): + self.test_stack_effect_error(no_pyobj_jit_opt) + + def test_var_swapping(self, flags=force_pyobj_jit_opt): + pyfunc = var_swapping + cfunc = jit((types.int32,) * 5, **flags)(pyfunc) + args = tuple(range(0, 10, 2)) + self.assertPreciseEqual(pyfunc(*args), cfunc(*args)) + + def test_var_swapping_npm(self): + self.test_var_swapping(no_pyobj_jit_opt) + + def test_for_break(self, flags=force_pyobj_jit_opt): + # BREAK_LOOP must unwind the current inner syntax block. + pyfunc = for_break + cfunc = jit((types.intp, types.intp), **flags)(pyfunc) + for (n, x) in [(4, 2), (4, 6)]: + self.assertPreciseEqual(pyfunc(n, x), cfunc(n, x)) + + def test_for_break_npm(self): + self.test_for_break(no_pyobj_jit_opt) + + +if __name__ == '__main__': + unittest.main() + diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_datamodel.py b/venv/lib/python3.10/site-packages/numba/tests/test_datamodel.py new file mode 100644 index 0000000000000000000000000000000000000000..03a0179fa74ca84e673b72ca28bfc36499d1cfe5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_datamodel.py @@ -0,0 +1,257 @@ +from llvmlite import ir, binding as ll + +from numba.core import types, datamodel +from numba.core.datamodel.testing import test_factory +from numba.core.datamodel.manager import DataModelManager +from numba.core.datamodel.models import OpaqueModel +import unittest + + +class TestBool(test_factory()): + fe_type = types.boolean + + +class TestPyObject(test_factory()): + fe_type = types.pyobject + + +class TestInt8(test_factory()): + fe_type = types.int8 + + +class TestInt16(test_factory()): + fe_type = types.int16 + + +class TestInt32(test_factory()): + fe_type = types.int32 + + +class TestInt64(test_factory()): + fe_type = types.int64 + + +class TestUInt8(test_factory()): + fe_type = 
types.uint8 + + +class TestUInt16(test_factory()): + fe_type = types.uint16 + + +class TestUInt32(test_factory()): + fe_type = types.uint32 + + +class TestUInt64(test_factory()): + fe_type = types.uint64 + + +class TestFloat(test_factory()): + fe_type = types.float32 + + +class TestDouble(test_factory()): + fe_type = types.float64 + + +class TestComplex(test_factory()): + fe_type = types.complex64 + + +class TestDoubleComplex(test_factory()): + fe_type = types.complex128 + + +class TestPointerOfInt32(test_factory()): + fe_type = types.CPointer(types.int32) + + +class TestUniTupleOf2xInt32(test_factory()): + fe_type = types.UniTuple(types.int32, 2) + + +class TestUniTupleEmpty(test_factory()): + fe_type = types.UniTuple(types.int32, 0) + + +class TestTupleInt32Float32(test_factory()): + fe_type = types.Tuple([types.int32, types.float32]) + + +class TestTupleEmpty(test_factory()): + fe_type = types.Tuple([]) + + +class Test1DArrayOfInt32(test_factory()): + fe_type = types.Array(types.int32, 1, 'C') + + +class Test2DArrayOfComplex128(test_factory()): + fe_type = types.Array(types.complex128, 2, 'C') + + +class Test0DArrayOfInt32(test_factory()): + fe_type = types.Array(types.int32, 0, 'C') + + +class TestArgInfo(unittest.TestCase): + + def _test_as_arguments(self, fe_args): + """ + Test round-tripping types *fe_args* through the default data model's + argument conversion and unpacking logic. 
+ """ + dmm = datamodel.default_manager + fi = datamodel.ArgPacker(dmm, fe_args) + + module = ir.Module() + fnty = ir.FunctionType(ir.VoidType(), []) + function = ir.Function(module, fnty, name="test_arguments") + builder = ir.IRBuilder() + builder.position_at_end(function.append_basic_block()) + + args = [ir.Constant(dmm.lookup(t).get_value_type(), None) + for t in fe_args] + + # Roundtrip + values = fi.as_arguments(builder, args) + asargs = fi.from_arguments(builder, values) + + self.assertEqual(len(asargs), len(fe_args)) + valtys = tuple([v.type for v in values]) + self.assertEqual(valtys, fi.argument_types) + + expect_types = [a.type for a in args] + got_types = [a.type for a in asargs] + + self.assertEqual(expect_types, got_types) + + # Assign names (check this doesn't raise) + fi.assign_names(values, ["arg%i" for i in range(len(fe_args))]) + + builder.ret_void() + + ll.parse_assembly(str(module)) + + def test_int32_array_complex(self): + fe_args = [types.int32, + types.Array(types.int32, 1, 'C'), + types.complex64] + self._test_as_arguments(fe_args) + + def test_two_arrays(self): + fe_args = [types.Array(types.int32, 1, 'C')] * 2 + self._test_as_arguments(fe_args) + + def test_two_0d_arrays(self): + fe_args = [types.Array(types.int32, 0, 'C')] * 2 + self._test_as_arguments(fe_args) + + def test_tuples(self): + fe_args = [types.UniTuple(types.int32, 2), + types.UniTuple(types.int32, 3)] + self._test_as_arguments(fe_args) + # Tuple of struct-likes + arrty = types.Array(types.int32, 1, 'C') + fe_args = [types.UniTuple(arrty, 2), + types.UniTuple(arrty, 3)] + self._test_as_arguments(fe_args) + # Nested tuple + fe_args = [types.UniTuple(types.UniTuple(types.int32, 2), 3)] + self._test_as_arguments(fe_args) + + def test_empty_tuples(self): + # Empty tuple + fe_args = [types.UniTuple(types.int16, 0), + types.Tuple(()), + types.int32] + self._test_as_arguments(fe_args) + + def test_nested_empty_tuples(self): + fe_args = [types.int32, + types.UniTuple(types.Tuple(()), 
2), + types.int64] + self._test_as_arguments(fe_args) + + +class TestMemInfo(unittest.TestCase): + def setUp(self): + self.dmm = datamodel.default_manager + + def test_number(self): + ty = types.int32 + dm = self.dmm[ty] + self.assertFalse(dm.contains_nrt_meminfo()) + + def test_array(self): + ty = types.int32[:] + dm = self.dmm[ty] + self.assertTrue(dm.contains_nrt_meminfo()) + + def test_tuple_of_number(self): + ty = types.UniTuple(dtype=types.int32, count=2) + dm = self.dmm[ty] + self.assertFalse(dm.contains_nrt_meminfo()) + + def test_tuple_of_array(self): + ty = types.UniTuple(dtype=types.int32[:], count=2) + dm = self.dmm[ty] + self.assertTrue(dm.contains_nrt_meminfo()) + + +class TestMisc(unittest.TestCase): + + def test_issue2921(self): + import numpy as np + from numba import njit + + @njit + def copy(a, b): + for i in range(a.shape[0]): + a[i] = b[i] + + b = np.arange(5, dtype=np.uint8).view(np.bool_) + a = np.zeros_like(b) + copy(a, b) + np.testing.assert_equal(a, np.array((False,) + (True,) * 4)) + + +class TestDMMChaining(unittest.TestCase): + def test_basic(self): + dmm = DataModelManager() + + class int_handler(OpaqueModel): + pass + + class float_handler(OpaqueModel): + pass + + dmm.register(types.Integer, int_handler) + dmm.register(types.Float, float_handler) + + inter_dmm = DataModelManager() + + class new_int_handler(OpaqueModel): + pass + + inter_dmm.register(types.Integer, new_int_handler) + chained_dmm = inter_dmm.chain(dmm) + + # Check that the chained DMM has the new handler + self.assertIsInstance(chained_dmm.lookup(types.intp), new_int_handler) + # and not the old handler + self.assertNotIsInstance(chained_dmm.lookup(types.intp), int_handler) + # Check that the base DMM has the old handler + self.assertIsInstance(dmm.lookup(types.intp), int_handler) + # Check that float goes to the float_handler + self.assertIsInstance(chained_dmm.lookup(types.float32), float_handler) + self.assertIsInstance(dmm.lookup(types.float32), float_handler) + # 
Check the intermediate DMM + self.assertIsInstance(inter_dmm.lookup(types.intp), new_int_handler) + with self.assertRaises(KeyError): + inter_dmm.lookup(types.float32) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_debug.py b/venv/lib/python3.10/site-packages/numba/tests/test_debug.py new file mode 100644 index 0000000000000000000000000000000000000000..57d6a3f09b2c44c964af67b881444bf7eb118508 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_debug.py @@ -0,0 +1,340 @@ +import os +import platform +import re +import textwrap +import warnings + +import numpy as np + +from numba.tests.support import (TestCase, override_config, override_env_config, + captured_stdout, forbid_codegen, skip_parfors_unsupported, + needs_blas) +from numba import jit, njit +from numba.core import types, compiler, utils +from numba.core.errors import NumbaPerformanceWarning +from numba import prange +from numba.experimental import jitclass +import unittest + + +def simple_nopython(somearg): + retval = somearg + 1 + return retval + +def simple_gen(x, y): + yield x + yield y + + +class SimpleClass(object): + def __init__(self): + self.h = 5 + +simple_class_spec = [('h', types.int32)] + +def simple_class_user(obj): + return obj.h + +def unsupported_parfor(a, b): + return np.dot(a, b) # dot as gemm unsupported + +def supported_parfor(n): + a = np.ones(n) + for i in prange(n): + a[i] = a[i] + np.sin(i) + return a + +def unsupported_prange(n): + a = np.ones(n) + for i in prange(n): + a[i] = a[i] + np.sin(i) + assert i + 13 < 100000 + return a + + +class DebugTestBase(TestCase): + + all_dumps = set(['bytecode', 'cfg', 'ir', 'typeinfer', 'llvm', + 'func_opt_llvm', 'optimized_llvm', 'assembly']) + + def assert_fails(self, *args, **kwargs): + self.assertRaises(AssertionError, *args, **kwargs) + + def check_debug_output(self, out, dump_names): + enabled_dumps = dict.fromkeys(self.all_dumps, False) + for name in 
dump_names: + assert name in enabled_dumps + enabled_dumps[name] = True + for name, enabled in sorted(enabled_dumps.items()): + check_meth = getattr(self, '_check_dump_%s' % name) + if enabled: + check_meth(out) + else: + self.assert_fails(check_meth, out) + + def _check_dump_bytecode(self, out): + if utils.PYVERSION in ((3, 11), (3, 12), (3, 13)): + self.assertIn('BINARY_OP', out) + elif utils.PYVERSION in ((3, 10),): + self.assertIn('BINARY_ADD', out) + else: + raise NotImplementedError(utils.PYVERSION) + + def _check_dump_cfg(self, out): + self.assertIn('CFG dominators', out) + + def _check_dump_ir(self, out): + self.assertIn('--IR DUMP: %s--' % self.func_name, out) + + def _check_dump_typeinfer(self, out): + self.assertIn('--propagate--', out) + + def _check_dump_llvm(self, out): + self.assertIn('--LLVM DUMP', out) + if compiler.Flags.options["auto_parallel"].default.enabled == False: + self.assertRegex(out, r'store i64 %\"\.\d", i64\* %"retptr"', out) + + def _check_dump_func_opt_llvm(self, out): + self.assertIn('--FUNCTION OPTIMIZED DUMP %s' % self.func_name, out) + # allocas have been optimized away + self.assertIn('add nsw i64 %arg.somearg, 1', out) + + def _check_dump_optimized_llvm(self, out): + self.assertIn('--OPTIMIZED DUMP %s' % self.func_name, out) + self.assertIn('add nsw i64 %arg.somearg, 1', out) + + def _check_dump_assembly(self, out): + self.assertIn('--ASSEMBLY %s' % self.func_name, out) + if platform.machine() in ('x86_64', 'AMD64', 'i386', 'i686'): + self.assertIn('xorl', out) + + +class FunctionDebugTestBase(DebugTestBase): + + func_name = 'simple_nopython' + + def compile_simple_nopython(self): + with captured_stdout() as out: + cfunc = njit((types.int64,))(simple_nopython) + # Sanity check compiled function + self.assertPreciseEqual(cfunc(2), 3) + return out.getvalue() + + +class TestFunctionDebugOutput(FunctionDebugTestBase): + + def test_dump_bytecode(self): + with override_config('DUMP_BYTECODE', True): + out = 
self.compile_simple_nopython() + self.check_debug_output(out, ['bytecode']) + + def test_dump_ir(self): + with override_config('DUMP_IR', True): + out = self.compile_simple_nopython() + self.check_debug_output(out, ['ir']) + + def test_dump_cfg(self): + with override_config('DUMP_CFG', True): + out = self.compile_simple_nopython() + self.check_debug_output(out, ['cfg']) + + def test_dump_llvm(self): + with override_config('DUMP_LLVM', True): + out = self.compile_simple_nopython() + self.check_debug_output(out, ['llvm']) + + def test_dump_func_opt_llvm(self): + with override_config('DUMP_FUNC_OPT', True): + out = self.compile_simple_nopython() + self.check_debug_output(out, ['func_opt_llvm']) + + def test_dump_optimized_llvm(self): + with override_config('DUMP_OPTIMIZED', True): + out = self.compile_simple_nopython() + self.check_debug_output(out, ['optimized_llvm']) + + def test_dump_assembly(self): + with override_config('DUMP_ASSEMBLY', True): + out = self.compile_simple_nopython() + self.check_debug_output(out, ['assembly']) + + +class TestGeneratorDebugOutput(DebugTestBase): + + func_name = 'simple_gen' + + def compile_simple_gen(self): + with captured_stdout() as out: + cfunc = njit((types.int64, types.int64))(simple_gen) + # Sanity check compiled function + self.assertPreciseEqual(list(cfunc(2, 5)), [2, 5]) + return out.getvalue() + + def test_dump_ir_generator(self): + with override_config('DUMP_IR', True): + out = self.compile_simple_gen() + self.check_debug_output(out, ['ir']) + self.assertIn('--GENERATOR INFO: %s' % self.func_name, out) + expected_gen_info = textwrap.dedent(""" + generator state variables: ['x', 'y'] + yield point #1: live variables = ['y'], weak live variables = ['x'] + yield point #2: live variables = [], weak live variables = ['y'] + """) + self.assertIn(expected_gen_info, out) + + +class TestDisableJIT(DebugTestBase): + """ + Test the NUMBA_DISABLE_JIT environment variable. 
+ """ + + def test_jit(self): + with override_config('DISABLE_JIT', True): + with forbid_codegen(): + cfunc = jit(nopython=True)(simple_nopython) + self.assertPreciseEqual(cfunc(2), 3) + + def test_jitclass(self): + with override_config('DISABLE_JIT', True): + with forbid_codegen(): + SimpleJITClass = jitclass(simple_class_spec)(SimpleClass) + + obj = SimpleJITClass() + self.assertPreciseEqual(obj.h, 5) + + cfunc = jit(nopython=True)(simple_class_user) + self.assertPreciseEqual(cfunc(obj), 5) + + +class TestEnvironmentOverride(FunctionDebugTestBase): + """ + Test that environment variables are reloaded by Numba when modified. + """ + + # mutates env with os.environ so must be run serially + _numba_parallel_test_ = False + + def test_debug(self): + out = self.compile_simple_nopython() + self.assertFalse(out) + with override_env_config('NUMBA_DEBUG', '1'): + out = self.compile_simple_nopython() + # Note that all variables dependent on NUMBA_DEBUG are + # updated too. + self.check_debug_output(out, ['ir', 'typeinfer', + 'llvm', 'func_opt_llvm', + 'optimized_llvm', 'assembly']) + out = self.compile_simple_nopython() + self.assertFalse(out) + +class TestParforsDebug(TestCase): + """ + Tests debug options associated with parfors + """ + + # mutates env with os.environ so must be run serially + _numba_parallel_test_ = False + + def check_parfors_warning(self, warn_list): + msg = ("'parallel=True' was specified but no transformation for " + "parallel execution was possible.") + warning_found = False + for w in warn_list: + if msg in str(w.message): + warning_found = True + break + self.assertTrue(warning_found, "Warning message should be found.") + + def check_parfors_unsupported_prange_warning(self, warn_list): + msg = ("prange or pndindex loop will not be executed in parallel " + "due to there being more than one entry to or exit from the " + "loop (e.g., an assertion).") + warning_found = False + for w in warn_list: + if msg in str(w.message): + warning_found = True + 
break + self.assertTrue(warning_found, "Warning message should be found.") + + @needs_blas + @skip_parfors_unsupported + def test_warns(self): + """ + Test that using parallel=True on a function that does not have parallel + semantics warns. + """ + arr_ty = types.Array(types.float64, 2, "C") + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", NumbaPerformanceWarning) + njit((arr_ty, arr_ty), parallel=True)(unsupported_parfor) + self.check_parfors_warning(w) + + @needs_blas + @skip_parfors_unsupported + def test_unsupported_prange_warns(self): + """ + Test that prange with multiple exits issues a warning + """ + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", NumbaPerformanceWarning) + njit((types.int64,), parallel=True)(unsupported_prange) + self.check_parfors_unsupported_prange_warning(w) + + @skip_parfors_unsupported + def test_array_debug_opt_stats(self): + """ + Test that NUMBA_DEBUG_ARRAY_OPT_STATS produces valid output + """ + # deliberately trigger a compilation loop to increment the + # Parfor class state, this is to ensure the test works based + # on indices computed based on this state and not hard coded + # indices. 
+ njit((types.int64,), parallel=True)(supported_parfor) + + with override_env_config('NUMBA_DEBUG_ARRAY_OPT_STATS', '1'): + with captured_stdout() as out: + njit((types.int64,), parallel=True)(supported_parfor) + + # grab the various parts out the output + output = out.getvalue().split('\n') + parallel_loop_output = \ + [x for x in output if 'is produced from pattern' in x] + fuse_output = \ + [x for x in output if 'is fused into' in x] + after_fusion_output = \ + [x for x in output if 'After fusion, function' in x] + + # Parfor's have a shared state index, grab the current value + # as it will be used as an offset for all loop messages + parfor_state = int(re.compile(r'#([0-9]+)').search( + parallel_loop_output[0]).group(1)) + bounds = range(parfor_state, + parfor_state + len(parallel_loop_output)) + + # Check the Parallel for-loop is produced from + # works first + pattern = ("('ones function', 'NumPy mapping')", + ('prange', 'user', '')) + fmt = 'Parallel for-loop #{} is produced from pattern \'{}\' at' + for i, trials, lpattern in zip(bounds, parallel_loop_output, + pattern): + to_match = fmt.format(i, lpattern) + self.assertIn(to_match, trials) + + # Check the fusion statements are correct + pattern = (parfor_state + 1, parfor_state + 0) + fmt = 'Parallel for-loop #{} is fused into for-loop #{}.' + for trials in fuse_output: + to_match = fmt.format(*pattern) + self.assertIn(to_match, trials) + + # Check the post fusion statements are correct + pattern = (supported_parfor.__name__, 1, set([parfor_state])) + fmt = 'After fusion, function {} has {} parallel for-loop(s) #{}.' 
+ for trials in after_fusion_output: + to_match = fmt.format(*pattern) + self.assertIn(to_match, trials) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_debuginfo.py b/venv/lib/python3.10/site-packages/numba/tests/test_debuginfo.py new file mode 100644 index 0000000000000000000000000000000000000000..720c7f48cf77f28f37c5cadd104af61e8759c21c --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_debuginfo.py @@ -0,0 +1,759 @@ +from collections import namedtuple +import inspect +import re +import numpy as np +import math +from textwrap import dedent +import unittest +import warnings + +from numba.tests.support import (TestCase, override_config, + ignore_internal_warnings) +from numba import jit, njit +from numba.core import types +from numba.core.datamodel import default_manager +from numba.core.errors import NumbaDebugInfoWarning +import llvmlite.binding as llvm + +#NOTE: These tests are potentially sensitive to changes in SSA or lowering +# behaviour and may need updating should changes be made to the corresponding +# algorithms. + + +class TestDebugInfo(TestCase): + """ + These tests only checks the compiled assembly for debuginfo. 
+ """ + + def _getasm(self, fn, sig): + fn.compile(sig) + return fn.inspect_asm(sig) + + def _check(self, fn, sig, expect): + asm = self._getasm(fn, sig=sig) + m = re.search(r"\.section.+debug", asm, re.I) + got = m is not None + self.assertEqual(expect, got, msg='debug info not found in:\n%s' % asm) + + def test_no_debuginfo_in_asm(self): + @jit(nopython=True, debug=False) + def foo(x): + return x + + self._check(foo, sig=(types.int32,), expect=False) + + def test_debuginfo_in_asm(self): + @jit(nopython=True, debug=True) + def foo(x): + return x + + self._check(foo, sig=(types.int32,), expect=True) + + def test_environment_override(self): + with override_config('DEBUGINFO_DEFAULT', 1): + # Using default value + @jit(nopython=True) + def foo(x): + return x + self._check(foo, sig=(types.int32,), expect=True) + + # User override default + @jit(nopython=True, debug=False) + def bar(x): + return x + self._check(bar, sig=(types.int32,), expect=False) + + def test_llvm_inliner_flag_conflict(self): + # bar will be marked as 'alwaysinline', but when DEBUGINFO_DEFAULT is + # set functions are marked as 'noinline' this results in a conflict. 
+ # baz will be marked as 'noinline' as a result of DEBUGINFO_DEFAULT + + @njit(forceinline=True) + def bar(x): + return math.sin(x) + + @njit(forceinline=False) + def baz(x): + return math.cos(x) + + @njit + def foo(x): + a = bar(x) + b = baz(x) + return a, b + + # check it compiles + with override_config('DEBUGINFO_DEFAULT', 1): + result = foo(np.pi) + + self.assertPreciseEqual(result, foo.py_func(np.pi)) + + # check the LLVM IR has bar marked as 'alwaysinline' and baz as noinline + full_ir = foo.inspect_llvm(foo.signatures[0]) + module = llvm.parse_assembly(full_ir) + name = foo.overloads[foo.signatures[0]].fndesc.mangled_name + funcs = [x for x in module.functions if x.name == name] + self.assertEqual(len(funcs), 1) + func = funcs[0] + + # find the function calls and save the associated statements + f_names = [] + for blk in func.blocks: + for stmt in blk.instructions: + if stmt.opcode == 'call': + # stmt.function.name This is the function being called + f_names.append(str(stmt).strip()) + + # Need to check there's two specific things in the calls in the IR + # 1. a call to the llvm.sin.f64 intrinsic, this is from the inlined bar + # 2. a call to the baz function, this is from the noinline baz + found_sin = False + found_baz = False + baz_name = baz.overloads[baz.signatures[0]].fndesc.mangled_name + for x in f_names: + if not found_sin and re.match('.*llvm.sin.f64.*', x): + found_sin = True + if not found_baz and re.match(f'.*{baz_name}.*', x): + found_baz = True + + self.assertTrue(found_sin) + self.assertTrue(found_baz) + + +class TestDebugInfoEmission(TestCase): + """ Tests that debug info is emitted correctly. 
+ """ + + _NUMBA_OPT_0_ENV = {'NUMBA_OPT': '0'} + + def _get_llvmir(self, fn, sig): + with override_config('OPT', 0): + fn.compile(sig) + return fn.inspect_llvm(sig) + + def _get_metadata(self, fn, sig): + ll = self._get_llvmir(fn, sig).splitlines() + meta_re = re.compile(r'![0-9]+ =.*') + metadata = [] + for line in ll: + if meta_re.match(line): + metadata.append(line) + return metadata + + def _get_metadata_map(self, metadata): + """Gets the map of DI label to md, e.g. + '!33' -> '!{!"branch_weights", i32 1, i32 99}' + """ + metadata_definition_map = dict() + meta_definition_split = re.compile(r'(![0-9]+) = (.*)') + for line in metadata: + matched = meta_definition_split.match(line) + if matched: + dbg_val, info = matched.groups() + metadata_definition_map[dbg_val] = info + return metadata_definition_map + + def _get_lines_from_debuginfo(self, metadata): + # Get the lines contained in the debug info + md_def_map = self._get_metadata_map(metadata) + + lines = set() + for md in md_def_map.values(): + m = re.match(r"!DILocation\(line: (\d+),", md) + if m: + ln = int(m.group(1)) + lines.add(ln) + return lines + + def test_DW_LANG(self): + + @njit(debug=True) + def foo(): + pass + + metadata = self._get_metadata(foo, sig=()) + DICompileUnit = metadata[0] + self.assertEqual('!0', DICompileUnit[:2]) + self.assertIn('!DICompileUnit(language: DW_LANG_C_plus_plus', + DICompileUnit) + self.assertIn('producer: "clang (Numba)"', DICompileUnit) + + def test_DILocation(self): + """ Tests that DILocation information is reasonable. + """ + @njit(debug=True, error_model='numpy') + def foo(a): + b = a + 1.23 + c = b * 2.34 + d = b / c + print(d) + return d + + # the above produces LLVM like: + # define function() { + # entry: + # alloca + # store 0 to alloca + # + # setup for print + # branch + # other_labels: + # ... 
+ # } + # + # The following checks that: + # * the alloca and store have no !dbg + # * the arithmetic occurs in the order defined and with !dbg + # * that the !dbg entries are monotonically increasing in value with + # source line number + + sig = (types.float64,) + metadata = self._get_metadata(foo, sig=sig) + full_ir = self._get_llvmir(foo, sig=sig) + + module = llvm.parse_assembly(full_ir) + + name = foo.overloads[foo.signatures[0]].fndesc.mangled_name + funcs = [x for x in module.functions if x.name == name] + self.assertEqual(len(funcs), 1) + func = funcs[0] + blocks = [x for x in func.blocks] + self.assertGreater(len(blocks), 1) + block = blocks[0] + + # Find non-call/non-memory instr and check the sequence is as expected + instrs = [x for x in block.instructions if x.opcode not in + ['call', 'load', 'store']] + op_expect = {'fadd', 'fmul', 'fdiv'} + started = False + for x in instrs: + if x.opcode in op_expect: + op_expect.remove(x.opcode) + if not started: + started = True + elif op_expect and started: + self.fail("Math opcodes are not contiguous") + self.assertFalse(op_expect, "Math opcodes were not found") + + # Parse out metadata from end of each line, check it monotonically + # ascends with LLVM source line. Also store all the dbg references, + # these will be checked later. 
+ line2dbg = set() + re_dbg_ref = re.compile(r'.*!dbg (![0-9]+).*$') + found = -1 + for instr in instrs: + inst_as_str = str(instr) + matched = re_dbg_ref.match(inst_as_str) + if not matched: + # if there's no match, ensure it is one of alloca or store, + # it's important that the zero init/alloca instructions have + # no dbg data + accepted = ('alloca ', 'store ') + self.assertTrue(any([x in inst_as_str for x in accepted])) + continue + groups = matched.groups() + self.assertEqual(len(groups), 1) + dbg_val = groups[0] + int_dbg_val = int(dbg_val[1:]) + if found >= 0: + self.assertTrue(int_dbg_val >= found) + found = int_dbg_val + # some lines will alias dbg info, this is fine, it's only used to + # make sure that the line numbers are correct WRT python + line2dbg.add(dbg_val) + + pysrc, pysrc_line_start = inspect.getsourcelines(foo) + + # build a map of dbg reference to DI* information + metadata_definition_map = self._get_metadata_map(metadata) + + # Pull out metadata entries referred to by the llvm line end !dbg + # check they match the python source, the +2 is for the @njit decorator + # and the function definition line. + offsets = [0, # b = a + 1 + 1, # a * 2.34 + 2, # d = b / c + 3, # print(d) + ] + pyln_range = [pysrc_line_start + 2 + x for x in offsets] + + # do the check + for (k, line_no) in zip(sorted(line2dbg, key=lambda x: int(x[1:])), + pyln_range): + dilocation_info = metadata_definition_map[k] + self.assertIn(f'line: {line_no}', dilocation_info) + + # Check that variable "a" is declared as on the same line as function + # definition. + expr = r'.*!DILocalVariable\(name: "a",.*line: ([0-9]+),.*' + match_local_var_a = re.compile(expr) + for entry in metadata_definition_map.values(): + matched = match_local_var_a.match(entry) + if matched: + groups = matched.groups() + self.assertEqual(len(groups), 1) + dbg_line = int(groups[0]) + # +1 for the decorator. 
+ # Recall that Numba's DWARF refers to the "def" line, but + # `inspect` uses the decorator as the first line. + defline = pysrc_line_start + 1 + self.assertEqual(dbg_line, defline) + break + else: + self.fail('Assertion on DILocalVariable not made') + + @TestCase.run_test_in_subprocess(envvars=_NUMBA_OPT_0_ENV) + def test_DILocation_entry_blk(self): + # Needs a subprocess as jitting literally anything at any point in the + # lifetime of the process ends up with a codegen at opt 3. This is not + # amenable to this test! + # This test relies on the CFG not being simplified as it checks the jump + # from the entry block to the first basic block. Force OPT as 0, if set + # via the env var the targetmachine and various pass managers all end up + # at OPT 0 and the IR is minimally transformed prior to lowering to ELF. + # + # This tests that the unconditional jump emitted at the tail of + # the entry block has no debug metadata associated with it. In practice, + # if debug metadata is associated with it, it manifests as the + # prologue_end being associated with the end_sequence or similar (due to + # the way code gen works for the entry block). + + @njit(debug=True) + def foo(a): + return a + 1 + foo(123) + + full_ir = foo.inspect_llvm(foo.signatures[0]) + # The above produces LLVM like: + # + # define function() { + # entry: + # alloca + # store 0 to alloca + # unconditional jump to body: + # + # body: + # ... + # } + + module = llvm.parse_assembly(full_ir) + name = foo.overloads[foo.signatures[0]].fndesc.mangled_name + funcs = [x for x in module.functions if x.name == name] + self.assertEqual(len(funcs), 1) + func = funcs[0] + blocks = [x for x in func.blocks] + self.assertEqual(len(blocks), 2) + entry_block, body_block = blocks + + # Assert that the tail of the entry block is an unconditional jump to + # the body block and that the jump has no associated debug info. 
+ entry_instr = [x for x in entry_block.instructions] + ujmp = entry_instr[-1] + self.assertEqual(ujmp.opcode, 'br') + ujmp_operands = [x for x in ujmp.operands] + self.assertEqual(len(ujmp_operands), 1) + target_data = ujmp_operands[0] + target = str(target_data).split(':')[0].strip() + # check the unconditional jump target is to the body block + self.assertEqual(target, body_block.name) + # check the uncondition jump instr itself has no metadata + self.assertTrue(str(ujmp).endswith(target)) + + @TestCase.run_test_in_subprocess(envvars=_NUMBA_OPT_0_ENV) + def test_DILocation_decref(self): + """ This tests that decref's generated from `ir.Del`s as variables go + out of scope do not have debuginfo associated with them (the location of + `ir.Del` is an implementation detail). + """ + + @njit(debug=True) + def sink(*x): + pass + + # This function has many decrefs! + @njit(debug=True) + def foo(a): + x = (a, a) + if a[0] == 0: + sink(x) + return 12 + z = x[0][0] + return z + + sig = (types.float64[::1],) + full_ir = self._get_llvmir(foo, sig=sig) + + # make sure decref lines end with `meminfo.)` without !dbg info. + count = 0 + for line in full_ir.splitlines(): + line_stripped = line.strip() + if line_stripped.startswith('call void @NRT_decref'): + self.assertRegex(line, r'.*meminfo\.[0-9]+\)$') + count += 1 + self.assertGreater(count, 0) # make sure there were some decrefs! 
+ + def test_DILocation_undefined(self): + """ Tests that DILocation information for undefined vars is associated + with the line of the function definition (so it ends up in the prologue) + """ + @njit(debug=True) + def foo(n): + if n: + if n > 0: + c = 0 + return c + else: + # variable c is not defined in this branch + c += 1 + return c + + sig = (types.intp,) + metadata = self._get_metadata(foo, sig=sig) + pysrc, pysrc_line_start = inspect.getsourcelines(foo) + # Looks for versions of variable "c" and captures the line number + expr = r'.*!DILocalVariable\(name: "c\$?[0-9]?",.*line: ([0-9]+),.*' + matcher = re.compile(expr) + associated_lines = set() + for md in metadata: + match = matcher.match(md) + if match: + groups = match.groups() + self.assertEqual(len(groups), 1) + associated_lines.add(int(groups[0])) + # 3 versions of 'c': `c = 0`, `return c`, `c+=1` + self.assertEqual(len(associated_lines), 3) + self.assertIn(pysrc_line_start, associated_lines) + + def test_DILocation_versioned_variables(self): + """ Tests that DILocation information for versions of variables matches + up to their definition site.""" + # Note: there's still something wrong in the DI/SSA naming, the ret c is + # associated with the logically first definition. + + @njit(debug=True) + def foo(n): + if n: + c = 5 + else: + c = 1 + # prevents inline of return on py310 + py310_defeat1 = 1 # noqa + py310_defeat2 = 2 # noqa + py310_defeat3 = 3 # noqa + py310_defeat4 = 4 # noqa + return c + + sig = (types.intp,) + metadata = self._get_metadata(foo, sig=sig) + pysrc, pysrc_line_start = inspect.getsourcelines(foo) + + # Looks for SSA versioned names i.e. 
$ of the + # variable 'c' and captures the line + expr = r'.*!DILocalVariable\(name: "c\$[0-9]?",.*line: ([0-9]+),.*' + matcher = re.compile(expr) + associated_lines = set() + for md in metadata: + match = matcher.match(md) + if match: + groups = match.groups() + self.assertEqual(len(groups), 1) + associated_lines.add(int(groups[0])) + self.assertEqual(len(associated_lines), 2) # 2 SSA versioned names 'c' + + # Now find the `c = ` lines in the python source + py_lines = set() + for ix, pyln in enumerate(pysrc): + if 'c = ' in pyln: + py_lines.add(ix + pysrc_line_start) + self.assertEqual(len(py_lines), 2) # 2 assignments to c + + # check that the DILocation from the DI for `c` matches the python src + self.assertEqual(associated_lines, py_lines) + + def test_numeric_scalars(self): + """ Tests that dwarf info is correctly emitted for numeric scalars.""" + + DI = namedtuple('DI', 'name bits encoding') + + type_infos = {np.float32: DI("float32", 32, "DW_ATE_float"), + np.float64: DI("float64", 64, "DW_ATE_float"), + np.int8: DI("int8", 8, "DW_ATE_signed"), + np.int16: DI("int16", 16, "DW_ATE_signed"), + np.int32: DI("int32", 32, "DW_ATE_signed"), + np.int64: DI("int64", 64, "DW_ATE_signed"), + np.uint8: DI("uint8", 8, "DW_ATE_unsigned"), + np.uint16: DI("uint16", 16, "DW_ATE_unsigned"), + np.uint32: DI("uint32", 32, "DW_ATE_unsigned"), + np.uint64: DI("uint64", 64, "DW_ATE_unsigned"), + np.complex64: DI("complex64", 64, + "DW_TAG_structure_type"), + np.complex128: DI("complex128", 128, + "DW_TAG_structure_type"),} + + for ty, dwarf_info in type_infos.items(): + + @njit(debug=True) + def foo(): + a = ty(10) + return a + + metadata = self._get_metadata(foo, sig=()) + metadata_definition_map = self._get_metadata_map(metadata) + + for k, v in metadata_definition_map.items(): + if 'DILocalVariable(name: "a"' in v: + lvar = metadata_definition_map[k] + break + else: + assert 0, "missing DILocalVariable 'a'" + + type_marker = re.match('.*type: (![0-9]+).*', lvar).groups()[0] 
+ type_decl = metadata_definition_map[type_marker] + + if 'DW_ATE' in dwarf_info.encoding: + expected = (f'!DIBasicType(name: "{dwarf_info.name}", ' + f'size: {dwarf_info.bits}, ' + f'encoding: {dwarf_info.encoding})') + self.assertEqual(type_decl, expected) + else: # numerical complex type + # Don't match the whole string, just the known parts + raw_flt = 'float' if dwarf_info.bits == 64 else 'double' + expected = (f'distinct !DICompositeType(' + f'tag: {dwarf_info.encoding}, ' + f'name: "{dwarf_info.name} ' + f'({{{raw_flt}, {raw_flt}}})", ' + f'size: {dwarf_info.bits}') + self.assertIn(expected, type_decl) + + def test_arrays(self): + + @njit(debug=True) + def foo(): + a = np.ones((2, 3), dtype=np.float64) + return a + + metadata = self._get_metadata(foo, sig=()) + metadata_definition_map = self._get_metadata_map(metadata) + + for k, v in metadata_definition_map.items(): + if 'DILocalVariable(name: "a"' in v: + lvar = metadata_definition_map[k] + break + else: + assert 0, "missing DILocalVariable 'a'" + + type_marker = re.match('.*type: (![0-9]+).*', lvar).groups()[0] + type_decl = metadata_definition_map[type_marker] + + # check type + self.assertIn("!DICompositeType(tag: DW_TAG_structure_type", type_decl) + # check name encoding + self.assertIn(f'name: "{str(types.float64[:, ::1])}', type_decl) + + # pop out the "elements" of the composite type + match_elements = re.compile(r'.*elements: (![0-9]+),.*') + elem_matches = match_elements.match(type_decl).groups() + self.assertEqual(len(elem_matches), 1) + elem_match = elem_matches[0] + # The match should be something like, it's the elements from an array + # data model. 
+ # !{!35, !36, !37, !39, !40, !43, !45}' + struct_markers = metadata_definition_map[elem_match] + struct_pattern = '!{' + '(![0-9]+), ' * 6 + '(![0-9]+)}' + match_struct = re.compile(struct_pattern) + struct_member_matches = match_struct.match(struct_markers).groups() + self.assertIsNotNone(struct_member_matches is not None) + data_model = default_manager.lookup(types.float64[:, ::1]) + self.assertEqual(len(struct_member_matches), len(data_model._fields)) + + ptr_size = types.intp.bitwidth + ptr_re = (r'!DIDerivedType\(tag: DW_TAG_pointer_type, ' + rf'baseType: ![0-9]+, size: {ptr_size}\)') + int_re = (rf'!DIBasicType\(name: "int{ptr_size}", size: {ptr_size}, ' + r'encoding: DW_ATE_signed\)') + utuple_re = (r'!DICompositeType\(tag: DW_TAG_array_type, ' + rf'name: "UniTuple\(int{ptr_size} x 2\) ' + rf'\(\[2 x i{ptr_size}\]\)", baseType: ![0-9]+, ' + rf'size: {2 * ptr_size}, elements: ![0-9]+, ' + rf'identifier: "\[2 x i{ptr_size}\]"\)') + expected = {'meminfo': ptr_re, + 'parent': ptr_re, + 'nitems': int_re, + 'itemsize': int_re, + 'data': ptr_re, + 'shape': utuple_re, + 'strides': utuple_re} + + # look for `baseType: <>` for the type + base_type_pattern = r'!DIDerivedType\(.*, baseType: (![0-9]+),.*' + base_type_matcher = re.compile(base_type_pattern) + + for ix, field in enumerate(data_model._fields): + derived_type = metadata_definition_map[struct_member_matches[ix]] + self.assertIn("DIDerivedType", derived_type) + self.assertIn(f'name: "{field}"', derived_type) + base_type_match = base_type_matcher.match(derived_type) + base_type_matches = base_type_match.groups() + self.assertEqual(len(base_type_matches), 1) + base_type_marker = base_type_matches[0] + data_type = metadata_definition_map[base_type_marker] + self.assertRegex(data_type, expected[field]) + + def test_debug_optnone(self): + def get_debug_lines(fn): + metadata = self._get_metadata(fn, fn.signatures[0]) + lines = self._get_lines_from_debuginfo(metadata) + return lines + + def get_func_attrs(fn): + 
cres = fn.overloads[fn.signatures[0]] + lib = cres.library + fn = lib._final_module.get_function(cres.fndesc.mangled_name) + attrs = set(b' '.join(fn.attributes).split()) + return attrs + + def foo(): + n = 10 + c = 0 + for i in range(n): + c += i + return c + + foo_debug = njit(debug=True)(foo) + foo_debug_optnone = njit(debug=True, _dbg_optnone=True)(foo) + foo_debug_optnone_inline = njit(debug=True, _dbg_optnone=True, + forceinline=True)(foo) + + firstline = foo.__code__.co_firstlineno + + expected_info = {} + expected_info[foo_debug] = dict( + # just the dummy line-0 and the line of the return statement + lines={0, firstline + 5}, + must_have_attrs=set(), + must_not_have_attrs=set([b"optnone"]), + ) + expected_info[foo_debug_optnone] = dict( + # all the lines should be included + lines=set(range(firstline + 1, firstline + 6)), + must_have_attrs=set([b"optnone"]), + must_not_have_attrs=set(), + ) + expected_info[foo_debug_optnone_inline] = dict( + # optnone=True is overridden by forceinline, so this looks like the + # foo_debug version + lines={0, firstline + 5}, + must_have_attrs=set([b"alwaysinline"]), + must_not_have_attrs=set([b"optnone"]), + ) + + expected_ret = foo() + + for udt, expected in expected_info.items(): + with self.subTest(udt.targetoptions): + got = udt() + self.assertEqual(got, expected_ret) + + # Compare the line locations in the debug info. 
+ self.assertEqual(get_debug_lines(udt), expected["lines"]) + + # Check for attributes on the LLVM function + attrs = get_func_attrs(udt) + must_have = expected["must_have_attrs"] + self.assertEqual(attrs & must_have, must_have) + must_not_have = expected["must_not_have_attrs"] + self.assertFalse(attrs & must_not_have) + + def test_omitted_arg(self): + # See issue 7726 + @njit(debug=True) + def foo(missing=None): + pass + + # check that it will actually compile (verifies DI emission is ok) + with override_config('DEBUGINFO_DEFAULT', 1): + foo() + + metadata = self._get_metadata(foo, sig=(types.Omitted(None),)) + metadata_definition_map = self._get_metadata_map(metadata) + + # Find DISubroutineType + tmp_disubr = [] + for md in metadata: + if "DISubroutineType" in md: + tmp_disubr.append(md) + self.assertEqual(len(tmp_disubr), 1) + disubr = tmp_disubr.pop() + + disubr_matched = re.match(r'.*!DISubroutineType\(types: ([!0-9]+)\)$', + disubr) + self.assertIsNotNone(disubr_matched) + disubr_groups = disubr_matched.groups() + self.assertEqual(len(disubr_groups), 1) + disubr_meta = disubr_groups[0] + + # Find the types in the DISubroutineType arg list + disubr_types = metadata_definition_map[disubr_meta] + disubr_types_matched = re.match(r'!{(.*)}', disubr_types) + self.assertIsNotNone(disubr_matched) + disubr_types_groups = disubr_types_matched.groups() + self.assertEqual(len(disubr_types_groups), 1) + + # fetch out and assert the last argument type, should be void * + md_fn_arg = [x.strip() for x in disubr_types_groups[0].split(',')][-1] + arg_ty = metadata_definition_map[md_fn_arg] + expected_arg_ty = (r'^.*!DICompositeType\(tag: DW_TAG_structure_type, ' + r'name: "Anonymous struct \({}\)", elements: ' + r'(![0-9]+), identifier: "{}"\)') + self.assertRegex(arg_ty, expected_arg_ty) + md_base_ty = re.match(expected_arg_ty, arg_ty).groups()[0] + base_ty = metadata_definition_map[md_base_ty] + # expect ir.LiteralStructType([]) + self.assertEqual(base_ty, ('!{}')) + + def 
test_missing_source(self): + strsrc = """ + def foo(): + return 1 + """ + l = dict() + exec(dedent(strsrc), {}, l) + foo = njit(debug=True)(l['foo']) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always', NumbaDebugInfoWarning) + ignore_internal_warnings() + foo() + + self.assertEqual(len(w), 1) + found = w[0] + self.assertEqual(found.category, NumbaDebugInfoWarning) + msg = str(found.message) + # make sure the warning contains the right message + self.assertIn('Could not find source for function', msg) + # and refers to the offending function + self.assertIn(str(foo.py_func), msg) + + def test_irregularly_indented_source(self): + + @njit(debug=True) + def foo(): +# NOTE: THIS COMMENT MUST START AT COLUMN 0 FOR THIS SAMPLE CODE TO BE VALID # noqa: E115, E501 + return 1 + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always', NumbaDebugInfoWarning) + ignore_internal_warnings() + foo() + + # No warnings + self.assertEqual(len(w), 0) + + metadata = self._get_metadata(foo, foo.signatures[0]) + lines = self._get_lines_from_debuginfo(metadata) + # Only one line + self.assertEqual(len(lines), 1) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_deprecations.py b/venv/lib/python3.10/site-packages/numba/tests/test_deprecations.py new file mode 100644 index 0000000000000000000000000000000000000000..48b5a36aec59b9bc15c672486d947d13ca595870 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_deprecations.py @@ -0,0 +1,262 @@ +import warnings +import unittest +from contextlib import contextmanager + +from numba import jit, vectorize, guvectorize +from numba.core.errors import (NumbaDeprecationWarning, + NumbaPendingDeprecationWarning, NumbaWarning) +from numba.tests.support import TestCase, needs_setuptools + + +@contextmanager +def _catch_numba_deprecation_warnings(): + with warnings.catch_warnings(record=True) as w: + 
warnings.simplefilter("ignore", category=NumbaWarning) + warnings.simplefilter("always", category=NumbaDeprecationWarning) + yield w + + +class TestDeprecation(TestCase): + + def check_warning(self, warnings, expected_str, category, check_rtd=True): + self.assertEqual(len(warnings), 1) + self.assertEqual(warnings[0].category, category) + self.assertIn(expected_str, str(warnings[0].message)) + if check_rtd: + self.assertIn("https://numba.readthedocs.io", + str(warnings[0].message)) + + @TestCase.run_test_in_subprocess + def test_explicit_false_nopython_kwarg(self): + # tests that explicitly setting `nopython=False` in @jit raises a + # warning about it doing nothing. + with _catch_numba_deprecation_warnings() as w: + + @jit(nopython=False) + def foo(): + pass + + foo() + + msg = "The keyword argument 'nopython=False' was supplied" + self.check_warning(w, msg, NumbaDeprecationWarning, check_rtd=False) + + @TestCase.run_test_in_subprocess + def test_vectorize_missing_nopython_kwarg_not_reported(self): + # Checks that use of @vectorize without a nopython kwarg doesn't raise + # a warning about lack of said kwarg. + + with _catch_numba_deprecation_warnings() as w: + # This compiles via nopython mode directly + @vectorize('float64(float64)') + def foo(a): + return a + 1 + + self.assertFalse(w) + + @TestCase.run_test_in_subprocess + def test_vectorize_nopython_false_is_reported(self): + # Checks that use of @vectorize with nopython=False raises a warning + # about supplying it. + + with _catch_numba_deprecation_warnings() as w: + # This compiles via nopython mode directly + @vectorize('float64(float64)', nopython=False) + def foo(a): + return a + 1 + + msg = "The keyword argument 'nopython=False' was supplied" + self.check_warning(w, msg, NumbaDeprecationWarning, check_rtd=False) + + @TestCase.run_test_in_subprocess + def test_vectorize_objmode_direct_compilation_no_warnings(self): + # Checks that use of @vectorize with forceobj=True raises no warnings. 
+ + with _catch_numba_deprecation_warnings() as w: + # Compiles via objmode directly with no warnings raised + @vectorize('float64(float64)', forceobj=True) + def foo(a): + object() + return a + 1 + + self.assertFalse(w) + + @TestCase.run_test_in_subprocess + def test_vectorize_objmode_compilation_nopython_false_warns(self): + # Checks that use of @vectorize with forceobj set and nopython set as + # False raises no warnings. + + with _catch_numba_deprecation_warnings() as w: + # Compiles via objmode directly with no warnings raised + @vectorize('float64(float64)', forceobj=True, nopython=False) + def foo(a): + object() + return a + 1 + + msg = "The keyword argument 'nopython=False' was supplied" + self.check_warning(w, msg, NumbaDeprecationWarning, check_rtd=False) + + @TestCase.run_test_in_subprocess + def test_vectorize_parallel_true_no_warnings(self): + # Checks that use of @vectorize with the parallel target doesn't + # raise warnings about nopython kwarg, the parallel target doesn't + # support objmode so nopython=True is implicit. + with _catch_numba_deprecation_warnings() as w: + @vectorize('float64(float64)', target='parallel') + def foo(x): + return x + 1 + + self.assertFalse(w) + + @TestCase.run_test_in_subprocess + def test_vectorize_parallel_true_nopython_true_no_warnings(self): + # Checks that use of @vectorize with the parallel target and + # nopython=True doesn't raise warnings about nopython kwarg. + with _catch_numba_deprecation_warnings() as w: + @vectorize('float64(float64)', target='parallel', nopython=True) + def foo(x): + return x + 1 + + self.assertFalse(w) + + @TestCase.run_test_in_subprocess + def test_vectorize_parallel_true_nopython_false_warns(self): + # Checks that use of @vectorize with the parallel target and + # nopython=False raises a warning about the nopython kwarg being False. 
+ with _catch_numba_deprecation_warnings() as w: + @vectorize('float64(float64)', target='parallel', nopython=False) + def foo(x): + return x + 1 + + msg = "The keyword argument 'nopython=False' was supplied" + self.check_warning(w, msg, NumbaDeprecationWarning, check_rtd=False) + + @TestCase.run_test_in_subprocess + def test_vectorize_calling_jit_with_nopython_false_warns_from_jit(self): + # Checks the scope of the suppression of deprecation warnings that are + # present in e.g. vectorize. The function `bar` should raise a + # deprecation warning, the `@vectorize`d `foo` function should not, + # even though both don't have a nopython kwarg. + + # First check that the @vectorize call doesn't raise anything + with _catch_numba_deprecation_warnings() as w: + @vectorize('float64(float64)', forceobj=True) + def foo(x): + return bar(x + 1) + + def bar(*args): + pass + + self.assertFalse(w) + + @TestCase.run_test_in_subprocess + def test_guvectorize_implicit_nopython_no_warnings(self): + # Checks that use of @guvectorize with implicit nopython compilation + # does not warn on compilation. + with _catch_numba_deprecation_warnings() as w: + + @guvectorize('void(float64[::1], float64[::1])', '(n)->(n)') + def bar(a, b): + a += 1 + + self.assertFalse(w) + + @TestCase.run_test_in_subprocess + def test_guvectorize_forceobj_no_warnings(self): + # Checks that use of @guvectorize with direct objmode compilation does + # not warn. + with _catch_numba_deprecation_warnings() as w: + + @guvectorize('void(float64[::1], float64[::1])', '(n)->(n)', + forceobj=True) + def bar(a, b): + object() + a += 1 + + self.assertFalse(w) + + @TestCase.run_test_in_subprocess + def test_guvectorize_parallel_implicit_nopython_no_warnings(self): + # Checks that use of @guvectorize with parallel target and implicit + # nopython mode compilation does not warn. 
+ with _catch_numba_deprecation_warnings() as w: + + @guvectorize('void(float64[::1], float64[::1])', '(n)->(n)', + target='parallel') + def bar(a, b): + a += 1 + + self.assertFalse(w) + + @TestCase.run_test_in_subprocess + def test_guvectorize_parallel_forceobj_no_warnings(self): + # Checks that use of @guvectorize with parallel target and direct + # objmode compilation does not warn. + with _catch_numba_deprecation_warnings() as w: + + # This compiles somewhat surprisingly for the parallel target using + # object mode?! + @guvectorize('void(float64[::1], float64[::1])', '(n)->(n)', + target='parallel', forceobj=True) + def bar(a, b): + object() + a += 1 + + self.assertFalse(w) + + @TestCase.run_test_in_subprocess + def test_reflection_of_mutable_container(self): + # tests that reflection in list/set warns + def foo_list(a): + return a.append(1) + + def foo_set(a): + return a.add(1) + + for f in [foo_list, foo_set]: + container = f.__name__.strip('foo_') + inp = eval(container)([10, ]) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("ignore", category=NumbaWarning) + warnings.simplefilter("always", + category=NumbaPendingDeprecationWarning) + jit(nopython=True)(f)(inp) + self.assertEqual(len(w), 1) + self.assertEqual(w[0].category, NumbaPendingDeprecationWarning) + warn_msg = str(w[0].message) + msg = ("Encountered the use of a type that is scheduled for " + "deprecation") + self.assertIn(msg, warn_msg) + msg = ("\'reflected %s\' found for argument" % container) + self.assertIn(msg, warn_msg) + self.assertIn("https://numba.readthedocs.io", warn_msg) + + @needs_setuptools + @TestCase.run_test_in_subprocess + def test_pycc_module(self): + # checks import of module warns + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", + category=NumbaPendingDeprecationWarning) + import numba.pycc # noqa: F401 + + expected_str = ("The 'pycc' module is pending deprecation.") + self.check_warning(w, expected_str, 
NumbaPendingDeprecationWarning) + + @needs_setuptools + @TestCase.run_test_in_subprocess + def test_pycc_CC(self): + # check the most commonly used functionality (CC) warns + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", + category=NumbaPendingDeprecationWarning) + from numba.pycc import CC # noqa: F401 + + expected_str = ("The 'pycc' module is pending deprecation.") + self.check_warning(w, expected_str, NumbaPendingDeprecationWarning) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_dictimpl.py b/venv/lib/python3.10/site-packages/numba/tests/test_dictimpl.py new file mode 100644 index 0000000000000000000000000000000000000000..4814e843610a7d53c9d6edf6598f19e3bd43113d --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_dictimpl.py @@ -0,0 +1,656 @@ +""" +Testing C implementation of the numba dictionary +""" + +import ctypes +import random + +from numba.tests.support import TestCase +from numba import _helperlib, jit, typed, types +from numba.core.config import IS_32BITS +from numba.core.datamodel.models import UniTupleModel +from numba.extending import register_model, typeof_impl, unbox, overload + + +DKIX_EMPTY = -1 + + +ALIGN = 4 if IS_32BITS else 8 + + +class Dict(object): + """A wrapper around the C-API to provide a minimal dictionary object for + testing. 
+ """ + def __init__(self, tc, keysize, valsize): + """ + Parameters + ---------- + tc : TestCase instance + keysize : int + byte size for the key + valsize : int + byte size for the value + """ + self.tc = tc + self.keysize = keysize + self.valsize = valsize + self.dp = self.dict_new_minsize(keysize, valsize) + + def __del__(self): + self.tc.numba_dict_free(self.dp) + + def __len__(self): + return self.dict_length() + + def __setitem__(self, k, v): + bk = bytes(k.encode()) + bv = bytes(v.encode()) + self.tc.assertEqual(len(bk), self.keysize) + self.tc.assertEqual(len(bv), self.valsize) + self.dict_insert(bk, bv) + + def __getitem__(self, k): + bk = bytes(k.encode()) + self.tc.assertEqual(len(bk), self.keysize) + ix, old = self.dict_lookup(bk) + if ix == DKIX_EMPTY: + raise KeyError + else: + return old.decode() + + def __delitem__(self, k): + bk = bytes(k.encode()) + self.tc.assertEqual(len(bk), self.keysize) + if not self.dict_delitem(bk): + raise KeyError(k) + + def get(self, k): + try: + return self[k] + except KeyError: + return + + def items(self): + return DictIter(self) + + def popitem(self): + k, v = self.dict_popitem() + return k.decode(), v.decode() + + # + # The methods below are higher-level wrappers for the C-API + # + + def dict_new_minsize(self, key_size, val_size): + dp = ctypes.c_void_p() + status = self.tc.numba_dict_new_sized( + ctypes.byref(dp), 0, key_size, val_size, + ) + self.tc.assertEqual(status, 0) + return dp + + def dict_length(self): + return self.tc.numba_dict_length(self.dp) + + def dict_insert(self, key_bytes, val_bytes): + hashval = hash(key_bytes) + status = self.tc.numba_dict_insert_ez( + self.dp, key_bytes, hashval, val_bytes, + ) + self.tc.assertGreaterEqual(status, 0) + + def dict_lookup(self, key_bytes): + hashval = hash(key_bytes) + oldval_bytes = ctypes.create_string_buffer(self.valsize) + ix = self.tc.numba_dict_lookup( + self.dp, key_bytes, hashval, oldval_bytes, + ) + self.tc.assertGreaterEqual(ix, DKIX_EMPTY) + return 
ix, oldval_bytes.value + + def dict_delitem(self, key_bytes): + ix, oldval = self.dict_lookup(key_bytes) + if ix == DKIX_EMPTY: + return False + hashval = hash(key_bytes) + status = self.tc.numba_dict_delitem(self.dp, hashval, ix) + self.tc.assertEqual(status, 0) + return True + + def dict_popitem(self): + key_bytes = ctypes.create_string_buffer(self.keysize) + val_bytes = ctypes.create_string_buffer(self.valsize) + status = self.tc.numba_dict_popitem(self.dp, key_bytes, val_bytes) + if status != 0: + if status == -4: + raise KeyError('popitem(): dictionary is empty') + else: + self.tc._fail('Unknown') + return key_bytes.value, val_bytes.value + + def dict_iter(self, itptr): + self.tc.numba_dict_iter(itptr, self.dp) + + def dict_iter_next(self, itptr): + bk = ctypes.c_void_p(0) + bv = ctypes.c_void_p(0) + status = self.tc.numba_dict_iter_next( + itptr, ctypes.byref(bk), ctypes.byref(bv), + ) + if status == -2: + raise ValueError('dictionary mutated') + elif status == -3: + return + else: + self.tc.assertGreaterEqual(status, 0) + + # Check the alignment of the key-value in the entries. + # We know we are getting the pointers to data in the entries. + + self.tc.assertEqual(bk.value % ALIGN, 0, msg='key not aligned') + self.tc.assertEqual(bv.value % ALIGN, 0, msg='val not aligned') + + key = (ctypes.c_char * self.keysize).from_address(bk.value) + val = (ctypes.c_char * self.valsize).from_address(bv.value) + return key.value, val.value + + +class DictIter(object): + """A iterator for the `Dict.items()`. + + Only the `.items()` is needed. `.keys` and `.values` can be trivially + implemented on the `.items` iterator. 
+ """ + def __init__(self, parent): + self.parent = parent + itsize = self.parent.tc.numba_dict_iter_sizeof() + self.it_state_buf = (ctypes.c_char_p * itsize)(0) + self.it = ctypes.cast(self.it_state_buf, ctypes.c_void_p) + self.parent.dict_iter(self.it) + + def __iter__(self): + return self + + def __next__(self): + out = self.parent.dict_iter_next(self.it) + if out is None: + raise StopIteration + else: + k, v = out + return k.decode(), v.decode() + + next = __next__ # needed for py2 only + + +class Parametrized(tuple): + """supporting type for TestDictImpl.test_parametrized_types + needs to be global to be cacheable""" + def __init__(self, tup): + assert all(isinstance(v, str) for v in tup) + + +class ParametrizedType(types.Type): + """this is essentially UniTuple(unicode_type, n) + BUT type name is the same for all n""" + + def __init__(self, value): + super(ParametrizedType, self).__init__('ParametrizedType') + self.dtype = types.unicode_type + self.n = len(value) + + @property + def key(self): + return self.n + + def __len__(self): + return self.n + + +class TestDictImpl(TestCase): + def setUp(self): + """Bind to the c_helper library and provide the ctypes wrapper. 
+ """ + dict_t = ctypes.c_void_p + iter_t = ctypes.c_void_p + hash_t = ctypes.c_ssize_t + + def wrap(name, restype, argtypes=()): + proto = ctypes.CFUNCTYPE(restype, *argtypes) + return proto(_helperlib.c_helpers[name]) + + # numba_test_dict() + self.numba_test_dict = wrap( + 'test_dict', + ctypes.c_int, + ) + # numba_dict_new_sized( + # NB_Dict **out, + # Py_ssize_t n_keys, + # Py_ssize_t key_size, + # Py_ssize_t val_size + # ) + self.numba_dict_new_sized = wrap( + 'dict_new_sized', + ctypes.c_int, + [ + ctypes.POINTER(dict_t), # out + ctypes.c_ssize_t, # n_keys + ctypes.c_ssize_t, # key_size + ctypes.c_ssize_t, # val_size + ], + ) + # numba_dict_free(NB_Dict *d) + self.numba_dict_free = wrap( + 'dict_free', + None, + [dict_t], + ) + # numba_dict_length(NB_Dict *d) + self.numba_dict_length = wrap( + 'dict_length', + ctypes.c_ssize_t, + [dict_t], + ) + # numba_dict_insert_ez( + # NB_Dict *d, + # const char *key_bytes, + # Py_hash_t hash, + # const char *val_bytes, + # ) + self.numba_dict_insert_ez = wrap( + 'dict_insert_ez', + ctypes.c_int, + [ + dict_t, # d + ctypes.c_char_p, # key_bytes + hash_t, # hash + ctypes.c_char_p, # val_bytes + ], + ) + # numba_dict_lookup( + # NB_Dict *d, + # const char *key_bytes, + # Py_hash_t hash, + # char *oldval_bytes + # ) + self.numba_dict_lookup = wrap( + 'dict_lookup', + ctypes.c_ssize_t, + [ + dict_t, # d + ctypes.c_char_p, # key_bytes + hash_t, # hash + ctypes.c_char_p, # oldval_bytes + ], + ) + # numba_dict_delitem( + # NB_Dict *d, + # Py_hash_t hash, + # Py_ssize_t ix + # ) + self.numba_dict_delitem = wrap( + 'dict_delitem', + ctypes.c_int, + [ + dict_t, # d + hash_t, # hash + ctypes.c_ssize_t, # ix + ], + ) + # numba_dict_popitem( + # NB_Dict *d, + # char *key_bytes, + # char *val_bytes + # ) + self.numba_dict_popitem = wrap( + 'dict_popitem', + ctypes.c_int, + [ + dict_t, # d + ctypes.c_char_p, # key_bytes + ctypes.c_char_p, # val_bytes + ], + ) + # numba_dict_iter_sizeof() + self.numba_dict_iter_sizeof = wrap( + 
'dict_iter_sizeof', + ctypes.c_size_t, + ) + # numba_dict_iter( + # NB_DictIter *it, + # NB_Dict *d + # ) + self.numba_dict_iter = wrap( + 'dict_iter', + None, + [ + iter_t, + dict_t, + ], + ) + # numba_dict_iter_next( + # NB_DictIter *it, + # const char **key_ptr, + # const char **val_ptr + # ) + self.numba_dict_iter_next = wrap( + 'dict_iter_next', + ctypes.c_int, + [ + iter_t, # it + ctypes.POINTER(ctypes.c_void_p), # key_ptr + ctypes.POINTER(ctypes.c_void_p), # val_ptr + ], + ) + + def test_simple_c_test(self): + # Runs the basic test in C. + ret = self.numba_test_dict() + self.assertEqual(ret, 0) + + def test_insertion_small(self): + # Tests insertion and lookup for a small dict. + d = Dict(self, 4, 8) + self.assertEqual(len(d), 0) + self.assertIsNone(d.get('abcd')) + + # First key + d['abcd'] = 'beefcafe' + self.assertEqual(len(d), 1) + self.assertIsNotNone(d.get('abcd')) + self.assertEqual(d['abcd'], 'beefcafe') + + # Duplicated key replaces + d['abcd'] = 'cafe0000' + self.assertEqual(len(d), 1) + self.assertEqual(d['abcd'], 'cafe0000') + + # Second key + d['abce'] = 'cafe0001' + self.assertEqual(len(d), 2) + self.assertEqual(d['abcd'], 'cafe0000') + self.assertEqual(d['abce'], 'cafe0001') + + # Third key + d['abcf'] = 'cafe0002' + self.assertEqual(len(d), 3) + self.assertEqual(d['abcd'], 'cafe0000') + self.assertEqual(d['abce'], 'cafe0001') + self.assertEqual(d['abcf'], 'cafe0002') + + def check_insertion_many(self, nmax): + # Helper to test insertion/lookup/resize + d = Dict(self, 8, 8) + + def make_key(v): + return "key_{:04}".format(v) + + def make_val(v): + return "val_{:04}".format(v) + + # Check insert + for i in range(nmax): + d[make_key(i)] = make_val(i) + self.assertEqual(len(d), i + 1) + + # Check lookup + for i in range(nmax): + self.assertEqual(d[make_key(i)], make_val(i)) + + def test_insertion_many(self): + # Test insertion for differently sized dict + # Around minsize + self.check_insertion_many(nmax=7) + self.check_insertion_many(nmax=8) + 
self.check_insertion_many(nmax=9) + # Around nmax = 32 + self.check_insertion_many(nmax=31) + self.check_insertion_many(nmax=32) + self.check_insertion_many(nmax=33) + # Around nmax = 1024 + self.check_insertion_many(nmax=1023) + self.check_insertion_many(nmax=1024) + self.check_insertion_many(nmax=1025) + # Around nmax = 4096 + self.check_insertion_many(nmax=4095) + self.check_insertion_many(nmax=4096) + self.check_insertion_many(nmax=4097) + + def test_deletion_small(self): + # Test deletion + d = Dict(self, 4, 8) + self.assertEqual(len(d), 0) + self.assertIsNone(d.get('abcd')) + + d['abcd'] = 'cafe0000' + d['abce'] = 'cafe0001' + d['abcf'] = 'cafe0002' + + self.assertEqual(len(d), 3) + self.assertEqual(d['abcd'], 'cafe0000') + self.assertEqual(d['abce'], 'cafe0001') + self.assertEqual(d['abcf'], 'cafe0002') + self.assertEqual(len(d), 3) + + # Delete first item + del d['abcd'] + self.assertIsNone(d.get('abcd')) + self.assertEqual(d['abce'], 'cafe0001') + self.assertEqual(d['abcf'], 'cafe0002') + self.assertEqual(len(d), 2) + + # Delete first item again + with self.assertRaises(KeyError): + del d['abcd'] + + # Delete third + del d['abcf'] + self.assertIsNone(d.get('abcd')) + self.assertEqual(d['abce'], 'cafe0001') + self.assertIsNone(d.get('abcf')) + self.assertEqual(len(d), 1) + + # Delete second + del d['abce'] + self.assertIsNone(d.get('abcd')) + self.assertIsNone(d.get('abce')) + self.assertIsNone(d.get('abcf')) + self.assertEqual(len(d), 0) + + def check_delete_randomly(self, nmax, ndrop, nrefill, seed=0): + # Helper to test deletion + random.seed(seed) + + d = Dict(self, 8, 8) + keys = {} + + def make_key(v): + return "k_{:06x}".format(v) + + def make_val(v): + return "v_{:06x}".format(v) + + for i in range(nmax): + d[make_key(i)] = make_val(i) + + # Fill to nmax + for i in range(nmax): + k = make_key(i) + v = make_val(i) + keys[k] = v + self.assertEqual(d[k], v) + + self.assertEqual(len(d), nmax) + + # Randomly drop + droplist = random.sample(list(keys), 
ndrop) + remain = keys.copy() + for i, k in enumerate(droplist, start=1): + del d[k] + del remain[k] + self.assertEqual(len(d), nmax - i) + self.assertEqual(len(d), nmax - ndrop) + + # Make sure everything dropped is gone + for k in droplist: + self.assertIsNone(d.get(k)) + + # Make sure everything else is still here + for k in remain: + self.assertEqual(d[k], remain[k]) + + # Refill + for i in range(nrefill): + k = make_key(nmax + i) + v = make_val(nmax + i) + remain[k] = v + d[k] = v + + self.assertEqual(len(remain), len(d)) + + # Make sure everything is here + for k in remain: + self.assertEqual(d[k], remain[k]) + + def test_delete_randomly(self): + # Test deletion for differently sized dict + self.check_delete_randomly(nmax=8, ndrop=2, nrefill=2) + self.check_delete_randomly(nmax=13, ndrop=10, nrefill=31) + self.check_delete_randomly(nmax=100, ndrop=50, nrefill=200) + self.check_delete_randomly(nmax=100, ndrop=99, nrefill=100) + self.check_delete_randomly(nmax=100, ndrop=100, nrefill=100) + self.check_delete_randomly(nmax=1024, ndrop=999, nrefill=1) + self.check_delete_randomly(nmax=1024, ndrop=999, nrefill=2048) + + def test_delete_randomly_large(self): + # Go beyond 2^16 to exercise large indices. + # Internally, size of index changes as the hashtable size changes. + # Size of index can be 8, 16, 32 or 64 bytes (on 64-bit). + # We are not inserting >2^32 elements because of limitation of time. 
+ self.check_delete_randomly(nmax=2**17, ndrop=2**16, nrefill=2**10) + + def test_popitem(self): + nmax = 10 + d = Dict(self, 8, 8) + + def make_key(v): + return "k_{:06x}".format(v) + + def make_val(v): + return "v_{:06x}".format(v) + + for i in range(nmax): + d[make_key(i)] = make_val(i) + + self.assertEqual(len(d), nmax) + k, v = d.popitem() + self.assertEqual(len(d), nmax - 1) + self.assertEqual(k, make_key(len(d))) + self.assertEqual(v, make_val(len(d))) + + while len(d): + n = len(d) + k, v = d.popitem() + self.assertEqual(len(d), n - 1) + self.assertEqual(k, make_key(len(d))) + self.assertEqual(v, make_val(len(d))) + + self.assertEqual(len(d), 0) + with self.assertRaises(KeyError) as raises: + d.popitem() + self.assertIn( + 'popitem(): dictionary is empty', + str(raises.exception), + ) + + def test_iter_items(self): + # Test .items iteration + d = Dict(self, 4, 4) + nmax = 1000 + + def make_key(v): + return "{:04}".format(v) + + def make_val(v): + return "{:04}".format(v + nmax) + + for i in range(nmax): + d[make_key(i)] = make_val(i) + + # Check that the everything is ordered + for i, (k, v) in enumerate(d.items()): + self.assertEqual(make_key(i), k) + self.assertEqual(make_val(i), v) + + def check_sizing(self, key_size, val_size, nmax): + # Helper to verify different key/value sizes. + d = Dict(self, key_size, val_size) + + def make_key(v): + return "{:0{}}".format(v, key_size)[:key_size] + + def make_val(v): + return "{:0{}}".format(nmax - v - 1, val_size)[:val_size] + + for i in range(nmax): + d[make_key(i)] = make_val(i) + + # Check that the everything is ordered + for i, (k, v) in enumerate(d.items()): + self.assertEqual(make_key(i), k) + self.assertEqual(make_val(i), v) + + def test_sizing(self): + # Check different sizes of the key & value. 
+ for i in range(1, 8): + self.check_sizing(key_size=i, val_size=i, nmax=2**i) + + def test_parameterized_types(self): + """https://github.com/numba/numba/issues/6401""" + + register_model(ParametrizedType)(UniTupleModel) + + @typeof_impl.register(Parametrized) + def typeof_unit(val, c): + return ParametrizedType(val) + + @unbox(ParametrizedType) + def unbox_parametrized(typ, obj, context): + return context.unbox(types.UniTuple(typ.dtype, len(typ)), obj) + + def dict_vs_cache_vs_parametrized(v): + assert 0 + + @overload(dict_vs_cache_vs_parametrized) + def ol_dict_vs_cache_vs_parametrized(v): + typ = v + + def objmode_vs_cache_vs_parametrized_impl(v): + # typed.List shows same behaviour after fix for #6397 + d = typed.Dict.empty(types.unicode_type, typ) + d['data'] = v + + return objmode_vs_cache_vs_parametrized_impl + + @jit(nopython=True, cache=True) + def set_parametrized_data(x, y): + # Has had a tendency to segfault when the compiled function + # was loaded from cache in a different process than the one + # it was originally compiled in. 
+ # The new process is simulated below by resetting the dispatchers + # and the target context + dict_vs_cache_vs_parametrized(x) + dict_vs_cache_vs_parametrized(y) + + x, y = Parametrized(('a', 'b')), Parametrized(('a',)) + set_parametrized_data(x, y) + + # reset dispatchers and targetctx to force re-load from cache as if a + # new process would jit the function + set_parametrized_data._make_finalizer()() + set_parametrized_data._reset_overloads() + set_parametrized_data.targetctx.init() + + for ii in range(50): # <- sometimes works a few times + self.assertIsNone(set_parametrized_data(x, y)) diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_dictobject.py b/venv/lib/python3.10/site-packages/numba/tests/test_dictobject.py new file mode 100644 index 0000000000000000000000000000000000000000..6b91bfeb33c8a604b6f7739ec2e42a904fadcd7f --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_dictobject.py @@ -0,0 +1,2561 @@ +""" +Testing numba implementation of the numba dictionary. + +The tests here only check that the numba typing and codegen are working +correctly. Detailed testing of the underlying dictionary operations is done +in test_dictimpl.py. 
+""" + +import sys +import warnings + +import numpy as np + +from numba import njit, literally +from numba import int32, int64, float32, float64 +from numba import typeof +from numba.typed import Dict, dictobject, List +from numba.typed.typedobjectutils import _sentry_safe_cast +from numba.core.errors import TypingError +from numba.core import types +from numba.tests.support import (TestCase, MemoryLeakMixin, unittest, + override_config, forbid_codegen) +from numba.experimental import jitclass +from numba.extending import overload + + +class TestDictObject(MemoryLeakMixin, TestCase): + def test_dict_bool(self): + """ + Exercise bool(dict) + """ + @njit + def foo(n): + d = dictobject.new_dict(int32, float32) + for i in range(n): + d[i] = i + 1 + return bool(d) + + # Insert nothing + self.assertEqual(foo(n=0), False) + # Insert 1 entry + self.assertEqual(foo(n=1), True) + # Insert 2 entries + self.assertEqual(foo(n=2), True) + # Insert 100 entries + self.assertEqual(foo(n=100), True) + + def test_dict_create(self): + """ + Exercise dictionary creation, insertion and len + """ + @njit + def foo(n): + d = dictobject.new_dict(int32, float32) + for i in range(n): + d[i] = i + 1 + return len(d) + + # Insert nothing + self.assertEqual(foo(n=0), 0) + # Insert 1 entry + self.assertEqual(foo(n=1), 1) + # Insert 2 entries + self.assertEqual(foo(n=2), 2) + # Insert 100 entries + self.assertEqual(foo(n=100), 100) + + def test_dict_get(self): + """ + Exercise dictionary creation, insertion and get + """ + @njit + def foo(n, targets): + d = dictobject.new_dict(int32, float64) + # insertion loop + for i in range(n): + d[i] = i + # retrieval loop + output = [] + for t in targets: + output.append(d.get(t)) + return output + + self.assertEqual(foo(5, [0, 1, 9]), [0, 1, None]) + self.assertEqual(foo(10, [0, 1, 9]), [0, 1, 9]) + self.assertEqual(foo(10, [-1, 9, 1]), [None, 9, 1]) + + def test_dict_get_with_default(self): + """ + Exercise dict.get(k, d) where d is set + """ + @njit + def 
foo(n, target, default): + d = dictobject.new_dict(int32, float64) + # insertion loop + for i in range(n): + d[i] = i + # retrieval loop + return d.get(target, default) + + self.assertEqual(foo(5, 3, -1), 3) + self.assertEqual(foo(5, 5, -1), -1) + + def test_dict_getitem(self): + """ + Exercise dictionary __getitem__ + """ + @njit + def foo(keys, vals, target): + d = dictobject.new_dict(int32, float64) + # insertion + for k, v in zip(keys, vals): + d[k] = v + + # lookup + return d[target] + + keys = [1, 2, 3] + vals = [0.1, 0.2, 0.3] + self.assertEqual(foo(keys, vals, 1), 0.1) + self.assertEqual(foo(keys, vals, 2), 0.2) + self.assertEqual(foo(keys, vals, 3), 0.3) + # check no leak so far + self.assert_no_memory_leak() + # disable leak check for exception test + self.disable_leak_check() + with self.assertRaises(KeyError): + foo(keys, vals, 0) + with self.assertRaises(KeyError): + foo(keys, vals, 4) + + def test_dict_popitem(self): + """ + Exercise dictionary .popitem + """ + @njit + def foo(keys, vals): + d = dictobject.new_dict(int32, float64) + # insertion + for k, v in zip(keys, vals): + d[k] = v + + # popitem + return d.popitem() + + keys = [1, 2, 3] + vals = [0.1, 0.2, 0.3] + for i in range(1, len(keys)): + self.assertEqual( + foo(keys[:i], vals[:i]), + (keys[i - 1], vals[i - 1]), + ) + + def test_dict_popitem_many(self): + """ + Exercise dictionary .popitem + """ + + @njit + def core(d, npop): + # popitem + keysum, valsum = 0, 0 + for _ in range(npop): + k, v = d.popitem() + keysum += k + valsum -= v + return keysum, valsum + + @njit + def foo(keys, vals, npop): + d = dictobject.new_dict(int32, int32) + # insertion + for k, v in zip(keys, vals): + d[k] = v + + return core(d, npop) + + keys = [1, 2, 3] + vals = [10, 20, 30] + + for i in range(len(keys)): + self.assertEqual( + foo(keys, vals, npop=3), + core.py_func(dict(zip(keys, vals)), npop=3), + ) + + # check no leak so far + self.assert_no_memory_leak() + # disable leak check for exception test + 
self.disable_leak_check() + + with self.assertRaises(KeyError): + foo(keys, vals, npop=4) + + def test_dict_pop(self): + """ + Exercise dictionary .pop + """ + @njit + def foo(keys, vals, target): + d = dictobject.new_dict(int32, float64) + # insertion + for k, v in zip(keys, vals): + d[k] = v + + # popitem + return d.pop(target, None), len(d) + + keys = [1, 2, 3] + vals = [0.1, 0.2, 0.3] + + self.assertEqual(foo(keys, vals, 1), (0.1, 2)) + self.assertEqual(foo(keys, vals, 2), (0.2, 2)) + self.assertEqual(foo(keys, vals, 3), (0.3, 2)) + self.assertEqual(foo(keys, vals, 0), (None, 3)) + + # check no leak so far + self.assert_no_memory_leak() + # disable leak check for exception test + self.disable_leak_check() + + @njit + def foo(): + d = dictobject.new_dict(int32, float64) + # popitem + return d.pop(0) + + with self.assertRaises(KeyError): + foo() + + def test_dict_pop_many(self): + """ + Exercise dictionary .pop + """ + + @njit + def core(d, pops): + total = 0 + for k in pops: + total += k + d.pop(k, 0.123) + len(d) + total *= 2 + return total + + @njit + def foo(keys, vals, pops): + d = dictobject.new_dict(int32, float64) + # insertion + for k, v in zip(keys, vals): + d[k] = v + # popitem + return core(d, pops) + + keys = [1, 2, 3] + vals = [0.1, 0.2, 0.3] + pops = [2, 3, 3, 1, 0, 2, 1, 0, -1] + + self.assertEqual( + foo(keys, vals, pops), + core.py_func(dict(zip(keys, vals)), pops), + ) + + def test_dict_delitem(self): + @njit + def foo(keys, vals, target): + d = dictobject.new_dict(int32, float64) + # insertion + for k, v in zip(keys, vals): + d[k] = v + del d[target] + return len(d), d.get(target) + + keys = [1, 2, 3] + vals = [0.1, 0.2, 0.3] + self.assertEqual(foo(keys, vals, 1), (2, None)) + self.assertEqual(foo(keys, vals, 2), (2, None)) + self.assertEqual(foo(keys, vals, 3), (2, None)) + # check no leak so far + self.assert_no_memory_leak() + # disable leak check for exception test + self.disable_leak_check() + with self.assertRaises(KeyError): + foo(keys, 
vals, 0) + + def test_dict_clear(self): + """ + Exercise dict.clear + """ + @njit + def foo(keys, vals): + d = dictobject.new_dict(int32, float64) + # insertion + for k, v in zip(keys, vals): + d[k] = v + b4 = len(d) + # clear + d.clear() + return b4, len(d) + + keys = [1, 2, 3] + vals = [0.1, 0.2, 0.3] + self.assertEqual(foo(keys, vals), (3, 0)) + + def test_dict_items(self): + """ + Exercise dict.items + """ + @njit + def foo(keys, vals): + d = dictobject.new_dict(int32, float64) + # insertion + for k, v in zip(keys, vals): + d[k] = v + out = [] + for kv in d.items(): + out.append(kv) + return out + + keys = [1, 2, 3] + vals = [0.1, 0.2, 0.3] + + self.assertEqual( + foo(keys, vals), + list(zip(keys, vals)), + ) + + # Test .items() on empty dict + @njit + def foo(): + d = dictobject.new_dict(int32, float64) + out = [] + for kv in d.items(): + out.append(kv) + return out + + self.assertEqual(foo(), []) + + def test_dict_keys(self): + """ + Exercise dict.keys + """ + @njit + def foo(keys, vals): + d = dictobject.new_dict(int32, float64) + # insertion + for k, v in zip(keys, vals): + d[k] = v + out = [] + for k in d.keys(): + out.append(k) + return out + + keys = [1, 2, 3] + vals = [0.1, 0.2, 0.3] + + self.assertEqual( + foo(keys, vals), + keys, + ) + + def test_dict_keys_len(self): + """ + Exercise len(dict.keys()) + """ + @njit + def foo(keys, vals): + d = dictobject.new_dict(int32, float64) + # insertion + for k, v in zip(keys, vals): + d[k] = v + return len(d.keys()) + + keys = [1, 2, 3] + vals = [0.1, 0.2, 0.3] + + self.assertEqual( + foo(keys, vals), + len(keys), + ) + + def test_dict_values(self): + """ + Exercise dict.values + """ + @njit + def foo(keys, vals): + d = dictobject.new_dict(int32, float64) + # insertion + for k, v in zip(keys, vals): + d[k] = v + out = [] + for v in d.values(): + out.append(v) + return out + + keys = [1, 2, 3] + vals = [0.1, 0.2, 0.3] + + self.assertEqual( + foo(keys, vals), + vals, + ) + + def test_dict_values_len(self): + """ + 
Exercise len(dict.values()) + """ + @njit + def foo(keys, vals): + d = dictobject.new_dict(int32, float64) + # insertion + for k, v in zip(keys, vals): + d[k] = v + return len(d.values()) + + keys = [1, 2, 3] + vals = [0.1, 0.2, 0.3] + + self.assertEqual( + foo(keys, vals), + len(vals), + ) + + def test_dict_items_len(self): + """ + Exercise len(dict.items()) + """ + @njit + def foo(keys, vals): + d = dictobject.new_dict(int32, float64) + # insertion + for k, v in zip(keys, vals): + d[k] = v + return len(d.items()) + + keys = [1, 2, 3] + vals = [0.1, 0.2, 0.3] + self.assertPreciseEqual( + foo(keys, vals), + len(vals), + ) + + def test_dict_iter(self): + """ + Exercise iter(dict) + """ + @njit + def foo(keys, vals): + d = dictobject.new_dict(int32, float64) + # insertion + for k, v in zip(keys, vals): + d[k] = v + out = [] + for k in d: + out.append(k) + return out + + keys = [1, 2, 3] + vals = [0.1, 0.2, 0.3] + + self.assertEqual( + foo(keys, vals), + [1, 2, 3] + ) + + def test_dict_contains(self): + """ + Exercise operator.contains + """ + @njit + def foo(keys, vals, checklist): + d = dictobject.new_dict(int32, float64) + # insertion + for k, v in zip(keys, vals): + d[k] = v + out = [] + for k in checklist: + out.append(k in d) + return out + + keys = [1, 2, 3] + vals = [0.1, 0.2, 0.3] + + self.assertEqual( + foo(keys, vals, [2, 3, 4, 1, 0]), + [True, True, False, True, False], + ) + + def test_dict_copy(self): + """ + Exercise dict.copy + """ + @njit + def foo(keys, vals): + d = dictobject.new_dict(int32, float64) + # insertion + for k, v in zip(keys, vals): + d[k] = v + return list(d.copy().items()) + + keys = list(range(20)) + vals = [x + i / 100 for i, x in enumerate(keys)] + out = foo(keys, vals) + self.assertEqual(out, list(zip(keys, vals))) + + def test_dict_setdefault(self): + """ + Exercise dict.setdefault + """ + @njit + def foo(): + d = dictobject.new_dict(int32, float64) + d.setdefault(1, 1.2) # used because key is not in + a = d.get(1) + d[1] = 2.3 + 
b = d.get(1) + d[2] = 3.4 + d.setdefault(2, 4.5) # not used because key is in + c = d.get(2) + return a, b, c + + self.assertEqual(foo(), (1.2, 2.3, 3.4)) + + def test_dict_equality(self): + """ + Exercise dict.__eq__ and .__ne__ + """ + @njit + def foo(na, nb, fa, fb): + da = dictobject.new_dict(int32, float64) + db = dictobject.new_dict(int32, float64) + for i in range(na): + da[i] = i * fa + for i in range(nb): + db[i] = i * fb + return da == db, da != db + + # Same keys and values + self.assertEqual(foo(10, 10, 3, 3), (True, False)) + # Same keys and diff values + self.assertEqual(foo(10, 10, 3, 3.1), (False, True)) + # LHS has more keys + self.assertEqual(foo(11, 10, 3, 3), (False, True)) + # RHS has more keys + self.assertEqual(foo(10, 11, 3, 3), (False, True)) + + def test_dict_equality_more(self): + """ + Exercise dict.__eq__ + """ + @njit + def foo(ak, av, bk, bv): + # The key-value types are different in the two dictionaries + da = dictobject.new_dict(int32, float64) + db = dictobject.new_dict(int64, float32) + for i in range(len(ak)): + da[ak[i]] = av[i] + for i in range(len(bk)): + db[bk[i]] = bv[i] + return da == db + + # Simple equal case + ak = [1, 2, 3] + av = [2, 3, 4] + bk = [1, 2, 3] + bv = [2, 3, 4] + self.assertTrue(foo(ak, av, bk, bv)) + + # Equal with replacement + ak = [1, 2, 3] + av = [2, 3, 4] + bk = [1, 2, 2, 3] + bv = [2, 1, 3, 4] + self.assertTrue(foo(ak, av, bk, bv)) + + # Diff values + ak = [1, 2, 3] + av = [2, 3, 4] + bk = [1, 2, 3] + bv = [2, 1, 4] + self.assertFalse(foo(ak, av, bk, bv)) + + # Diff keys + ak = [0, 2, 3] + av = [2, 3, 4] + bk = [1, 2, 3] + bv = [2, 3, 4] + self.assertFalse(foo(ak, av, bk, bv)) + + def test_dict_equality_diff_type(self): + """ + Exercise dict.__eq__ + """ + @njit + def foo(na, b): + da = dictobject.new_dict(int32, float64) + for i in range(na): + da[i] = i + return da == b + + # dict != int + self.assertFalse(foo(10, 1)) + # dict != tuple[int] + self.assertFalse(foo(10, (1,))) + + def 
test_dict_to_from_meminfo(self): + """ + Exercise dictobject.{_as_meminfo, _from_meminfo} + """ + @njit + def make_content(nelem): + for i in range(nelem): + yield i, i + (i + 1) / 100 + + @njit + def boxer(nelem): + d = dictobject.new_dict(int32, float64) + for k, v in make_content(nelem): + d[k] = v + return dictobject._as_meminfo(d) + + dcttype = types.DictType(int32, float64) + + @njit + def unboxer(mi): + d = dictobject._from_meminfo(mi, dcttype) + return list(d.items()) + + mi = boxer(10) + self.assertEqual(mi.refcount, 1) + + got = unboxer(mi) + expected = list(make_content.py_func(10)) + self.assertEqual(got, expected) + + def test_001_cannot_downcast_key(self): + @njit + def foo(n): + d = dictobject.new_dict(int32, float64) + for i in range(n): + d[i] = i + 1 + # bad key type + z = d.get(1j) + return z + + with self.assertRaises(TypingError) as raises: + foo(10) + self.assertIn( + 'cannot safely cast complex128 to int32', + str(raises.exception), + ) + + def test_002_cannot_downcast_default(self): + @njit + def foo(n): + d = dictobject.new_dict(int32, float64) + for i in range(n): + d[i] = i + 1 + # bad default type + z = d.get(2 * n, 1j) + return z + + with self.assertRaises(TypingError) as raises: + foo(10) + self.assertIn( + 'cannot safely cast complex128 to float64', + str(raises.exception), + ) + + def test_003_cannot_downcast_key(self): + @njit + def foo(n): + d = dictobject.new_dict(int32, float64) + for i in range(n): + d[i] = i + 1 + # bad cast!? + z = d.get(2.4) + return z + + # should raise + with self.assertRaises(TypingError) as raises: + foo(10) + self.assertIn( + 'cannot safely cast float64 to int32', + str(raises.exception), + ) + + def test_004_cannot_downcast_key(self): + @njit + def foo(): + d = dictobject.new_dict(int32, float64) + # should raise TypingError + d[1j] = 7. 
+ + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + 'cannot safely cast complex128 to int32', + str(raises.exception), + ) + + def test_005_cannot_downcast_value(self): + @njit + def foo(): + d = dictobject.new_dict(int32, float64) + # should raise TypingError + d[1] = 1j + + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + 'cannot safely cast complex128 to float64', + str(raises.exception), + ) + + def test_006_cannot_downcast_key(self): + @njit + def foo(): + d = dictobject.new_dict(int32, float64) + # raise TypingError + d[11.5] + + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + 'cannot safely cast float64 to int32', + str(raises.exception), + ) + + @unittest.skipUnless(sys.maxsize > 2 ** 32, "64 bit test only") + def test_007_collision_checks(self): + # this checks collisions in real life for 64bit systems + @njit + def foo(v1, v2): + d = dictobject.new_dict(int64, float64) + c1 = np.uint64(2 ** 61 - 1) + c2 = np.uint64(0) + assert hash(c1) == hash(c2) + d[c1] = v1 + d[c2] = v2 + return (d[c1], d[c2]) + + a, b = 10., 20. + x, y = foo(a, b) + self.assertEqual(x, a) + self.assertEqual(y, b) + + def test_008_lifo_popitem(self): + # check that (keys, vals) are LIFO .popitem() + @njit + def foo(n): + d = dictobject.new_dict(int32, float64) + for i in range(n): + d[i] = i + 1 + keys = [] + vals = [] + for i in range(n): + tmp = d.popitem() + keys.append(tmp[0]) + vals.append(tmp[1]) + return keys, vals + + z = 10 + gk, gv = foo(z) + + self.assertEqual(gk, [x for x in reversed(range(z))]) + self.assertEqual(gv, [x + 1 for x in reversed(range(z))]) + + def test_010_cannot_downcast_default(self): + @njit + def foo(): + d = dictobject.new_dict(int32, float64) + d[0] = 6. + d[1] = 7. 
+ # pop'd default must have same type as value + d.pop(11, 12j) + + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + "cannot safely cast complex128 to float64", + str(raises.exception), + ) + + def test_011_cannot_downcast_key(self): + @njit + def foo(): + d = dictobject.new_dict(int32, float64) + d[0] = 6. + d[1] = 7. + # pop'd key must have same type as key + d.pop(11j) + + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + "cannot safely cast complex128 to int32", + str(raises.exception), + ) + + def test_012_cannot_downcast_key(self): + @njit + def foo(): + d = dictobject.new_dict(int32, float64) + d[0] = 6. + # invalid key type + return 1j in d + + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + "cannot safely cast complex128 to int32", + str(raises.exception), + ) + + def test_013_contains_empty_dict(self): + @njit + def foo(): + d = dictobject.new_dict(int32, float64) + # contains on empty dict + return 1 in d + + self.assertFalse(foo()) + + def test_014_not_contains_empty_dict(self): + @njit + def foo(): + d = dictobject.new_dict(int32, float64) + # not contains empty dict + return 1 not in d + + self.assertTrue(foo()) + + def test_015_dict_clear(self): + @njit + def foo(n): + d = dictobject.new_dict(int32, float64) + for i in range(n): + d[i] = i + 1 + x = len(d) + d.clear() + y = len(d) + return x, y + + m = 10 + self.assertEqual(foo(m), (m, 0)) + + def test_016_cannot_downcast_key(self): + @njit + def foo(): + d = dictobject.new_dict(int32, float64) + # key is wrong type + d.setdefault(1j, 12.) 
+ + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + "cannot safely cast complex128 to int32", + str(raises.exception), + ) + + def test_017_cannot_downcast_default(self): + @njit + def foo(): + d = dictobject.new_dict(int32, float64) + # default value is wrong type + d.setdefault(1, 12.j) + + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + "cannot safely cast complex128 to float64", + str(raises.exception), + ) + + def test_018_keys_iter_are_views(self): + # this is broken somewhere in llvmlite, intent of test is to check if + # keys behaves like a view or not + @njit + def foo(): + d = dictobject.new_dict(int32, float64) + d[11] = 12. + k1 = d.keys() + d[22] = 9. + k2 = d.keys() + rk1 = [x for x in k1] + rk2 = [x for x in k2] + return rk1, rk2 + + a, b = foo() + self.assertEqual(a, b) + self.assertEqual(a, [11, 22]) + + # Not implemented yet + @unittest.expectedFailure + def test_019(self): + # should keys/vals be set-like? + @njit + def foo(): + d = dictobject.new_dict(int32, float64) + d[11] = 12. + d[22] = 9. + k2 = d.keys() & {12, } + return k2 + + print(foo()) + + def test_020_string_key(self): + @njit + def foo(): + d = dictobject.new_dict(types.unicode_type, float64) + d['a'] = 1. + d['b'] = 2. + d['c'] = 3. + d['d'] = 4. + out = [] + for x in d.items(): + out.append(x) + return out, d['a'] + + items, da = foo() + self.assertEqual(items, [('a', 1.), ('b', 2.), ('c', 3.), ('d', 4)]) + self.assertEqual(da, 1.) + + def test_021_long_str_key(self): + @njit + def foo(): + d = dictobject.new_dict(types.unicode_type, float64) + tmp = [] + for i in range(10000): + tmp.append('a') + s = ''.join(tmp) + d[s] = 1. + out = list(d.items()) + return out + self.assertEqual(foo(), [('a' * 10000, 1)]) + + def test_022_references_juggle(self): + @njit + def foo(): + d = dictobject.new_dict(int32, float64) + e = d + d[1] = 12. + e[2] = 14. + e = dictobject.new_dict(int32, float64) + e[1] = 100. + e[2] = 1000. 
+ f = d + d = e + + k1 = [x for x in d.items()] + k2 = [x for x in e.items()] + k3 = [x for x in f.items()] + + return k1, k2, k3 + + k1, k2, k3 = foo() + self.assertEqual(k1, [(1, 100.0), (2, 1000.0)]) + self.assertEqual(k2, [(1, 100.0), (2, 1000.0)]) + self.assertEqual(k3, [(1, 12), (2, 14)]) + + def test_023_closure(self): + @njit + def foo(): + d = dictobject.new_dict(int32, float64) + + def bar(): + d[1] = 12. + d[2] = 14. + bar() + return [x for x in d.keys()] + + self.assertEqual(foo(), [1, 2]) + + def test_024_unicode_getitem_keys(self): + # See issue #6135 + @njit + def foo(): + s = 'a\u1234' + d = {s[0] : 1} + return d['a'] + + self.assertEqual(foo(), foo.py_func()) + + @njit + def foo(): + s = 'abc\u1234' + d = {s[:1] : 1} + return d['a'] + + self.assertEqual(foo(), foo.py_func()) + + def test_issue6570_alignment_padding(self): + # Create a key type that is 12-bytes long on a 8-byte aligned system + # so that the a 4-byte padding is needed. + # If the 4-byte padding is not zero-filled, it will have garbage data + # that affects key matching in the lookup. + keyty = types.Tuple([types.uint64, types.float32]) + + @njit + def foo(): + d = dictobject.new_dict(keyty, float64) + t1 = np.array([3], dtype=np.uint64) + t2 = np.array([5.67], dtype=np.float32) + v1 = np.array([10.23], dtype=np.float32) + d[(t1[0], t2[0])] = v1[0] + return (t1[0], t2[0]) in d + + self.assertTrue(foo()) + + def test_dict_update(self): + """ + Tests dict.update works with various dictionaries. + """ + n = 10 + + def f1(n): + """ + Test update with a regular dictionary. + """ + d1 = {i: i + 1 for i in range(n)} + d2 = {3 * i: i for i in range(n)} + d1.update(d2) + return d1 + + py_func = f1 + cfunc = njit()(f1) + a = py_func(n) + b = cfunc(n) + self.assertEqual(a, b) + + def f2(n): + """ + Test update where one of the dictionaries + is created as a Python literal. 
+ """ + d1 = { + 1: 2, + 3: 4, + 5: 6 + } + d2 = {3 * i: i for i in range(n)} + d1.update(d2) + return d1 + + py_func = f2 + cfunc = njit()(f2) + a = py_func(n) + b = cfunc(n) + self.assertEqual(a, b) + + +class TestDictTypeCasting(TestCase): + def check_good(self, fromty, toty): + _sentry_safe_cast(fromty, toty) + + def check_bad(self, fromty, toty): + with self.assertRaises(TypingError) as raises: + _sentry_safe_cast(fromty, toty) + self.assertIn( + 'cannot safely cast {fromty} to {toty}'.format(**locals()), + str(raises.exception), + ) + + def test_cast_int_to(self): + self.check_good(types.int32, types.float32) + self.check_good(types.int32, types.float64) + self.check_good(types.int32, types.complex128) + self.check_good(types.int64, types.complex128) + self.check_bad(types.int32, types.complex64) + self.check_good(types.int8, types.complex64) + + def test_cast_float_to(self): + self.check_good(types.float32, types.float64) + self.check_good(types.float32, types.complex64) + self.check_good(types.float64, types.complex128) + + def test_cast_bool_to(self): + self.check_good(types.boolean, types.int32) + self.check_good(types.boolean, types.float64) + self.check_good(types.boolean, types.complex128) + + +class TestTypedDict(MemoryLeakMixin, TestCase): + def test_basic(self): + d = Dict.empty(int32, float32) + # len + self.assertEqual(len(d), 0) + # setitems + d[1] = 1 + d[2] = 2.3 + d[3] = 3.4 + self.assertEqual(len(d), 3) + # keys + self.assertEqual(list(d.keys()), [1, 2, 3]) + # values + for x, y in zip(list(d.values()), [1, 2.3, 3.4]): + self.assertAlmostEqual(x, y, places=4) + # getitem + self.assertAlmostEqual(d[1], 1) + self.assertAlmostEqual(d[2], 2.3, places=4) + self.assertAlmostEqual(d[3], 3.4, places=4) + # deltiem + del d[2] + self.assertEqual(len(d), 2) + # get + self.assertIsNone(d.get(2)) + # setdefault + d.setdefault(2, 100) + d.setdefault(3, 200) + self.assertEqual(d[2], 100) + self.assertAlmostEqual(d[3], 3.4, places=4) + # update + 
d.update({4: 5, 5: 6}) + self.assertAlmostEqual(d[4], 5) + self.assertAlmostEqual(d[5], 6) + # contains + self.assertTrue(4 in d) + # items + pyd = dict(d.items()) + self.assertEqual(len(pyd), len(d)) + # pop + self.assertAlmostEqual(d.pop(4), 5) + # popitem + nelem = len(d) + k, v = d.popitem() + self.assertEqual(len(d), nelem - 1) + self.assertTrue(k not in d) + # __eq__ & copy + copied = d.copy() + self.assertEqual(copied, d) + self.assertEqual(list(copied.items()), list(d.items())) + + def test_copy_from_dict(self): + expect = {k: float(v) for k, v in zip(range(10), range(10, 20))} + nbd = Dict.empty(int32, float64) + for k, v in expect.items(): + nbd[k] = v + got = dict(nbd) + self.assertEqual(got, expect) + + def test_compiled(self): + @njit + def producer(): + d = Dict.empty(int32, float64) + d[1] = 1.23 + return d + + @njit + def consumer(d): + return d[1] + + d = producer() + val = consumer(d) + self.assertEqual(val, 1.23) + + def test_gh7908(self): + d = Dict.empty( + key_type=types.Tuple([types.uint32, + types.uint32]), + value_type=int64) + + d[(1, 1)] = 12345 + self.assertEqual(d[(1, 1)], d.get((1, 1))) + + def check_stringify(self, strfn, prefix=False): + nbd = Dict.empty(int32, int32) + d = {} + nbd[1] = 2 + d[1] = 2 + checker = self.assertIn if prefix else self.assertEqual + checker(strfn(d), strfn(nbd)) + nbd[2] = 3 + d[2] = 3 + checker(strfn(d), strfn(nbd)) + for i in range(10, 20): + nbd[i] = i + 1 + d[i] = i + 1 + checker(strfn(d), strfn(nbd)) + if prefix: + self.assertTrue(strfn(nbd).startswith('DictType')) + + def test_repr(self): + self.check_stringify(repr, prefix=True) + + def test_str(self): + self.check_stringify(str) + + +class DictIterableCtor: + + def test_iterable_type_constructor(self): + # https://docs.python.org/3/library/stdtypes.html#dict + @njit + def func1(a, b): + d = Dict(zip(a, b)) + return d + + @njit + def func2(a_, b): + a = range(3) + return Dict(zip(a, b)) + + @njit + def func3(a_, b): + a = [0, 1, 2] + return 
Dict(zip(a, b)) + + @njit + def func4(a, b): + c = zip(a, b) + return Dict(zip(a, zip(c, a))) + + @njit + def func5(a, b): + return Dict(zip(zip(a, b), b)) + + @njit + def func6(items): + return Dict(items) + + @njit + def func7(k, v): + return Dict({k: v}) # mapping - not supported + + @njit + def func8(k, v): + d = Dict() + d[k] = v + return d + + def _get_dict(py_dict): + d = Dict() + for k, v in py_dict.items(): + d[k] = v + return d + + vals = ( + (func1, [(0, 1, 2), 'abc'], _get_dict({0: 'a', 1: 'b', 2: 'c'})), + (func2, [(0, 1, 2), 'abc'], _get_dict({0: 'a', 1: 'b', 2: 'c'})), + (func3, [(0, 1, 2), 'abc'], _get_dict({0: 'a', 1: 'b', 2: 'c'})), + (func4, [(0, 1, 2), 'abc'], _get_dict( + {0: ((0, 'a'), 0), 1: ((1, 'b'), 1), 2: ((2, 'c'), 2)})), + (func5, [(0, 1, 2), 'abc'], _get_dict( + {(0, 'a'): 'a', (1, 'b'): 'b', (2, 'c'): 'c'})), + # (func6, [(),], Dict({})), + (func6, [((1, 'a'), (3, 'b')),], _get_dict({1: 'a', 3: 'b'})), + (func1, ['key', _get_dict({1: 'abc'})], _get_dict({'k': 1})), + (func8, ['key', _get_dict({1: 'abc'})], _get_dict( + {'key': _get_dict({1: 'abc'})})), + (func8, ['key', List([1, 2, 3])], _get_dict( + {'key': List([1, 2, 3])})), + ) + + for func, args, expected in vals: + if self.jit_enabled: + got = func(*args) + else: + got = func.py_func(*args) + self.assertPreciseEqual(expected, got) + + +class TestDictIterableCtorJit(TestCase, DictIterableCtor): + + def setUp(self): + self.jit_enabled = True + + def test_exception_no_iterable_arg(self): + @njit + def ctor(): + return Dict(3) + + msg = ".*No implementation of function.*" + with self.assertRaisesRegex(TypingError, msg): + ctor() + + def test_exception_dict_mapping(self): + @njit + def ctor(): + return Dict({1: 2, 3: 4}) + + msg = ".*No implementation of function.*" + with self.assertRaisesRegex(TypingError, msg): + ctor() + + def test_exception_setitem(self): + @njit + def ctor(): + return Dict(((1, 'a'), (2, 'b', 3))) + + msg = ".*No implementation of function.*" + with 
self.assertRaisesRegex(TypingError, msg): + ctor() + + +class TestDictIterableCtorNoJit(TestCase, DictIterableCtor): + + def setUp(self): + self.jit_enabled = False + + def test_exception_nargs(self): + msg = 'Dict expect at most 1 argument, got 2' + with self.assertRaisesRegex(TypingError, msg): + Dict(1, 2) + + def test_exception_mapping_ctor(self): + msg = r'.*dict\(mapping\) is not supported.*' # noqa: W605 + with self.assertRaisesRegex(TypingError, msg): + Dict({1: 2}) + + def test_exception_non_iterable_arg(self): + msg = '.*object is not iterable.*' + with self.assertRaisesRegex(TypingError, msg): + Dict(3) + + def test_exception_setitem(self): + msg = ".*dictionary update sequence element #1 has length 3.*" + with self.assertRaisesRegex(ValueError, msg): + Dict(((1, 'a'), (2, 'b', 3))) + + +class TestDictRefctTypes(MemoryLeakMixin, TestCase): + + def test_str_key(self): + @njit + def foo(): + d = Dict.empty( + key_type=types.unicode_type, + value_type=types.int32, + ) + d["123"] = 123 + d["321"] = 321 + return d + + d = foo() + self.assertEqual(d['123'], 123) + self.assertEqual(d['321'], 321) + expect = {'123': 123, '321': 321} + self.assertEqual(dict(d), expect) + # Test insert replacement + d['123'] = 231 + expect['123'] = 231 + self.assertEqual(d['123'], 231) + self.assertEqual(dict(d), expect) + # Test dictionary growth + nelem = 100 + for i in range(nelem): + d[str(i)] = i + expect[str(i)] = i + for i in range(nelem): + self.assertEqual(d[str(i)], i) + self.assertEqual(dict(d), expect) + + def test_str_val(self): + @njit + def foo(): + d = Dict.empty( + key_type=types.int32, + value_type=types.unicode_type, + ) + d[123] = "123" + d[321] = "321" + return d + + d = foo() + self.assertEqual(d[123], '123') + self.assertEqual(d[321], '321') + expect = {123: '123', 321: '321'} + self.assertEqual(dict(d), expect) + # Test insert replacement + d[123] = "231" + expect[123] = "231" + self.assertEqual(dict(d), expect) + # Test dictionary growth + nelem = 1 + for 
i in range(nelem): + d[i] = str(i) + expect[i] = str(i) + for i in range(nelem): + self.assertEqual(d[i], str(i)) + self.assertEqual(dict(d), expect) + + def test_str_key_array_value(self): + np.random.seed(123) + d = Dict.empty( + key_type=types.unicode_type, + value_type=types.float64[:], + ) + expect = [] + expect.append(np.random.random(10)) + d['mass'] = expect[-1] + expect.append(np.random.random(20)) + d['velocity'] = expect[-1] + for i in range(100): + expect.append(np.random.random(i)) + d[str(i)] = expect[-1] + self.assertEqual(len(d), len(expect)) + self.assertPreciseEqual(d['mass'], expect[0]) + self.assertPreciseEqual(d['velocity'], expect[1]) + # Ordering is kept + for got, exp in zip(d.values(), expect): + self.assertPreciseEqual(got, exp) + + # Try deleting + self.assertTrue('mass' in d) + self.assertTrue('velocity' in d) + del d['mass'] + self.assertFalse('mass' in d) + del d['velocity'] + self.assertFalse('velocity' in d) + del expect[0:2] + + for i in range(90): + k, v = d.popitem() + w = expect.pop() + self.assertPreciseEqual(v, w) + + # Trigger a resize + expect.append(np.random.random(10)) + d["last"] = expect[-1] + + # Ordering is kept + for got, exp in zip(d.values(), expect): + self.assertPreciseEqual(got, exp) + + def test_dict_of_dict_int_keyval(self): + def inner_numba_dict(): + d = Dict.empty( + key_type=types.intp, + value_type=types.intp, + ) + return d + + d = Dict.empty( + key_type=types.intp, + value_type=types.DictType(types.intp, types.intp), + ) + + def usecase(d, make_inner_dict): + for i in range(100): + mid = make_inner_dict() + for j in range(i + 1): + mid[j] = j * 10000 + d[i] = mid + return d + + got = usecase(d, inner_numba_dict) + expect = usecase({}, dict) + + self.assertIsInstance(expect, dict) + + self.assertEqual(dict(got), expect) + + # Delete items + for where in [12, 3, 6, 8, 10]: + del got[where] + del expect[where] + self.assertEqual(dict(got), expect) + + def test_dict_of_dict_npm(self): + inner_dict_ty = 
types.DictType(types.intp, types.intp) + + @njit + def inner_numba_dict(): + d = Dict.empty( + key_type=types.intp, + value_type=types.intp, + ) + return d + + @njit + def foo(count): + d = Dict.empty( + key_type=types.intp, + value_type=inner_dict_ty, + ) + for i in range(count): + d[i] = inner_numba_dict() + for j in range(i + 1): + d[i][j] = j + + return d + + d = foo(100) + ct = 0 + for k, dd in d.items(): + ct += 1 + self.assertEqual(len(dd), k + 1) + for kk, vv in dd.items(): + self.assertEqual(kk, vv) + + self.assertEqual(ct, 100) + + def test_delitem(self): + d = Dict.empty(types.int64, types.unicode_type) + d[1] = 'apple' + + @njit + def foo(x, k): + del x[1] + + foo(d, 1) + self.assertEqual(len(d), 0) + self.assertFalse(d) + + def test_getitem_return_type(self): + # Dict.__getitem__ must return non-optional type. + d = Dict.empty(types.int64, types.int64[:]) + d[1] = np.arange(10, dtype=np.int64) + + @njit + def foo(d): + d[1] += 100 + return d[1] + + foo(d) + # Return type is an array, not optional + retty = foo.nopython_signatures[0].return_type + self.assertIsInstance(retty, types.Array) + self.assertNotIsInstance(retty, types.Optional) + # Value is correctly updated + self.assertPreciseEqual(d[1], np.arange(10, dtype=np.int64) + 100) + + def test_storage_model_mismatch(self): + # https://github.com/numba/numba/issues/4520 + # check for storage model mismatch in refcount ops generation + dct = Dict() + ref = [ + ("a", True, "a"), + ("b", False, "b"), + ("c", False, "c"), + ] + # populate + for x in ref: + dct[x] = x + # test + for i, x in enumerate(ref): + self.assertEqual(dct[x], x) + + +class TestDictForbiddenTypes(TestCase): + def assert_disallow(self, expect, callable): + with self.assertRaises(TypingError) as raises: + callable() + msg = str(raises.exception) + self.assertIn(expect, msg) + + def assert_disallow_key(self, ty): + msg = '{} as key is forbidden'.format(ty) + self.assert_disallow(msg, lambda: Dict.empty(ty, types.intp)) + + @njit + def 
foo(): + Dict.empty(ty, types.intp) + self.assert_disallow(msg, foo) + + def assert_disallow_value(self, ty): + msg = '{} as value is forbidden'.format(ty) + self.assert_disallow(msg, lambda: Dict.empty(types.intp, ty)) + + @njit + def foo(): + Dict.empty(types.intp, ty) + self.assert_disallow(msg, foo) + + def test_disallow_list(self): + self.assert_disallow_key(types.List(types.intp)) + self.assert_disallow_value(types.List(types.intp)) + + def test_disallow_set(self): + self.assert_disallow_key(types.Set(types.intp)) + self.assert_disallow_value(types.Set(types.intp)) + + +class TestDictInferred(TestCase): + def test_simple_literal(self): + @njit + def foo(): + d = Dict() + d[123] = 321 + return d + + k, v = 123, 321 + d = foo() + self.assertEqual(dict(d), {k: v}) + self.assertEqual(typeof(d).key_type, typeof(k)) + self.assertEqual(typeof(d).value_type, typeof(v)) + + def test_simple_args(self): + @njit + def foo(k, v): + d = Dict() + d[k] = v + return d + + k, v = 123, 321 + d = foo(k, v) + self.assertEqual(dict(d), {k: v}) + self.assertEqual(typeof(d).key_type, typeof(k)) + self.assertEqual(typeof(d).value_type, typeof(v)) + + def test_simple_upcast(self): + @njit + def foo(k, v, w): + d = Dict() + d[k] = v + d[k] = w + return d + + k, v, w = 123, 32.1, 321 + d = foo(k, v, w) + self.assertEqual(dict(d), {k: w}) + self.assertEqual(typeof(d).key_type, typeof(k)) + self.assertEqual(typeof(d).value_type, typeof(v)) + + def test_conflicting_value_type(self): + @njit + def foo(k, v, w): + d = Dict() + d[k] = v + d[k] = w + return d + + k, v, w = 123, 321, 32.1 + with self.assertRaises(TypingError) as raises: + foo(k, v, w) + self.assertIn( + 'cannot safely cast float64 to {}'.format(typeof(v)), + str(raises.exception), + ) + + def test_conflicting_key_type(self): + @njit + def foo(k, h, v): + d = Dict() + d[k] = v + d[h] = v + return d + + k, h, v = 123, 123.1, 321 + with self.assertRaises(TypingError) as raises: + foo(k, h, v) + self.assertIn( + 'cannot safely cast 
float64 to {}'.format(typeof(v)), + str(raises.exception), + ) + + def test_conflict_key_type_non_number(self): + # Allow non-number types to cast unsafely + @njit + def foo(k1, v1, k2): + d = Dict() + d[k1] = v1 + return d, d[k2] + + # k2 will unsafely downcast typeof(k1) + k1 = (np.int8(1), np.int8(2)) + k2 = (np.int32(1), np.int32(2)) + v1 = np.intp(123) + + with warnings.catch_warnings(record=True) as w: + d, dk2 = foo(k1, v1, k2) + self.assertEqual(len(w), 1) + # Make sure the warning is about unsafe cast + msg = 'unsafe cast from UniTuple(int32 x 2) to UniTuple(int8 x 2)' + self.assertIn(msg, str(w[0])) + + keys = list(d.keys()) + self.assertEqual(keys[0], (1, 2)) + self.assertEqual(dk2, d[(np.int32(1), np.int32(2))]) + + def test_ifelse_filled_both_branches(self): + @njit + def foo(k, v): + d = Dict() + if k: + d[k] = v + else: + d[0xdead] = v + 1 + + return d + + k, v = 123, 321 + d = foo(k, v) + self.assertEqual(dict(d), {k: v}) + k, v = 0, 0 + d = foo(k, v) + self.assertEqual(dict(d), {0xdead: v + 1}) + + def test_ifelse_empty_one_branch(self): + @njit + def foo(k, v): + d = Dict() + if k: + d[k] = v + + return d + + k, v = 123, 321 + d = foo(k, v) + self.assertEqual(dict(d), {k: v}) + k, v = 0, 0 + d = foo(k, v) + self.assertEqual(dict(d), {}) + self.assertEqual(typeof(d).key_type, typeof(k)) + self.assertEqual(typeof(d).value_type, typeof(v)) + + def test_loop(self): + @njit + def foo(ks, vs): + d = Dict() + for k, v in zip(ks, vs): + d[k] = v + return d + + vs = list(range(4)) + ks = list(map(lambda x : x + 100, vs)) + d = foo(ks, vs) + self.assertEqual(dict(d), dict(zip(ks, vs))) + + def test_unused(self): + @njit + def foo(): + d = Dict() + return d + + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + "imprecise type", + str(raises.exception) + ) + + def test_define_after_use(self): + @njit + def foo(define): + d = Dict() + ct = len(d) + for k, v in d.items(): + ct += v + + if define: + # This will set the type + d[1] = 2 + 
return ct, d, len(d) + + ct, d, n = foo(True) + self.assertEqual(ct, 0) + self.assertEqual(n, 1) + self.assertEqual(dict(d), {1: 2}) + + ct, d, n = foo(False) + self.assertEqual(ct, 0) + self.assertEqual(dict(d), {}) + self.assertEqual(n, 0) + + def test_dict_of_dict(self): + @njit + def foo(k1, k2, v): + d = Dict() + z1 = Dict() + z1[k1 + 1] = v + k1 + z2 = Dict() + z2[k2 + 2] = v + k2 + d[k1] = z1 + d[k2] = z2 + return d + + k1, k2, v = 100, 200, 321 + d = foo(k1, k2, v) + self.assertEqual( + dict(d), + { + k1: {k1 + 1: k1 + v}, + k2: {k2 + 2: k2 + v}, + }, + ) + + def test_comprehension_basic(self): + @njit + def foo(): + return {i: 2 * i for i in range(10)} + + self.assertEqual(foo(), foo.py_func()) + + def test_comprehension_basic_mixed_type(self): + @njit + def foo(): + return {i: float(j) for i, j in zip(range(10), range(10, 0, -1))} + + self.assertEqual(foo(), foo.py_func()) + + def test_comprehension_involved(self): + @njit + def foo(): + a = {0: 'A', 1: 'B', 2: 'C'} + return {3 + i: a[i] for i in range(3)} + + self.assertEqual(foo(), foo.py_func()) + + def test_comprehension_fail_mixed_type(self): + @njit + def foo(): + a = {0: 'A', 1: 'B', 2: 1j} + return {3 + i: a[i] for i in range(3)} + + with self.assertRaises(TypingError) as e: + foo() + + excstr = str(e.exception) + self.assertIn("Cannot cast complex128 to unicode_type", excstr) + + +class TestNonCompiledInfer(TestCase): + def test_check_untyped_dict_ops(self): + # Check operation on untyped dictionary + d = Dict() + self.assertFalse(d._typed) + self.assertEqual(len(d), 0) + self.assertEqual(str(d), str({})) + self.assertEqual(list(iter(d)), []) + # Test __getitem__ + with self.assertRaises(KeyError) as raises: + d[1] + self.assertEqual(str(raises.exception), str(KeyError(1))) + # Test __delitem__ + with self.assertRaises(KeyError) as raises: + del d[1] + self.assertEqual(str(raises.exception), str(KeyError(1))) + # Test .pop + with self.assertRaises(KeyError): + d.pop(1) + 
self.assertEqual(str(raises.exception), str(KeyError(1))) + # Test .pop + self.assertIs(d.pop(1, None), None) + # Test .get + self.assertIs(d.get(1), None) + # Test .popitem + with self.assertRaises(KeyError) as raises: + d.popitem() + self.assertEqual(str(raises.exception), + str(KeyError('dictionary is empty'))) + # Test setdefault(k) + with self.assertRaises(TypeError) as raises: + d.setdefault(1) + self.assertEqual( + str(raises.exception), + str(TypeError('invalid operation on untyped dictionary')), + ) + # Test __contains__ + self.assertFalse(1 in d) + # It's untyped + self.assertFalse(d._typed) + + def test_getitem(self): + # Test __getitem__ + d = Dict() + d[1] = 2 + # It's typed now + self.assertTrue(d._typed) + self.assertEqual(d[1], 2) + + def test_setdefault(self): + # Test setdefault(k, d) + d = Dict() + d.setdefault(1, 2) + # It's typed now + self.assertTrue(d._typed) + self.assertEqual(d[1], 2) + + +@jitclass(spec=[('a', types.intp)]) +class Bag(object): + def __init__(self, a): + self.a = a + + def __hash__(self): + return hash(self.a) + + +class TestDictWithJitclass(TestCase): + def test_jitclass_as_value(self): + @njit + def foo(x): + d = Dict() + d[0] = x + d[1] = Bag(101) + return d + + d = foo(Bag(a=100)) + self.assertEqual(d[0].a, 100) + self.assertEqual(d[1].a, 101) + + +class TestNoJit(TestCase): + """Exercise dictionary creation with JIT disabled. 
""" + + def test_dict_create_no_jit_using_new_dict(self): + with override_config('DISABLE_JIT', True): + with forbid_codegen(): + d = dictobject.new_dict(int32, float32) + self.assertEqual(type(d), dict) + + def test_dict_create_no_jit_using_Dict(self): + with override_config('DISABLE_JIT', True): + with forbid_codegen(): + d = Dict() + self.assertEqual(type(d), dict) + + def test_dict_create_no_jit_using_empty(self): + with override_config('DISABLE_JIT', True): + with forbid_codegen(): + d = Dict.empty(types.int32, types.float32) + self.assertEqual(type(d), dict) + + +class TestDictIterator(TestCase): + def test_dict_iterator(self): + @njit + def fun1(): + dd = Dict.empty(key_type=types.intp, + value_type=types.intp) + dd[0] = 10 + dd[1] = 20 + dd[2] = 30 + + return list(dd.keys()), list(dd.values()) + + @njit + def fun2(): + dd = Dict.empty(key_type=types.intp, + value_type=types.intp) + dd[4] = 77 + dd[5] = 88 + dd[6] = 99 + + return list(dd.keys()), list(dd.values()) + res1 = fun1() + res2 = fun2() + + self.assertEqual([0,1,2], res1[0]) + self.assertEqual([10,20,30], res1[1]) + self.assertEqual([4,5,6], res2[0]) + self.assertEqual([77,88,99], res2[1]) + + +class TestTypedDictInitialValues(MemoryLeakMixin, TestCase): + """Tests that typed dictionaries carry their initial value if present""" + + def test_homogeneous_and_literal(self): + def bar(d): + ... + + @overload(bar) + def ol_bar(d): + if d.initial_value is None: + return lambda d: literally(d) + self.assertTrue(isinstance(d, types.DictType)) + self.assertEqual(d.initial_value, {'a': 1, 'b': 2, 'c': 3}) + self.assertEqual(hasattr(d, 'literal_value'), False) + return lambda d: d + + @njit + def foo(): + # keys and values all have literal representation + x = {'a': 1, 'b': 2, 'c': 3} + bar(x) + + foo() + + def test_heterogeneous_but_castable_to_homogeneous(self): + def bar(d): + ... 
+ + @overload(bar) + def ol_bar(d): + self.assertTrue(isinstance(d, types.DictType)) + self.assertEqual(d.initial_value, None) + self.assertEqual(hasattr(d, 'literal_value'), False) + return lambda d: d + + @njit + def foo(): + # This dictionary will be typed based on 1j, i.e. complex128 + # as the values are not all literals, there's no "initial_value" + # available irrespective of whether it's possible to rip this + # information out of the bytecode. + x = {'a': 1j, 'b': 2, 'c': 3} + bar(x) + + foo() + + def test_heterogeneous_but_not_castable_to_homogeneous(self): + def bar(d): + ... + + @overload(bar) + def ol_bar(d): + a = {'a': 1, 'b': 2j, 'c': 3} + + def specific_ty(z): + return types.literal(z) if types.maybe_literal(z) else typeof(z) + expected = {types.literal(x): specific_ty(y) for x, y in a.items()} + self.assertTrue(isinstance(d, types.LiteralStrKeyDict)) + self.assertEqual(d.literal_value, expected) + self.assertEqual(hasattr(d, 'initial_value'), False) + return lambda d: d + + @njit + def foo(): + # This dictionary will be typed based on 1, i.e. intp, as the values + # cannot all be cast to this type, but the keys are literal strings + # this is a LiteralStrKey[Dict], there's no initial_value but there + # is a literal_value. + x = {'a': 1, 'b': 2j, 'c': 3} + bar(x) + + foo() + + def test_mutation_not_carried(self): + def bar(d): + ... 
+ + @overload(bar) + def ol_bar(d): + if d.initial_value is None: + return lambda d: literally(d) + self.assertTrue(isinstance(d, types.DictType)) + self.assertEqual(d.initial_value, {'a': 1, 'b': 2, 'c': 3}) + return lambda d: d + + @njit + def foo(): + # This dictionary is mutated, check the initial_value carries + # correctly and is not mutated + x = {'a': 1, 'b': 2, 'c': 3} + x['d'] = 4 + bar(x) + + foo() + + def test_mutation_not_carried_single_function(self): + # this is another pattern for using literally + + @njit + def nop(*args): + pass + + for fn, iv in (nop, None), (literally, {'a': 1, 'b': 2, 'c': 3}): + @njit + def baz(x): + pass + + def bar(z): + pass + + @overload(bar) + def ol_bar(z): + def impl(z): + fn(z) + baz(z) + return impl + + @njit + def foo(): + x = {'a': 1, 'b': 2, 'c': 3} + bar(x) + x['d'] = 4 + return x + + foo() + # baz should be specialised based on literally being invoked and + # the literal/unliteral arriving at the call site + larg = baz.signatures[0][0] + self.assertEqual(larg.initial_value, iv) + + def test_unify_across_function_call(self): + + @njit + def bar(x): + o = {1: 2} + if x: + o = {2: 3} + return o + + @njit + def foo(x): + if x: + d = {3: 4} + else: + d = bar(x) + return d + + e1 = Dict() + e1[3] = 4 + e2 = Dict() + e2[1] = 2 + self.assertEqual(foo(True), e1) + self.assertEqual(foo(False), e2) + + +class TestLiteralStrKeyDict(MemoryLeakMixin, TestCase): + """ Tests for dictionaries with string keys that can map to anything!""" + + def test_basic_const_lowering_boxing(self): + @njit + def foo(): + ld = {'a': 1, 'b': 2j, 'c': 'd'} + return (ld['a'], ld['b'], ld['c']) + + self.assertEqual(foo(), (1, 2j, 'd')) + + def test_basic_nonconst_in_scope(self): + @njit + def foo(x): + y = x + 5 + e = True if y > 2 else False + ld = {'a': 1, 'b': 2j, 'c': 'd', 'non_const': e} + return ld['non_const'] + + # Recall that key non_const has a value of a known type, bool, and it's + # value is stuffed in at run time, this is permitted as 
the dictionary + # is immutable in type + self.assertTrue(foo(34)) + self.assertFalse(foo(-100)) + + def test_basic_nonconst_freevar(self): + e = 5 + + def bar(x): + pass + + @overload(bar) + def ol_bar(x): + self.assertEqual(x.literal_value, + {types.literal('a'): types.literal(1), + types.literal('b'): typeof(2j), + types.literal('c'): types.literal('d'), + types.literal('d'): types.literal(5)}) + + def impl(x): + pass + return impl + + @njit + def foo(): + ld = {'a': 1, 'b': 2j, 'c': 'd', 'd': e} + bar(ld) + + foo() + + def test_literal_value(self): + + def bar(x): + pass + + @overload(bar) + def ol_bar(x): + self.assertEqual(x.literal_value, + {types.literal('a'): types.literal(1), + types.literal('b'): typeof(2j), + types.literal('c'): types.literal('d')}) + + def impl(x): + pass + return impl + + @njit + def foo(): + ld = {'a': 1, 'b': 2j, 'c': 'd'} + bar(ld) + + foo() + + def test_list_and_array_as_value(self): + + def bar(x): + pass + + @overload(bar) + def ol_bar(x): + self.assertEqual(x.literal_value, + {types.literal('a'): types.literal(1), + types.literal('b'): + types.List(types.intp, initial_value=[1,2,3]), + types.literal('c'): typeof(np.zeros(5))}) + + def impl(x): + pass + return impl + + @njit + def foo(): + b = [1, 2, 3] + ld = {'a': 1, 'b': b, 'c': np.zeros(5)} + bar(ld) + + foo() + + def test_repeated_key_literal_value(self): + + def bar(x): + pass + + @overload(bar) + def ol_bar(x): + # order is important, 'a' was seen first, but updated later + self.assertEqual(x.literal_value, + {types.literal('a'): types.literal('aaaa'), + types.literal('b'): typeof(2j), + types.literal('c'): types.literal('d')}) + + def impl(x): + pass + return impl + + @njit + def foo(): + ld = {'a': 1, 'a': 10, 'b': 2j, 'c': 'd', 'a': 'aaaa'} # noqa #F601 + bar(ld) + + foo() + + def test_read_only(self): + + def _len(): + ld = {'a': 1, 'b': 2j, 'c': 'd'} + return len(ld) + + def static_getitem(): + ld = {'a': 1, 'b': 2j, 'c': 'd'} + return ld['b'] + + def contains(): + 
ld = {'a': 1, 'b': 2j, 'c': 'd'} + return 'b' in ld, 'f' in ld + + def copy(): + ld = {'a': 1, 'b': 2j, 'c': 'd'} + new = ld.copy() + return ld == new + + rdonlys = (_len, static_getitem, contains, copy) + + for test in rdonlys: + with self.subTest(test.__name__): + self.assertPreciseEqual(njit(test)(), test()) + + def test_mutation_failure(self): + + def setitem(): + ld = {'a': 1, 'b': 2j, 'c': 'd'} + ld['a'] = 12 + + def delitem(): + ld = {'a': 1, 'b': 2j, 'c': 'd'} + del ld['a'] + + def popitem(): + ld = {'a': 1, 'b': 2j, 'c': 'd'} + ld.popitem() + + def pop(): + ld = {'a': 1, 'b': 2j, 'c': 'd'} + ld.pop() + + def clear(): + ld = {'a': 1, 'b': 2j, 'c': 'd'} + ld.clear() + + def setdefault(): + ld = {'a': 1, 'b': 2j, 'c': 'd'} + ld.setdefault('f', 1) + + illegals = (setitem, delitem, popitem, pop, clear, setdefault) + + for test in illegals: + with self.subTest(test.__name__): + with self.assertRaises(TypingError) as raises: + njit(test)() + expect = "Cannot mutate a literal dictionary" + self.assertIn(expect, str(raises.exception)) + + def test_get(self): + + @njit + def get(x): + ld = {'a': 2j, 'c': 'd'} + return ld.get(x) + + @njit + def getitem(x): + ld = {'a': 2j, 'c': 'd'} + return ld[x] + + for test in (get, getitem): + with self.subTest(test.__name__): + with self.assertRaises(TypingError) as raises: + test('a') + expect = "Cannot get{item}() on a literal dictionary" + self.assertIn(expect, str(raises.exception)) + + def test_dict_keys(self): + + @njit + def foo(): + ld = {'a': 2j, 'c': 'd'} + return [x for x in ld.keys()] + + self.assertEqual(foo(), ['a', 'c']) + + def test_dict_values(self): + + @njit + def foo(): + ld = {'a': 2j, 'c': 'd'} + return ld.values() + + self.assertEqual(foo(), (2j, 'd')) + + def test_dict_items(self): + @njit + def foo(): + ld = {'a': 2j, 'c': 'd', 'f': np.zeros((5))} + return ld.items() + + self.assertPreciseEqual(foo(), + (('a', 2j), ('c', 'd'), ('f', np.zeros((5))))) + + def test_dict_return(self): + + @njit + def foo(): 
+ ld = {'a': 2j, 'c': 'd'} + return ld + + # escaping heterogeneous dictionary is not supported + with self.assertRaises(TypeError) as raises: + foo() + + excstr = str(raises.exception) + self.assertIn("cannot convert native LiteralStrKey", excstr) + + def test_dict_unify(self): + @njit + def foo(x): + if x + 7 > 4: + a = {'a': 2j, 'c': 'd', 'e': np.zeros(4)} + else: + # Note the use of a different literal str for key 'c' + a = {'a': 5j, 'c': 'CAT', 'e': np.zeros((5,))} + return a['c'] + + self.assertEqual(foo(100), 'd') + self.assertEqual(foo(-100), 'CAT') + self.assertEqual(foo(100), foo.py_func(100)) + self.assertEqual(foo(-100), foo.py_func(-100)) + + def test_dict_not_unify(self): + + @njit + def key_mismatch(x): + if x + 7 > 4: + a = {'BAD_KEY': 2j, 'c': 'd', 'e': np.zeros(4)} + else: + a = {'a': 5j, 'c': 'CAT', 'e': np.zeros((5,))} + # prevents inline of return on py310 + py310_defeat1 = 1 # noqa + py310_defeat2 = 2 # noqa + py310_defeat3 = 3 # noqa + py310_defeat4 = 4 # noqa + return a['a'] + + with self.assertRaises(TypingError) as raises: + key_mismatch(100) + + self.assertIn("Cannot unify LiteralStrKey", str(raises.exception)) + + @njit + def value_type_mismatch(x): + if x + 7 > 4: + a = {'a': 2j, 'c': 'd', 'e': np.zeros((4, 3))} + else: + a = {'a': 5j, 'c': 'CAT', 'e': np.zeros((5,))} + # prevents inline of return on py310 + py310_defeat1 = 1 # noqa + py310_defeat2 = 2 # noqa + py310_defeat3 = 3 # noqa + py310_defeat4 = 4 # noqa + return a['a'] + + with self.assertRaises(TypingError) as raises: + value_type_mismatch(100) + + self.assertIn("Cannot unify LiteralStrKey", str(raises.exception)) + + def test_dict_value_coercion(self): + # checks that things coerce or not! 
+ + p = {# safe and no conversion: TypedDict + (np.int32, np.int32): types.DictType, + # safe and convertible: TypedDict + (np.int32, np.int8): types.DictType, + # safe convertible: TypedDict + (np.complex128, np.int32): types.DictType, + # unsafe not convertible: LiteralStrKey + (np.int32, np.complex128): types.LiteralStrKeyDict, + # unsafe not convertible: LiteralStrKey + (np.int32, np.array): types.LiteralStrKeyDict, + # unsafe not convertible: LiteralStrKey + (np.array, np.int32): types.LiteralStrKeyDict, + # unsafe not convertible: LiteralStrKey + (np.int8, np.int32): types.LiteralStrKeyDict, + # unsafe not convertible: LiteralStrKey (issue #6420 case) + (np.int64, np.float64): types.LiteralStrKeyDict,} + + def bar(x): + pass + + for dts, container in p.items(): + @overload(bar) + def ol_bar(x): + self.assertTrue(isinstance(x, container)) + + def impl(x): + pass + return impl + + ty1, ty2 = dts + + @njit + def foo(): + d = {'a': ty1(1), 'b': ty2(2)} + bar(d) + + foo() + + def test_build_map_op_code(self): + # tests building dictionaries via `build_map`, which, for statically + # determinable str key->things cases is just a single key:value + # any other build_map would either end up as being non-const str keys + # or keys of some non-string type and therefore not considered. 
+ def bar(x): + pass + + @overload(bar) + def ol_bar(x): + def impl(x): + pass + return impl + + @njit + def foo(): + a = {'a': {'b1': 10, 'b2': 'string'}} + bar(a) + + foo() + + def test_dict_as_arg(self): + @njit + def bar(fake_kwargs=None): + if fake_kwargs is not None: + # Add 10 to array in key 'd' + fake_kwargs['d'][:] += 10 + + @njit + def foo(): + a = 1 + b = 2j + c = 'string' + d = np.zeros(3) + e = {'a': a, 'b': b, 'c': c, 'd': d} + bar(fake_kwargs=e) + return e['d'] + + np.testing.assert_allclose(foo(), np.ones(3) * 10) + + def test_dict_with_single_literallist_value(self): + #see issue #6094 + @njit + def foo(): + z = {"A": [lambda a: 2 * a, "B"]} + return z["A"][0](5) + + self.assertPreciseEqual(foo(), foo.py_func()) + + def test_tuple_not_in_mro(self): + # Related to #6094, make sure that LiteralStrKey does not inherit from + # types.BaseTuple as this breaks isinstance checks. + def bar(x): + pass + + @overload(bar) + def ol_bar(x): + self.assertFalse(isinstance(x, types.BaseTuple)) + self.assertTrue(isinstance(x, types.LiteralStrKeyDict)) + return lambda x: ... + + @njit + def foo(): + d = {'a': 1, 'b': 'c'} + bar(d) + + foo() + + def test_const_key_not_in_dict(self): + + @njit + def foo(): + a = {'not_a': 2j, 'c': 'd', 'e': np.zeros(4)} + return a['a'] + + with self.assertRaises(TypingError) as raises: + foo() + + self.assertIn("Key 'a' is not in dict.", str(raises.exception)) + + def test_uncommon_identifiers(self): + # Tests uncommon identifiers like numerical values and operators in + # the key fields. See #6518 and #7416. + + # Numerical values in keys + @njit + def foo(): + d = {'0': np.ones(5), '1': 4} + return len(d) + + self.assertPreciseEqual(foo(), foo.py_func()) + + # operators in keys + @njit + def bar(): + d = {'+': np.ones(5), 'x--': 4} + return len(d) + + self.assertPreciseEqual(bar(), bar.py_func()) + + def test_update_error(self): + # Tests that dict.update produces a reasonable + # error with a LiteralStrKeyDict input. 
+ @njit + def foo(): + + d1 = { + 'a': 2, + 'b': 4, + 'c': 'a' + } + d1.update({'x': 3}) + return d1 + + with self.assertRaises(TypingError) as raises: + foo() + + self.assertIn( + "Cannot mutate a literal dictionary", + str(raises.exception) + ) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_dicts.py b/venv/lib/python3.10/site-packages/numba/tests/test_dicts.py new file mode 100644 index 0000000000000000000000000000000000000000..7327bc90837528e45e7a6bfc33e4f332d6b09cdf --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_dicts.py @@ -0,0 +1,233 @@ +import numpy as np +from numba import njit, jit +from numba.core.errors import TypingError +import unittest +from numba.tests.support import TestCase + + +def build_map(): + return {0: 1, 2: 3} + +def build_map_from_local_vars(): + # There used to be a crash due to wrong IR generation for STORE_MAP + x = TestCase + return {0: x, x: 1} + + +class DictTestCase(TestCase): + + def check(self, pyfunc): + cfunc = jit(forceobj=True)(pyfunc) + self.assertPreciseEqual(pyfunc(), cfunc()) + + def test_build_map(self): + self.check(build_map) + + def test_build_map_from_local_vars(self): + self.check(build_map_from_local_vars) + + +class TestCompiledDict(TestCase): + """Testing `dict()` and `{}` usage that are redirected to + `numba.typed.Dict`. 
+ """ + def test_use_dict(self): + # Test dict() + @njit + def foo(): + d = dict() + d[1] = 2 + return d + + d = foo() + self.assertEqual(d, {1: 2}) + + def test_use_dict_iterable_args(self): + # Test dict(iterable) + @njit + def dict_iterable_1(a, b): + d = dict(zip(a, b)) + return d + + @njit + def dict_iterable_2(): + # from python docs + return dict([('sape', 4139), ('guido', 4127), ('jack', 4098)]) + + inps = ( + ([1, 2, 3], [4, 5, 6]), + (np.arange(4), np.arange(4)), + ([1, 2, 3], 'abc'), + ([1, 2, 3, 4], 'abc'), + ) + for a, b in inps: + d = dict_iterable_1(a, b) + self.assertEqual(d, dict(zip(a, b))) + + self.assertEqual(dict_iterable_2(), dict_iterable_2.py_func()) + + def test_ctor_iterable_tuple(self): + @njit + def ctor(): + return dict(((1, 2), (1, 2))) + + expected = dict({1: 2}) + got = ctor() + self.assertEqual(expected, got) + + def test_unsupported_dict_usage(self): + # Test dict(dict()) + from numba.core.typing.dictdecl import _message_dict_support + + @njit + def ctor1(): + d = dict() + d[1] = 2 + return dict(d) + + @njit + def ctor2(): + return dict(((1, 2), (3, 'a'))) + + @njit + def ctor3(): + return dict((('a', 'b', 'c'), ('d', 'e', 'f'))) + + @njit + def ctor4(): + return dict((({}, 1), ({}, 2))) + + _non_iter_args = "Non-iterable args used in dict(iterable)" + _dict_upd_item_len = "dictionary update sequence element has length 3;" + _unhashable_type = "Unhashable type" + + inputs = [ + (ctor1, TypingError, _message_dict_support), + (ctor2, TypingError, _non_iter_args), + (ctor3, TypingError, _dict_upd_item_len), + (ctor4, TypingError, _unhashable_type), + ] + + for func, exc, msg in inputs: + with self.assertRaises(exc) as raises: + func() + + self.assertIn(msg, str(raises.exception)) + + def test_use_curlybraces(self): + # Test {} with empty args + @njit + def foo(): + d = {} + d[1] = 2 + return d + + d = foo() + self.assertEqual(d, {1: 2}) + + def test_use_curlybraces_with_init1(self): + # Test {} with 1 item + @njit + def foo(): + 
return {1: 2} + + d = foo() + self.assertEqual(d, {1: 2}) + + def test_use_curlybraces_with_initmany(self): + # Test {} with many items + @njit + def foo(): + return {1: 2.2, 3: 4.4, 5: 6.6} + + d = foo() + self.assertEqual(d, {1: 2.2, 3: 4.4, 5: 6.6}) + + def test_curlybraces_init_with_coercion(self): + # Type coercion at dict init is tested + @njit + def foo(): + return {1: 2.2, 3: 4, 5: 6} + + self.assertEqual(foo(), foo.py_func()) + + def test_use_curlybraces_with_manyvar(self): + # Test using variable in {} + @njit + def foo(x, y): + return {x: 1, y: x + y} + + x, y = 10, 20 + self.assertEqual(foo(x, y), foo.py_func(x, y)) + + def test_mixed_curlybraces_and_dict(self): + # Test mixed use of {} and dict() + @njit + def foo(): + k = dict() + k[1] = {1: 3} + k[2] = {4: 2} + return k + + self.assertEqual(foo(), foo.py_func()) + + def test_dict_use_with_none_value(self): + # Test that NoneType cannot be used as value for Dict + @njit + def foo(): + k = {1: None} + return k + + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + "Dict.value_type cannot be of type none", + str(raises.exception), + ) + + + def test_dict_use_with_optional_value(self): + # Test that Optional cannot be used as value for Dict + @njit + def foo(choice): + optional = 2.5 if choice else None + k = {1: optional} + return k + + with self.assertRaises(TypingError) as raises: + foo(True) + self.assertIn( + "Dict.value_type cannot be of type OptionalType(float64)", + str(raises.exception), + ) + + def test_dict_use_with_optional_key(self): + # Test that Optional cannot be used as a key for Dict + @njit + def foo(choice): + k = {2.5 if choice else None: 1} + return k + + with self.assertRaises(TypingError) as raises: + foo(True) + self.assertIn( + "Dict.key_type cannot be of type OptionalType(float64)", + str(raises.exception), + ) + + def test_dict_use_with_none_key(self): + # Test that NoneType cannot be used as a key for Dict + @njit + def foo(): + k = {None: 1} + return k 
+ + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + "Dict.key_type cannot be of type none", + str(raises.exception), + ) + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_dispatcher.py b/venv/lib/python3.10/site-packages/numba/tests/test_dispatcher.py new file mode 100644 index 0000000000000000000000000000000000000000..1aa5e9232dba1bc08fd4b962a86ca5265264c388 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_dispatcher.py @@ -0,0 +1,1193 @@ +import multiprocessing +import platform +import threading +import pickle +import weakref +from itertools import chain +from io import StringIO + +import numpy as np + +from numba import njit, jit, typeof, vectorize +from numba.core import types, errors +from numba import _dispatcher +from numba.tests.support import TestCase, captured_stdout +from numba.np.numpy_support import as_dtype +from numba.core.dispatcher import Dispatcher +from numba.extending import overload +from numba.tests.support import needs_lapack, SerialMixin +from numba.testing.main import _TIMEOUT as _RUNNER_TIMEOUT +import unittest + + +_TEST_TIMEOUT = _RUNNER_TIMEOUT - 60. 
+ + +try: + import jinja2 +except ImportError: + jinja2 = None + +try: + import pygments +except ImportError: + pygments = None + +_is_armv7l = platform.machine() == 'armv7l' + + +def dummy(x): + return x + + +def add(x, y): + return x + y + + +def addsub(x, y, z): + return x - y + z + + +def addsub_defaults(x, y=2, z=3): + return x - y + z + + +def star_defaults(x, y=2, *z): + return x, y, z + + +def generated_usecase(x, y=5): + if isinstance(x, types.Complex): + def impl(x, y): + return x + y + else: + def impl(x, y): + return x - y + return impl + + +def bad_generated_usecase(x, y=5): + if isinstance(x, types.Complex): + def impl(x): + return x + else: + def impl(x, y=6): + return x - y + return impl + + +def dtype_generated_usecase(a, b, dtype=None): + if isinstance(dtype, (types.misc.NoneType, types.misc.Omitted)): + out_dtype = np.result_type(*(np.dtype(ary.dtype.name) + for ary in (a, b))) + elif isinstance(dtype, (types.DType, types.NumberClass)): + out_dtype = as_dtype(dtype) + else: + raise TypeError("Unhandled Type %s" % type(dtype)) + + def _fn(a, b, dtype=None): + return np.ones(a.shape, dtype=out_dtype) + + return _fn + + +class BaseTest(TestCase): + + jit_args = dict(nopython=True) + + def compile_func(self, pyfunc): + def check(*args, **kwargs): + expected = pyfunc(*args, **kwargs) + result = f(*args, **kwargs) + self.assertPreciseEqual(result, expected) + f = jit(**self.jit_args)(pyfunc) + return f, check + + +class TestDispatcher(BaseTest): + + def test_equality(self): + @jit + def foo(x): + return x + + @jit + def bar(x): + return x + + # Written this way to verify `==` returns a bool (gh-5838). Using + # `assertTrue(foo == foo)` or `assertEqual(foo, foo)` would defeat the + # purpose of this test. 
+ self.assertEqual(foo == foo, True) + self.assertEqual(foo == bar, False) + self.assertEqual(foo == None, False) # noqa: E711 + + def test_dyn_pyfunc(self): + @jit + def foo(x): + return x + + foo(1) + [cr] = foo.overloads.values() + # __module__ must be match that of foo + self.assertEqual(cr.entry_point.__module__, foo.py_func.__module__) + + def test_no_argument(self): + @jit + def foo(): + return 1 + + # Just make sure this doesn't crash + foo() + + def test_coerce_input_types(self): + # Issue #486: do not allow unsafe conversions if we can still + # compile other specializations. + c_add = jit(nopython=True)(add) + self.assertPreciseEqual(c_add(123, 456), add(123, 456)) + self.assertPreciseEqual(c_add(12.3, 45.6), add(12.3, 45.6)) + self.assertPreciseEqual(c_add(12.3, 45.6j), add(12.3, 45.6j)) + self.assertPreciseEqual(c_add(12300000000, 456), add(12300000000, 456)) + + # Now force compilation of only a single specialization + c_add = jit('(i4, i4)', nopython=True)(add) + self.assertPreciseEqual(c_add(123, 456), add(123, 456)) + # Implicit (unsafe) conversion of float to int + self.assertPreciseEqual(c_add(12.3, 45.6), add(12, 45)) + with self.assertRaises(TypeError): + # Implicit conversion of complex to int disallowed + c_add(12.3, 45.6j) + + def test_ambiguous_new_version(self): + """Test compiling new version in an ambiguous case + """ + @jit + def foo(a, b): + return a + b + + INT = 1 + FLT = 1.5 + self.assertAlmostEqual(foo(INT, FLT), INT + FLT) + self.assertEqual(len(foo.overloads), 1) + self.assertAlmostEqual(foo(FLT, INT), FLT + INT) + self.assertEqual(len(foo.overloads), 2) + self.assertAlmostEqual(foo(FLT, FLT), FLT + FLT) + self.assertEqual(len(foo.overloads), 3) + # The following call is ambiguous because (int, int) can resolve + # to (float, int) or (int, float) with equal weight. 
+ self.assertAlmostEqual(foo(1, 1), INT + INT) + self.assertEqual(len(foo.overloads), 4, "didn't compile a new " + "version") + + def test_lock(self): + """ + Test that (lazy) compiling from several threads at once doesn't + produce errors (see issue #908). + """ + errors = [] + + @jit + def foo(x): + return x + 1 + + def wrapper(): + try: + self.assertEqual(foo(1), 2) + except Exception as e: + errors.append(e) + + threads = [threading.Thread(target=wrapper) for i in range(16)] + for t in threads: + t.start() + for t in threads: + t.join() + self.assertFalse(errors) + + def test_explicit_signatures(self): + f = jit("(int64,int64)")(add) + # Approximate match (unsafe conversion) + self.assertPreciseEqual(f(1.5, 2.5), 3) + self.assertEqual(len(f.overloads), 1, f.overloads) + f = jit(["(int64,int64)", "(float64,float64)"])(add) + # Exact signature matches + self.assertPreciseEqual(f(1, 2), 3) + self.assertPreciseEqual(f(1.5, 2.5), 4.0) + # Approximate match (int32 -> float64 is a safe conversion) + self.assertPreciseEqual(f(np.int32(1), 2.5), 3.5) + # No conversion + with self.assertRaises(TypeError) as cm: + f(1j, 1j) + self.assertIn("No matching definition", str(cm.exception)) + self.assertEqual(len(f.overloads), 2, f.overloads) + # A more interesting one... + f = jit(["(float32,float32)", "(float64,float64)"])(add) + self.assertPreciseEqual(f(np.float32(1), np.float32(2**-25)), 1.0) + self.assertPreciseEqual(f(1, 2**-25), 1.0000000298023224) + # Fail to resolve ambiguity between the two best overloads + f = jit(["(float32,float64)", + "(float64,float32)", + "(int64,int64)"])(add) + with self.assertRaises(TypeError) as cm: + f(1.0, 2.0) + # The two best matches are output in the error message, as well + # as the actual argument types. 
+ self.assertRegex( + str(cm.exception), + r"Ambiguous overloading for ]*> " + r"\(float64, float64\):\n" + r"\(float32, float64\) -> float64\n" + r"\(float64, float32\) -> float64" + ) + # The integer signature is not part of the best matches + self.assertNotIn("int64", str(cm.exception)) + + def test_signature_mismatch(self): + tmpl = ("Signature mismatch: %d argument types given, but function " + "takes 2 arguments") + with self.assertRaises(TypeError) as cm: + jit("()")(add) + self.assertIn(tmpl % 0, str(cm.exception)) + with self.assertRaises(TypeError) as cm: + jit("(intc,)")(add) + self.assertIn(tmpl % 1, str(cm.exception)) + with self.assertRaises(TypeError) as cm: + jit("(intc,intc,intc)")(add) + self.assertIn(tmpl % 3, str(cm.exception)) + # With forceobj=True, an empty tuple is accepted + jit("()", forceobj=True)(add) + with self.assertRaises(TypeError) as cm: + jit("(intc,)", forceobj=True)(add) + self.assertIn(tmpl % 1, str(cm.exception)) + + def test_matching_error_message(self): + f = jit("(intc,intc)")(add) + with self.assertRaises(TypeError) as cm: + f(1j, 1j) + self.assertEqual(str(cm.exception), + "No matching definition for argument type(s) " + "complex128, complex128") + + def test_disabled_compilation(self): + @jit + def foo(a): + return a + + foo.compile("(float32,)") + foo.disable_compile() + with self.assertRaises(RuntimeError) as raises: + foo.compile("(int32,)") + self.assertEqual(str(raises.exception), "compilation disabled") + self.assertEqual(len(foo.signatures), 1) + + def test_disabled_compilation_through_list(self): + @jit(["(float32,)", "(int32,)"]) + def foo(a): + return a + + with self.assertRaises(RuntimeError) as raises: + foo.compile("(complex64,)") + self.assertEqual(str(raises.exception), "compilation disabled") + self.assertEqual(len(foo.signatures), 2) + + def test_disabled_compilation_nested_call(self): + @jit(["(intp,)"]) + def foo(a): + return a + + @jit + def bar(): + foo(1) + foo(np.ones(1)) # no matching definition + 
+ with self.assertRaises(errors.TypingError) as raises: + bar() + + m = r".*Invalid use of.*with parameters \(array\(float64, 1d, C\)\).*" + self.assertRegex(str(raises.exception), m) + + def test_fingerprint_failure(self): + """ + Failure in computing the fingerprint cannot affect a nopython=False + function. On the other hand, with nopython=True, a ValueError should + be raised to report the failure with fingerprint. + """ + def foo(x): + return x + + # Empty list will trigger failure in compile_fingerprint + errmsg = 'cannot compute fingerprint of empty list' + with self.assertRaises(ValueError) as raises: + _dispatcher.compute_fingerprint([]) + self.assertIn(errmsg, str(raises.exception)) + # It should work in objmode + objmode_foo = jit(forceobj=True)(foo) + self.assertEqual(objmode_foo([]), []) + # But, not in nopython=True + strict_foo = jit(nopython=True)(foo) + with self.assertRaises(ValueError) as raises: + strict_foo([]) + self.assertIn(errmsg, str(raises.exception)) + + # Test in loop lifting context + @jit(forceobj=True) + def bar(): + object() # force looplifting + x = [] + for i in range(10): + x = objmode_foo(x) + return x + + self.assertEqual(bar(), []) + # Make sure it was looplifted + [cr] = bar.overloads.values() + self.assertEqual(len(cr.lifted), 1) + + def test_serialization(self): + """ + Test serialization of Dispatcher objects + """ + @jit(nopython=True) + def foo(x): + return x + 1 + + self.assertEqual(foo(1), 2) + + # get serialization memo + memo = Dispatcher._memo + Dispatcher._recent.clear() + memo_size = len(memo) + + # pickle foo and check memo size + serialized_foo = pickle.dumps(foo) + # increases the memo size + self.assertEqual(memo_size + 1, len(memo)) + + # unpickle + foo_rebuilt = pickle.loads(serialized_foo) + self.assertEqual(memo_size + 1, len(memo)) + + self.assertIs(foo, foo_rebuilt) + + # do we get the same object even if we delete all the explicit + # references? 
+ id_orig = id(foo_rebuilt) + del foo + del foo_rebuilt + self.assertEqual(memo_size + 1, len(memo)) + new_foo = pickle.loads(serialized_foo) + self.assertEqual(id_orig, id(new_foo)) + + # now clear the recent cache + ref = weakref.ref(new_foo) + del new_foo + Dispatcher._recent.clear() + self.assertEqual(memo_size, len(memo)) + + # show that deserializing creates a new object + pickle.loads(serialized_foo) + self.assertIs(ref(), None) + + @needs_lapack + @unittest.skipIf(_is_armv7l, "Unaligned loads unsupported") + def test_misaligned_array_dispatch(self): + # for context see issue #2937 + def foo(a): + return np.linalg.matrix_power(a, 1) + + jitfoo = jit(nopython=True)(foo) + + n = 64 + r = int(np.sqrt(n)) + dt = np.int8 + count = np.complex128().itemsize // dt().itemsize + + tmp = np.arange(n * count + 1, dtype=dt) + + # create some arrays as Cartesian production of: + # [F/C] x [aligned/misaligned] + C_contig_aligned = tmp[:-1].view(np.complex128).reshape(r, r) + C_contig_misaligned = tmp[1:].view(np.complex128).reshape(r, r) + F_contig_aligned = C_contig_aligned.T + F_contig_misaligned = C_contig_misaligned.T + + # checking routine + def check(name, a): + a[:, :] = np.arange(n, dtype=np.complex128).reshape(r, r) + expected = foo(a) + got = jitfoo(a) + np.testing.assert_allclose(expected, got) + + # The checks must be run in this order to create the dispatch key + # sequence that causes invalid dispatch noted in #2937. + # The first two should hit the cache as they are aligned, supported + # order and under 5 dimensions. The second two should end up in the + # fallback path as they are misaligned. 
+ check("C_contig_aligned", C_contig_aligned) + check("F_contig_aligned", F_contig_aligned) + check("C_contig_misaligned", C_contig_misaligned) + check("F_contig_misaligned", F_contig_misaligned) + + @unittest.skipIf(_is_armv7l, "Unaligned loads unsupported") + def test_immutability_in_array_dispatch(self): + + # RO operation in function + def foo(a): + return np.sum(a) + + jitfoo = jit(nopython=True)(foo) + + n = 64 + r = int(np.sqrt(n)) + dt = np.int8 + count = np.complex128().itemsize // dt().itemsize + + tmp = np.arange(n * count + 1, dtype=dt) + + # create some arrays as Cartesian production of: + # [F/C] x [aligned/misaligned] + C_contig_aligned = tmp[:-1].view(np.complex128).reshape(r, r) + C_contig_misaligned = tmp[1:].view(np.complex128).reshape(r, r) + F_contig_aligned = C_contig_aligned.T + F_contig_misaligned = C_contig_misaligned.T + + # checking routine + def check(name, a, disable_write_bit=False): + a[:, :] = np.arange(n, dtype=np.complex128).reshape(r, r) + if disable_write_bit: + a.flags.writeable = False + expected = foo(a) + got = jitfoo(a) + np.testing.assert_allclose(expected, got) + + # all of these should end up in the fallback path as they have no write + # bit set + check("C_contig_aligned", C_contig_aligned, disable_write_bit=True) + check("F_contig_aligned", F_contig_aligned, disable_write_bit=True) + check("C_contig_misaligned", C_contig_misaligned, + disable_write_bit=True) + check("F_contig_misaligned", F_contig_misaligned, + disable_write_bit=True) + + @needs_lapack + @unittest.skipIf(_is_armv7l, "Unaligned loads unsupported") + def test_misaligned_high_dimension_array_dispatch(self): + + def foo(a): + return np.linalg.matrix_power(a[0, 0, 0, 0, :, :], 1) + + jitfoo = jit(nopython=True)(foo) + + def check_properties(arr, layout, aligned): + self.assertEqual(arr.flags.aligned, aligned) + if layout == "C": + self.assertEqual(arr.flags.c_contiguous, True) + if layout == "F": + self.assertEqual(arr.flags.f_contiguous, True) + + n = 729 + 
r = 3 + dt = np.int8 + count = np.complex128().itemsize // dt().itemsize + + tmp = np.arange(n * count + 1, dtype=dt) + + # create some arrays as Cartesian production of: + # [F/C] x [aligned/misaligned] + C_contig_aligned = tmp[:-1].view(np.complex128).\ + reshape(r, r, r, r, r, r) + check_properties(C_contig_aligned, 'C', True) + C_contig_misaligned = tmp[1:].view(np.complex128).\ + reshape(r, r, r, r, r, r) + check_properties(C_contig_misaligned, 'C', False) + F_contig_aligned = C_contig_aligned.T + check_properties(F_contig_aligned, 'F', True) + F_contig_misaligned = C_contig_misaligned.T + check_properties(F_contig_misaligned, 'F', False) + + # checking routine + def check(name, a): + a[:, :] = np.arange(n, dtype=np.complex128).\ + reshape(r, r, r, r, r, r) + expected = foo(a) + got = jitfoo(a) + np.testing.assert_allclose(expected, got) + + # these should all hit the fallback path as the cache is only for up to + # 5 dimensions + check("F_contig_misaligned", F_contig_misaligned) + check("C_contig_aligned", C_contig_aligned) + check("F_contig_aligned", F_contig_aligned) + check("C_contig_misaligned", C_contig_misaligned) + + def test_dispatch_recompiles_for_scalars(self): + # for context #3612, essentially, compiling a lambda x:x for a + # numerically wide type (everything can be converted to a complex128) + # and then calling again with e.g. an int32 would lead to the int32 + # being converted to a complex128 whereas it ought to compile an int32 + # specialization. 
+ def foo(x): + return x + + # jit and compile on dispatch for 3 scalar types, expect 3 signatures + jitfoo = jit(nopython=True)(foo) + jitfoo(np.complex128(1 + 2j)) + jitfoo(np.int32(10)) + jitfoo(np.bool_(False)) + self.assertEqual(len(jitfoo.signatures), 3) + expected_sigs = [(types.complex128,), (types.int32,), (types.bool_,)] + self.assertEqual(jitfoo.signatures, expected_sigs) + + # now jit with signatures so recompilation is forbidden + # expect 1 signature and type conversion + jitfoo = jit([(types.complex128,)], nopython=True)(foo) + jitfoo(np.complex128(1 + 2j)) + jitfoo(np.int32(10)) + jitfoo(np.bool_(False)) + self.assertEqual(len(jitfoo.signatures), 1) + expected_sigs = [(types.complex128,)] + self.assertEqual(jitfoo.signatures, expected_sigs) + + def test_dispatcher_raises_for_invalid_decoration(self): + # For context see https://github.com/numba/numba/issues/4750. + + @jit(nopython=True) + def foo(x): + return x + + with self.assertRaises(TypeError) as raises: + jit(foo) + err_msg = str(raises.exception) + self.assertIn( + "A jit decorator was called on an already jitted function", err_msg) + self.assertIn("foo", err_msg) + self.assertIn(".py_func", err_msg) + + with self.assertRaises(TypeError) as raises: + jit(BaseTest) + err_msg = str(raises.exception) + self.assertIn("The decorated object is not a function", err_msg) + self.assertIn(f"{type(BaseTest)}", err_msg) + + +class TestSignatureHandling(BaseTest): + """ + Test support for various parameter passing styles. + """ + + def test_named_args(self): + """ + Test passing named arguments to a dispatcher. 
+ """ + f, check = self.compile_func(addsub) + check(3, z=10, y=4) + check(3, 4, 10) + check(x=3, y=4, z=10) + # All calls above fall under the same specialization + self.assertEqual(len(f.overloads), 1) + # Errors + with self.assertRaises(TypeError) as cm: + f(3, 4, y=6, z=7) + self.assertIn("too many arguments: expected 3, got 4", + str(cm.exception)) + with self.assertRaises(TypeError) as cm: + f() + self.assertIn("not enough arguments: expected 3, got 0", + str(cm.exception)) + with self.assertRaises(TypeError) as cm: + f(3, 4, y=6) + self.assertIn("missing argument 'z'", str(cm.exception)) + + def test_default_args(self): + """ + Test omitting arguments with a default value. + """ + f, check = self.compile_func(addsub_defaults) + check(3, z=10, y=4) + check(3, 4, 10) + check(x=3, y=4, z=10) + # Now omitting some values + check(3, z=10) + check(3, 4) + check(x=3, y=4) + check(3) + check(x=3) + # Errors + with self.assertRaises(TypeError) as cm: + f(3, 4, y=6, z=7) + self.assertIn("too many arguments: expected 3, got 4", + str(cm.exception)) + with self.assertRaises(TypeError) as cm: + f() + self.assertIn("not enough arguments: expected at least 1, got 0", + str(cm.exception)) + with self.assertRaises(TypeError) as cm: + f(y=6, z=7) + self.assertIn("missing argument 'x'", str(cm.exception)) + + def test_star_args(self): + """ + Test a compiled function with starargs in the signature. 
+ """ + f, check = self.compile_func(star_defaults) + check(4) + check(4, 5) + check(4, 5, 6) + check(4, 5, 6, 7) + check(4, 5, 6, 7, 8) + check(x=4) + check(x=4, y=5) + check(4, y=5) + with self.assertRaises(TypeError) as cm: + f(4, 5, y=6) + self.assertIn("some keyword arguments unexpected", str(cm.exception)) + with self.assertRaises(TypeError) as cm: + f(4, 5, z=6) + self.assertIn("some keyword arguments unexpected", str(cm.exception)) + with self.assertRaises(TypeError) as cm: + f(4, x=6) + self.assertIn("some keyword arguments unexpected", str(cm.exception)) + + +class TestSignatureHandlingObjectMode(TestSignatureHandling): + """ + Sams as TestSignatureHandling, but in object mode. + """ + + jit_args = dict(forceobj=True) + + +class TestDispatcherMethods(TestCase): + + def test_recompile(self): + closure = 1 + + @jit + def foo(x): + return x + closure + self.assertPreciseEqual(foo(1), 2) + self.assertPreciseEqual(foo(1.5), 2.5) + self.assertEqual(len(foo.signatures), 2) + closure = 2 + self.assertPreciseEqual(foo(1), 2) + # Recompiling takes the new closure into account. + foo.recompile() + # Everything was recompiled + self.assertEqual(len(foo.signatures), 2) + self.assertPreciseEqual(foo(1), 3) + self.assertPreciseEqual(foo(1.5), 3.5) + + def test_recompile_signatures(self): + # Same as above, but with an explicit signature on @jit. + closure = 1 + + @jit("int32(int32)") + def foo(x): + return x + closure + self.assertPreciseEqual(foo(1), 2) + self.assertPreciseEqual(foo(1.5), 2) + closure = 2 + self.assertPreciseEqual(foo(1), 2) + # Recompiling takes the new closure into account. 
+ foo.recompile() + self.assertPreciseEqual(foo(1), 3) + self.assertPreciseEqual(foo(1.5), 3) + + def test_inspect_llvm(self): + # Create a jited function + @jit + def foo(explicit_arg1, explicit_arg2): + return explicit_arg1 + explicit_arg2 + + # Call it in a way to create 3 signatures + foo(1, 1) + foo(1.0, 1) + foo(1.0, 1.0) + + # base call to get all llvm in a dict + llvms = foo.inspect_llvm() + self.assertEqual(len(llvms), 3) + + # make sure the function name shows up in the llvm + for llvm_bc in llvms.values(): + # Look for the function name + self.assertIn("foo", llvm_bc) + + # Look for the argument names + self.assertIn("explicit_arg1", llvm_bc) + self.assertIn("explicit_arg2", llvm_bc) + + def test_inspect_asm(self): + # Create a jited function + @jit + def foo(explicit_arg1, explicit_arg2): + return explicit_arg1 + explicit_arg2 + + # Call it in a way to create 3 signatures + foo(1, 1) + foo(1.0, 1) + foo(1.0, 1.0) + + # base call to get all llvm in a dict + asms = foo.inspect_asm() + self.assertEqual(len(asms), 3) + + # make sure the function name shows up in the llvm + for asm in asms.values(): + # Look for the function name + self.assertTrue("foo" in asm) + + def _check_cfg_display(self, cfg, wrapper=''): + # simple stringify test + if wrapper: + wrapper = "{}{}".format(len(wrapper), wrapper) + module_name = __name__.split('.', 1)[0] + module_len = len(module_name) + prefix = r'^digraph "CFG for \'_ZN{}{}{}'.format(wrapper, + module_len, + module_name) + self.assertRegex(str(cfg), prefix) + # .display() requires an optional dependency on `graphviz`. + # just test for the attribute without running it. + self.assertTrue(callable(cfg.display)) + + def test_inspect_cfg(self): + # Exercise the .inspect_cfg(). These are minimal tests and do not fully + # check the correctness of the function. 
+ @jit + def foo(the_array): + return the_array.sum() + + # Generate 3 overloads + a1 = np.ones(1) + a2 = np.ones((1, 1)) + a3 = np.ones((1, 1, 1)) + foo(a1) + foo(a2) + foo(a3) + + # Call inspect_cfg() without arguments + cfgs = foo.inspect_cfg() + + # Correct count of overloads + self.assertEqual(len(cfgs), 3) + + # Makes sure all the signatures are correct + [s1, s2, s3] = cfgs.keys() + self.assertEqual(set([s1, s2, s3]), + set(map(lambda x: (typeof(x),), [a1, a2, a3]))) + + for cfg in cfgs.values(): + self._check_cfg_display(cfg) + self.assertEqual(len(list(cfgs.values())), 3) + + # Call inspect_cfg(signature) + cfg = foo.inspect_cfg(signature=foo.signatures[0]) + self._check_cfg_display(cfg) + + def test_inspect_cfg_with_python_wrapper(self): + # Exercise the .inspect_cfg() including the python wrapper. + # These are minimal tests and do not fully check the correctness of + # the function. + @jit + def foo(the_array): + return the_array.sum() + + # Generate 3 overloads + a1 = np.ones(1) + a2 = np.ones((1, 1)) + a3 = np.ones((1, 1, 1)) + foo(a1) + foo(a2) + foo(a3) + + # Call inspect_cfg(signature, show_wrapper="python") + cfg = foo.inspect_cfg(signature=foo.signatures[0], + show_wrapper="python") + self._check_cfg_display(cfg, wrapper='cpython') + + def test_inspect_types(self): + @jit + def foo(a, b): + return a + b + + foo(1, 2) + # Exercise the method + foo.inspect_types(StringIO()) + + # Test output + expected = str(foo.overloads[foo.signatures[0]].type_annotation) + with captured_stdout() as out: + foo.inspect_types() + assert expected in out.getvalue() + + def test_inspect_types_with_signature(self): + @jit + def foo(a): + return a + 1 + + foo(1) + foo(1.0) + # Inspect all signatures + with captured_stdout() as total: + foo.inspect_types() + # Inspect first signature + with captured_stdout() as first: + foo.inspect_types(signature=foo.signatures[0]) + # Inspect second signature + with captured_stdout() as second: + 
foo.inspect_types(signature=foo.signatures[1]) + + self.assertEqual(total.getvalue(), first.getvalue() + second.getvalue()) + + @unittest.skipIf(jinja2 is None, "please install the 'jinja2' package") + @unittest.skipIf(pygments is None, "please install the 'pygments' package") + def test_inspect_types_pretty(self): + @jit + def foo(a, b): + return a + b + + foo(1, 2) + + # Exercise the method, dump the output + with captured_stdout(): + ann = foo.inspect_types(pretty=True) + + # ensure HTML is found in the annotation output + for k, v in ann.ann.items(): + span_found = False + for line in v['pygments_lines']: + if 'span' in line[2]: + span_found = True + self.assertTrue(span_found) + + # check that file+pretty kwarg combo raises + with self.assertRaises(ValueError) as raises: + foo.inspect_types(file=StringIO(), pretty=True) + + self.assertIn("`file` must be None if `pretty=True`", + str(raises.exception)) + + def test_get_annotation_info(self): + @jit + def foo(a): + return a + 1 + + foo(1) + foo(1.3) + + expected = dict(chain.from_iterable(foo.get_annotation_info(i).items() + for i in foo.signatures)) + result = foo.get_annotation_info() + self.assertEqual(expected, result) + + def test_issue_with_array_layout_conflict(self): + """ + This test an issue with the dispatcher when an array that is both + C and F contiguous is supplied as the first signature. + The dispatcher checks for F contiguous first but the compiler checks + for C contiguous first. This results in an C contiguous code inserted + as F contiguous function. 
+ """ + def pyfunc(A, i, j): + return A[i, j] + + cfunc = jit(pyfunc) + + ary_c_and_f = np.array([[1.]]) + ary_c = np.array([[0., 1.], [2., 3.]], order='C') + ary_f = np.array([[0., 1.], [2., 3.]], order='F') + + exp_c = pyfunc(ary_c, 1, 0) + exp_f = pyfunc(ary_f, 1, 0) + + self.assertEqual(1., cfunc(ary_c_and_f, 0, 0)) + got_c = cfunc(ary_c, 1, 0) + got_f = cfunc(ary_f, 1, 0) + + self.assertEqual(exp_c, got_c) + self.assertEqual(exp_f, got_f) + + +class TestDispatcherFunctionBoundaries(TestCase): + def test_pass_dispatcher_as_arg(self): + # Test that a Dispatcher object can be pass as argument + @jit(nopython=True) + def add1(x): + return x + 1 + + @jit(nopython=True) + def bar(fn, x): + return fn(x) + + @jit(nopython=True) + def foo(x): + return bar(add1, x) + + # Check dispatcher as argument inside NPM + inputs = [1, 11.1, np.arange(10)] + expected_results = [x + 1 for x in inputs] + + for arg, expect in zip(inputs, expected_results): + self.assertPreciseEqual(foo(arg), expect) + + # Check dispatcher as argument from python + for arg, expect in zip(inputs, expected_results): + self.assertPreciseEqual(bar(add1, arg), expect) + + def test_dispatcher_as_arg_usecase(self): + @jit(nopython=True) + def maximum(seq, cmpfn): + tmp = seq[0] + for each in seq[1:]: + cmpval = cmpfn(tmp, each) + if cmpval < 0: + tmp = each + return tmp + + got = maximum([1, 2, 3, 4], cmpfn=jit(lambda x, y: x - y)) + self.assertEqual(got, 4) + got = maximum(list(zip(range(5), range(5)[::-1])), + cmpfn=jit(lambda x, y: x[0] - y[0])) + self.assertEqual(got, (4, 0)) + got = maximum(list(zip(range(5), range(5)[::-1])), + cmpfn=jit(lambda x, y: x[1] - y[1])) + self.assertEqual(got, (0, 4)) + + def test_dispatcher_can_return_to_python(self): + @jit(nopython=True) + def foo(fn): + return fn + + fn = jit(lambda x: x) + + self.assertEqual(foo(fn), fn) + + def test_dispatcher_in_sequence_arg(self): + @jit(nopython=True) + def one(x): + return x + 1 + + @jit(nopython=True) + def two(x): + return 
one(one(x)) + + @jit(nopython=True) + def three(x): + return one(one(one(x))) + + @jit(nopython=True) + def choose(fns, x): + return fns[0](x), fns[1](x), fns[2](x) + + # Tuple case + self.assertEqual(choose((one, two, three), 1), (2, 3, 4)) + # List case + self.assertEqual(choose([one, one, one], 1), (2, 2, 2)) + + +class TestBoxingDefaultError(unittest.TestCase): + # Testing default error at boxing/unboxing + def test_unbox_runtime_error(self): + # Dummy type has no unbox support + def foo(x): + pass + argtys = (types.Dummy("dummy_type"),) + # This needs `compile_isolated`-like behaviour so as to bypass + # dispatcher type checking logic + cres = njit(argtys)(foo).overloads[argtys] + with self.assertRaises(TypeError) as raises: + # Can pass in whatever and the unbox logic will always raise + # without checking the input value. + cres.entry_point(None) + self.assertEqual(str(raises.exception), "can't unbox dummy_type type") + + def test_box_runtime_error(self): + @njit + def foo(): + return unittest # Module type has no boxing logic + with self.assertRaises(TypeError) as raises: + foo() + pat = "cannot convert native Module.* to Python object" + self.assertRegex(str(raises.exception), pat) + + +class TestNoRetryFailedSignature(unittest.TestCase): + """Test that failed-to-compile signatures are not recompiled. 
+ """ + + def run_test(self, func): + fcom = func._compiler + self.assertEqual(len(fcom._failed_cache), 0) + # expected failure because `int` has no `__getitem__` + with self.assertRaises(errors.TypingError): + func(1) + self.assertEqual(len(fcom._failed_cache), 1) + # retry + with self.assertRaises(errors.TypingError): + func(1) + self.assertEqual(len(fcom._failed_cache), 1) + # retry with double + with self.assertRaises(errors.TypingError): + func(1.0) + self.assertEqual(len(fcom._failed_cache), 2) + + def test_direct_call(self): + @jit(nopython=True) + def foo(x): + return x[0] + + self.run_test(foo) + + def test_nested_call(self): + @jit(nopython=True) + def bar(x): + return x[0] + + @jit(nopython=True) + def foobar(x): + bar(x) + + @jit(nopython=True) + def foo(x): + return bar(x) + foobar(x) + + self.run_test(foo) + + @unittest.expectedFailure + # NOTE: @overload does not have an error cache. See PR #9259 for this + # feature and remove the xfail once this is merged. + def test_error_count(self): + def check(field, would_fail): + # Slightly modified from the reproducer in issue #4117. + # Before the patch, the compilation time of the failing case is + # much longer than of the successful case. This can be detected + # by the number of times `trigger()` is visited. 
+ k = 10 + counter = {'c': 0} + + def trigger(x): + assert 0, "unreachable" + + @overload(trigger) + def ol_trigger(x): + # Keep track of every visit + counter['c'] += 1 + if would_fail: + raise errors.TypingError("invoke_failed") + return lambda x: x + + @jit(nopython=True) + def ident(out, x): + pass + + def chain_assign(fs, inner=ident): + tab_head, tab_tail = fs[-1], fs[:-1] + + @jit(nopython=True) + def assign(out, x): + inner(out, x) + out[0] += tab_head(x) + + if tab_tail: + return chain_assign(tab_tail, assign) + else: + return assign + + chain = chain_assign((trigger,) * k) + out = np.ones(2) + if would_fail: + with self.assertRaises(errors.TypingError) as raises: + chain(out, 1) + self.assertIn('invoke_failed', str(raises.exception)) + else: + chain(out, 1) + + # Returns the visit counts + return counter['c'] + + ct_ok = check('a', False) + ct_bad = check('c', True) + # `trigger()` is visited exactly once for both successful and failed + # compilation. + self.assertEqual(ct_ok, 1) + self.assertEqual(ct_bad, 1) + + +@njit +def add_y1(x, y=1): + return x + y + + +@njit +def add_ynone(x, y=None): + return x + (1 if y else 2) + + +@njit +def mult(x, y): + return x * y + + +@njit +def add_func(x, func=mult): + return x + func(x, x) + + +def _checker(f1, arg): + assert f1(arg) == f1.py_func(arg) + + +class TestMultiprocessingDefaultParameters(SerialMixin, unittest.TestCase): + def run_fc_multiproc(self, fc): + try: + ctx = multiprocessing.get_context('spawn') + except AttributeError: + ctx = multiprocessing + + # RE: issue #5973, this doesn't use multiprocessing.Pool.map as doing so + # causes the TBB library to segfault under certain conditions. It's not + # clear whether the cause is something in the complexity of the Pool + # itself, e.g. watcher threads etc, or if it's a problem synonymous with + # a "timing attack". 
+ for a in [1, 2, 3]: + p = ctx.Process(target=_checker, args=(fc, a,)) + p.start() + p.join(_TEST_TIMEOUT) + self.assertEqual(p.exitcode, 0) + + def test_int_def_param(self): + """ Tests issue #4888""" + + self.run_fc_multiproc(add_y1) + + def test_none_def_param(self): + """ Tests None as a default parameter""" + + self.run_fc_multiproc(add_func) + + def test_function_def_param(self): + """ Tests a function as a default parameter""" + + self.run_fc_multiproc(add_func) + + +class TestVectorizeDifferentTargets(unittest.TestCase): + """Test that vectorize can be reapplied if the target is different + """ + + def test_cpu_vs_parallel(self): + @jit + def add(x, y): + return x + y + + custom_vectorize = vectorize([], identity=None, target='cpu') + + custom_vectorize(add) + + custom_vectorize_2 = vectorize([], identity=None, target='parallel') + + custom_vectorize_2(add) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_doctest.py b/venv/lib/python3.10/site-packages/numba/tests/test_doctest.py new file mode 100644 index 0000000000000000000000000000000000000000..53771dc4d6c1cf542f62314dadbf6a670e9a99da --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_doctest.py @@ -0,0 +1,28 @@ +import doctest +import unittest +from numba.tests.support import TestCase + + +class TestDocTest(TestCase): + def test_basic_decorators(self): + from . 
import doctest_usecase + + # Make sure the finder see all the doctest + finder = doctest.DocTestFinder() + tests = finder.find(doctest_usecase) + testnames = {x.name for x in tests} + expected = { + 'numba.tests.doctest_usecase', + 'numba.tests.doctest_usecase.a', + 'numba.tests.doctest_usecase.b', + 'numba.tests.doctest_usecase.c', + 'numba.tests.doctest_usecase.d', + } + self.assertEqual(testnames, expected) + + # Execute the doctest in the module + doctest.testmod(doctest_usecase) + + +if __name__ == "__main__": + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_dyn_array.py b/venv/lib/python3.10/site-packages/numba/tests/test_dyn_array.py new file mode 100644 index 0000000000000000000000000000000000000000..1349e66cdad0aa2e14169411faa0d07776fb8a07 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_dyn_array.py @@ -0,0 +1,1850 @@ +import contextlib +import sys +import numpy as np +import random +import re +import threading +import gc + +from numba.core.errors import TypingError +from numba import njit +from numba.core import types, utils, config +from numba.tests.support import MemoryLeakMixin, TestCase, tag, skip_if_32bit +import unittest + + +nrtjit = njit(_nrt=True, nogil=True) + + +def np_concatenate1(a, b, c): + return np.concatenate((a, b, c)) + +def np_concatenate2(a, b, c, axis): + return np.concatenate((a, b, c), axis=axis) + +def np_stack1(a, b, c): + return np.stack((a, b, c)) + +def np_stack2(a, b, c, axis): + return np.stack((a, b, c), axis=axis) + +def np_hstack(a, b, c): + return np.hstack((a, b, c)) + +def np_vstack(a, b, c): + return np.vstack((a, b, c)) + +def np_row_stack(a, b, c): + return np.row_stack((a, b, c)) + +def np_dstack(a, b, c): + return np.dstack((a, b, c)) + +def np_column_stack(a, b, c): + return np.column_stack((a, b, c)) + + +class BaseTest(TestCase): + + def check_outputs(self, pyfunc, argslist, exact=True): + cfunc = nrtjit(pyfunc) + for args in argslist: + expected = 
pyfunc(*args) + ret = cfunc(*args) + self.assertEqual(ret.size, expected.size) + self.assertEqual(ret.dtype, expected.dtype) + self.assertStridesEqual(ret, expected) + if exact: + np.testing.assert_equal(expected, ret) + else: + np.testing.assert_allclose(expected, ret) + + +class NrtRefCtTest(MemoryLeakMixin): + def assert_array_nrt_refct(self, arr, expect): + self.assertEqual(arr.base.refcount, expect) + + +class TestDynArray(NrtRefCtTest, TestCase): + + def test_empty_0d(self): + @nrtjit + def foo(): + arr = np.empty(()) + arr[()] = 42 + return arr + + arr = foo() + self.assert_array_nrt_refct(arr, 1) + np.testing.assert_equal(42, arr) + self.assertEqual(arr.size, 1) + self.assertEqual(arr.shape, ()) + self.assertEqual(arr.dtype, np.dtype(np.float64)) + self.assertEqual(arr.strides, ()) + arr.fill(123) # test writability + np.testing.assert_equal(123, arr) + del arr + + def test_empty_1d(self): + @nrtjit + def foo(n): + arr = np.empty(n) + for i in range(n): + arr[i] = i + + return arr + + n = 3 + arr = foo(n) + self.assert_array_nrt_refct(arr, 1) + np.testing.assert_equal(np.arange(n), arr) + self.assertEqual(arr.size, n) + self.assertEqual(arr.shape, (n,)) + self.assertEqual(arr.dtype, np.dtype(np.float64)) + self.assertEqual(arr.strides, (np.dtype(np.float64).itemsize,)) + arr.fill(123) # test writability + np.testing.assert_equal(123, arr) + del arr + + def test_empty_2d(self): + def pyfunc(m, n): + arr = np.empty((m, n), np.int32) + for i in range(m): + for j in range(n): + arr[i, j] = i + j + + return arr + + cfunc = nrtjit(pyfunc) + m = 4 + n = 3 + expected_arr = pyfunc(m, n) + got_arr = cfunc(m, n) + self.assert_array_nrt_refct(got_arr, 1) + np.testing.assert_equal(expected_arr, got_arr) + + self.assertEqual(expected_arr.size, got_arr.size) + self.assertEqual(expected_arr.shape, got_arr.shape) + self.assertEqual(expected_arr.strides, got_arr.strides) + + del got_arr + + def test_empty_3d(self): + def pyfunc(m, n, p): + arr = np.empty((m, n, p), np.int32) 
+ for i in range(m): + for j in range(n): + for k in range(p): + arr[i, j, k] = i + j + k + + return arr + + cfunc = nrtjit(pyfunc) + m = 4 + n = 3 + p = 2 + expected_arr = pyfunc(m, n, p) + got_arr = cfunc(m, n, p) + self.assert_array_nrt_refct(got_arr, 1) + np.testing.assert_equal(expected_arr, got_arr) + + self.assertEqual(expected_arr.size, got_arr.size) + self.assertEqual(expected_arr.shape, got_arr.shape) + self.assertEqual(expected_arr.strides, got_arr.strides) + + del got_arr + + def test_empty_2d_sliced(self): + def pyfunc(m, n, p): + arr = np.empty((m, n), np.int32) + for i in range(m): + for j in range(n): + arr[i, j] = i + j + + return arr[p] + + cfunc = nrtjit(pyfunc) + m = 4 + n = 3 + p = 2 + expected_arr = pyfunc(m, n, p) + got_arr = cfunc(m, n, p) + self.assert_array_nrt_refct(got_arr, 1) + np.testing.assert_equal(expected_arr, got_arr) + + self.assertEqual(expected_arr.size, got_arr.size) + self.assertEqual(expected_arr.shape, got_arr.shape) + self.assertEqual(expected_arr.strides, got_arr.strides) + + del got_arr + + def test_return_global_array(self): + y = np.ones(4, dtype=np.float32) + initrefct = sys.getrefcount(y) + + def return_external_array(): + return y + + cfunc = nrtjit(return_external_array) + out = cfunc() + + # out reference by cfunc + self.assertEqual(initrefct + 1, sys.getrefcount(y)) + + np.testing.assert_equal(y, out) + np.testing.assert_equal(y, np.ones(4, dtype=np.float32)) + np.testing.assert_equal(out, np.ones(4, dtype=np.float32)) + + del out + gc.collect() + # out is only referenced by cfunc + self.assertEqual(initrefct + 1, sys.getrefcount(y)) + + del cfunc + gc.collect() + # y is no longer referenced by cfunc + self.assertEqual(initrefct, sys.getrefcount(y)) + + def test_return_global_array_sliced(self): + y = np.ones(4, dtype=np.float32) + + def return_external_array(): + return y[2:] + + cfunc = nrtjit(return_external_array) + out = cfunc() + self.assertIsNone(out.base) + + yy = y[2:] + np.testing.assert_equal(yy, out) 
+ np.testing.assert_equal(yy, np.ones(2, dtype=np.float32)) + np.testing.assert_equal(out, np.ones(2, dtype=np.float32)) + + def test_array_pass_through(self): + def pyfunc(y): + return y + + arr = np.ones(4, dtype=np.float32) + + cfunc = nrtjit(pyfunc) + expected = cfunc(arr) + got = pyfunc(arr) + + np.testing.assert_equal(expected, arr) + np.testing.assert_equal(expected, got) + self.assertIs(expected, arr) + self.assertIs(expected, got) + + def test_array_pass_through_sliced(self): + def pyfunc(y): + return y[y.size // 2:] + + arr = np.ones(4, dtype=np.float32) + + initrefct = sys.getrefcount(arr) + + cfunc = nrtjit(pyfunc) + got = cfunc(arr) + self.assertEqual(initrefct + 1, sys.getrefcount(arr)) + expected = pyfunc(arr) + self.assertEqual(initrefct + 2, sys.getrefcount(arr)) + + np.testing.assert_equal(expected, arr[arr.size // 2]) + np.testing.assert_equal(expected, got) + + del expected + self.assertEqual(initrefct + 1, sys.getrefcount(arr)) + del got + self.assertEqual(initrefct, sys.getrefcount(arr)) + + def test_ufunc_with_allocated_output(self): + + def pyfunc(a, b): + out = np.empty(a.shape) + np.add(a, b, out) + return out + + cfunc = nrtjit(pyfunc) + + # 1D case + arr_a = np.random.random(10) + arr_b = np.random.random(10) + + np.testing.assert_equal(pyfunc(arr_a, arr_b), + cfunc(arr_a, arr_b)) + + self.assert_array_nrt_refct(cfunc(arr_a, arr_b), 1) + + # 2D case + arr_a = np.random.random(10).reshape(2, 5) + arr_b = np.random.random(10).reshape(2, 5) + + np.testing.assert_equal(pyfunc(arr_a, arr_b), + cfunc(arr_a, arr_b)) + + self.assert_array_nrt_refct(cfunc(arr_a, arr_b), 1) + + # 3D case + arr_a = np.random.random(70).reshape(2, 5, 7) + arr_b = np.random.random(70).reshape(2, 5, 7) + + np.testing.assert_equal(pyfunc(arr_a, arr_b), + cfunc(arr_a, arr_b)) + + self.assert_array_nrt_refct(cfunc(arr_a, arr_b), 1) + + def test_allocation_mt(self): + """ + This test exercises the array allocation in multithreaded usecase. 
+ This stress the freelist inside NRT. + """ + + def pyfunc(inp): + out = np.empty(inp.size) + + # Zero fill + for i in range(out.size): + out[i] = 0 + + for i in range(inp[0]): + # Allocate inside a loop + tmp = np.empty(inp.size) + # Write to tmp + for j in range(tmp.size): + tmp[j] = inp[j] + # out = tmp + i + for j in range(tmp.size): + out[j] += tmp[j] + i + + return out + + cfunc = nrtjit(pyfunc) + size = 10 # small array size so that the computation is short + arr = np.random.randint(1, 10, size) + frozen_arr = arr.copy() + + np.testing.assert_equal(pyfunc(arr), cfunc(arr)) + # Ensure we did not modify the input + np.testing.assert_equal(frozen_arr, arr) + + workers = [] + inputs = [] + outputs = [] + + # Make wrapper to store the output + def wrapped(inp, out): + out[:] = cfunc(inp) + + # Create a lot of worker threads to create contention + for i in range(100): + arr = np.random.randint(1, 10, size) + out = np.empty_like(arr) + thread = threading.Thread(target=wrapped, + args=(arr, out), + name="worker{0}".format(i)) + workers.append(thread) + inputs.append(arr) + outputs.append(out) + + # Launch worker threads + for thread in workers: + thread.start() + + # Join worker threads + for thread in workers: + thread.join() + + # Check result + for inp, out in zip(inputs, outputs): + np.testing.assert_equal(pyfunc(inp), out) + + def test_refct_mt(self): + """ + This test exercises the refct in multithreaded code + """ + + def pyfunc(n, inp): + out = np.empty(inp.size) + for i in range(out.size): + out[i] = inp[i] + 1 + # Use swap to trigger many refct ops + for i in range(n): + out, inp = inp, out + return out + + cfunc = nrtjit(pyfunc) + size = 10 + input = np.arange(size, dtype=float) + expected_refct = sys.getrefcount(input) + swapct = random.randrange(1000) + expected = pyfunc(swapct, input) + np.testing.assert_equal(expected, cfunc(swapct, input)) + # The following checks can discover a reference count error + del expected + self.assertEqual(expected_refct, 
sys.getrefcount(input)) + + workers = [] + outputs = [] + swapcts = [] + + # Make wrapper to store the output + def wrapped(n, input, out): + out[:] = cfunc(n, input) + + # Create worker threads + for i in range(100): + out = np.empty(size) + # All thread shares the same input + swapct = random.randrange(1000) + thread = threading.Thread(target=wrapped, + args=(swapct, input, out), + name="worker{0}".format(i)) + workers.append(thread) + outputs.append(out) + swapcts.append(swapct) + + # Launch worker threads + for thread in workers: + thread.start() + + # Join worker threads + for thread in workers: + thread.join() + + # Check result + for swapct, out in zip(swapcts, outputs): + np.testing.assert_equal(pyfunc(swapct, input), out) + + del outputs, workers + # The following checks can discover a reference count error + self.assertEqual(expected_refct, sys.getrefcount(input)) + + @skip_if_32bit + def test_invalid_size_array(self): + + @njit + def foo(x): + np.empty(x) + + # Exceptions leak references + self.disable_leak_check() + + with self.assertRaises(MemoryError) as raises: + foo(types.size_t.maxval // 8 // 2) + + self.assertIn("Allocation failed", str(raises.exception)) + + def test_swap(self): + + def pyfunc(x, y, t): + """Swap array x and y for t number of times + """ + for i in range(t): + x, y = y, x + + return x, y + + + cfunc = nrtjit(pyfunc) + + x = np.random.random(100) + y = np.random.random(100) + + t = 100 + + initrefct = sys.getrefcount(x), sys.getrefcount(y) + expect, got = pyfunc(x, y, t), cfunc(x, y, t) + self.assertIsNone(got[0].base) + self.assertIsNone(got[1].base) + np.testing.assert_equal(expect, got) + del expect, got + self.assertEqual(initrefct, (sys.getrefcount(x), sys.getrefcount(y))) + + def test_return_tuple_of_array(self): + + def pyfunc(x): + y = np.empty(x.size) + for i in range(y.size): + y[i] = x[i] + 1 + return x, y + + cfunc = nrtjit(pyfunc) + + x = np.random.random(5) + initrefct = sys.getrefcount(x) + expected_x, expected_y = 
pyfunc(x) + got_x, got_y = cfunc(x) + self.assertIs(x, expected_x) + self.assertIs(x, got_x) + np.testing.assert_equal(expected_x, got_x) + np.testing.assert_equal(expected_y, got_y) + del expected_x, got_x + self.assertEqual(initrefct, sys.getrefcount(x)) + + self.assertEqual(sys.getrefcount(expected_y), sys.getrefcount(got_y)) + + def test_return_tuple_of_array_created(self): + + def pyfunc(x): + y = np.empty(x.size) + for i in range(y.size): + y[i] = x[i] + 1 + out = y, y + return out + + cfunc = nrtjit(pyfunc) + + x = np.random.random(5) + expected_x, expected_y = pyfunc(x) + got_x, got_y = cfunc(x) + np.testing.assert_equal(expected_x, got_x) + np.testing.assert_equal(expected_y, got_y) + # getrefcount owns 1, got_y owns 1 + self.assertEqual(2, sys.getrefcount(got_y)) + # getrefcount owns 1, got_y owns 1 + self.assertEqual(2, sys.getrefcount(got_y)) + + def test_issue_with_return_leak(self): + """ + Dispatcher returns a new reference. + It need to workaround it for now. + """ + @nrtjit + def inner(out): + return out + + def pyfunc(x): + return inner(x) + + cfunc = nrtjit(pyfunc) + + arr = np.arange(10) + old_refct = sys.getrefcount(arr) + + self.assertEqual(old_refct, sys.getrefcount(pyfunc(arr))) + self.assertEqual(old_refct, sys.getrefcount(cfunc(arr))) + self.assertEqual(old_refct, sys.getrefcount(arr)) + + +class ConstructorBaseTest(NrtRefCtTest): + + def check_0d(self, pyfunc): + cfunc = nrtjit(pyfunc) + expected = pyfunc() + ret = cfunc() + self.assert_array_nrt_refct(ret, 1) + self.assertEqual(ret.size, expected.size) + self.assertEqual(ret.shape, expected.shape) + self.assertEqual(ret.dtype, expected.dtype) + self.assertEqual(ret.strides, expected.strides) + self.check_result_value(ret, expected) + # test writability + expected = np.empty_like(ret) # np.full_like was not added until Numpy 1.8 + expected.fill(123) + ret.fill(123) + np.testing.assert_equal(ret, expected) + + def check_1d(self, pyfunc): + cfunc = nrtjit(pyfunc) + n = 3 + expected = 
pyfunc(n) + ret = cfunc(n) + self.assert_array_nrt_refct(ret, 1) + self.assertEqual(ret.size, expected.size) + self.assertEqual(ret.shape, expected.shape) + self.assertEqual(ret.dtype, expected.dtype) + self.assertEqual(ret.strides, expected.strides) + self.check_result_value(ret, expected) + # test writability + expected = np.empty_like(ret) # np.full_like was not added until Numpy 1.8 + expected.fill(123) + ret.fill(123) + np.testing.assert_equal(ret, expected) + # errors + with self.assertRaises(ValueError) as cm: + cfunc(-1) + self.assertEqual(str(cm.exception), "negative dimensions not allowed") + + def check_2d(self, pyfunc): + cfunc = nrtjit(pyfunc) + m, n = 2, 3 + expected = pyfunc(m, n) + ret = cfunc(m, n) + self.assert_array_nrt_refct(ret, 1) + self.assertEqual(ret.size, expected.size) + self.assertEqual(ret.shape, expected.shape) + self.assertEqual(ret.dtype, expected.dtype) + self.assertEqual(ret.strides, expected.strides) + self.check_result_value(ret, expected) + # test writability + expected = np.empty_like(ret) # np.full_like was not added until Numpy 1.8 + expected.fill(123) + ret.fill(123) + np.testing.assert_equal(ret, expected) + # errors + with self.assertRaises(ValueError) as cm: + cfunc(2, -1) + self.assertEqual(str(cm.exception), "negative dimensions not allowed") + + def check_alloc_size(self, pyfunc): + """Checks that pyfunc will error, not segfaulting due to array size.""" + cfunc = nrtjit(pyfunc) + with self.assertRaises(ValueError) as e: + cfunc() + self.assertIn( + "array is too big", + str(e.exception) + ) + + +class TestNdZeros(ConstructorBaseTest, TestCase): + + def setUp(self): + super(TestNdZeros, self).setUp() + self.pyfunc = np.zeros + + def check_result_value(self, ret, expected): + np.testing.assert_equal(ret, expected) + + def test_0d(self): + pyfunc = self.pyfunc + def func(): + return pyfunc(()) + self.check_0d(func) + + def test_1d(self): + pyfunc = self.pyfunc + def func(n): + return pyfunc(n) + self.check_1d(func) + + 
def test_1d_dtype(self): + pyfunc = self.pyfunc + def func(n): + return pyfunc(n, np.int32) + self.check_1d(func) + + def test_1d_dtype_instance(self): + # dtype as numpy dtype, not as scalar class + pyfunc = self.pyfunc + _dtype = np.dtype('int32') + def func(n): + return pyfunc(n, _dtype) + self.check_1d(func) + + def test_1d_dtype_str(self): + pyfunc = self.pyfunc + _dtype = 'int32' + def func(n): + return pyfunc(n, _dtype) + self.check_1d(func) + + def func(n): + return pyfunc(n, 'complex128') + self.check_1d(func) + + def test_1d_dtype_str_alternative_spelling(self): + # like test_1d_dtype_str but using the shorthand type spellings + pyfunc = self.pyfunc + _dtype = 'i4' + def func(n): + return pyfunc(n, _dtype) + self.check_1d(func) + + def func(n): + return pyfunc(n, 'c8') + self.check_1d(func) + + def test_1d_dtype_str_structured_dtype(self): + # test_1d_dtype_str but using a structured dtype + pyfunc = self.pyfunc + _dtype = "i4, (2,3)f8" + def func(n): + return pyfunc(n, _dtype) + self.check_1d(func) + + def test_1d_dtype_non_const_str(self): + pyfunc = self.pyfunc + + @njit + def func(n, dt): + return pyfunc(n, dt) + + with self.assertRaises(TypingError) as raises: + func(5, 'int32') + + excstr = str(raises.exception) + msg = (f"If np.{self.pyfunc.__name__} dtype is a string it must be a " + "string constant.") + self.assertIn(msg, excstr) + + def test_1d_dtype_invalid_str(self): + pyfunc = self.pyfunc + + @njit + def func(n): + return pyfunc(n, 'ABCDEF') + + with self.assertRaises(TypingError) as raises: + func(5) + + excstr = str(raises.exception) + self.assertIn("Invalid NumPy dtype specified: 'ABCDEF'", excstr) + + def test_2d(self): + pyfunc = self.pyfunc + def func(m, n): + return pyfunc((m, n)) + self.check_2d(func) + + def test_2d_shape_dtypes(self): + # Test for issue #4575 + pyfunc = self.pyfunc + def func1(m, n): + return pyfunc((np.int16(m), np.int32(n))) + self.check_2d(func1) + # Using a 64-bit value checks that 32 bit systems will downcast 
to intp + def func2(m, n): + return pyfunc((np.int64(m), np.int8(n))) + self.check_2d(func2) + # Make sure an error is thrown if we can't downcast safely + if config.IS_32BITS: + cfunc = nrtjit(lambda m, n: pyfunc((m, n))) + with self.assertRaises(ValueError): + cfunc(np.int64(1 << (32 - 1)), 1) + + def test_2d_dtype_kwarg(self): + pyfunc = self.pyfunc + def func(m, n): + return pyfunc((m, n), dtype=np.complex64) + self.check_2d(func) + + def test_2d_dtype_str_kwarg(self): + pyfunc = self.pyfunc + def func(m, n): + return pyfunc((m, n), dtype='complex64') + self.check_2d(func) + + def test_2d_dtype_str_kwarg_alternative_spelling(self): + # as test_2d_dtype_str_kwarg but with the numpy shorthand type spelling + pyfunc = self.pyfunc + def func(m, n): + return pyfunc((m, n), dtype='c8') + self.check_2d(func) + + def test_alloc_size(self): + pyfunc = self.pyfunc + width = types.intp.bitwidth + def gen_func(shape, dtype): + return lambda : pyfunc(shape, dtype) + # Under these values numba will segfault, but that's another issue + self.check_alloc_size(gen_func(1 << width - 2, np.intp)) + self.check_alloc_size(gen_func((1 << width - 8, 64), np.intp)) + + +class TestNdOnes(TestNdZeros): + + def setUp(self): + super(TestNdOnes, self).setUp() + self.pyfunc = np.ones + + @unittest.expectedFailure + def test_1d_dtype_str_structured_dtype(self): + super().test_1d_dtype_str_structured_dtype() + + +class TestNdFull(ConstructorBaseTest, TestCase): + + def check_result_value(self, ret, expected): + np.testing.assert_equal(ret, expected) + + def test_0d(self): + def func(): + return np.full((), 4.5) + self.check_0d(func) + + def test_1d(self): + def func(n): + return np.full(n, 4.5) + self.check_1d(func) + + def test_1d_dtype(self): + def func(n): + return np.full(n, 4.5, np.bool_) + self.check_1d(func) + + def test_1d_dtype_instance(self): + dtype = np.dtype('bool') + def func(n): + return np.full(n, 4.5, dtype) + self.check_1d(func) + + def test_1d_dtype_str(self): + def func(n): 
+ return np.full(n, 4.5, 'bool_') + self.check_1d(func) + + def test_1d_dtype_str_alternative_spelling(self): + # like test_1d_dtype_str but using the shorthand type spelling + def func(n): + return np.full(n, 4.5, '?') + self.check_1d(func) + + def test_1d_dtype_non_const_str(self): + + @njit + def func(n, fv, dt): + return np.full(n, fv, dt) + + with self.assertRaises(TypingError) as raises: + func((5,), 4.5, 'int32') + + excstr = str(raises.exception) + msg = ("If np.full dtype is a string it must be a " + "string constant.") + self.assertIn(msg, excstr) + + def test_1d_dtype_invalid_str(self): + + @njit + def func(n, fv): + return np.full(n, fv, 'ABCDEF') + + with self.assertRaises(TypingError) as raises: + func((5,), 4.5) + + excstr = str(raises.exception) + self.assertIn("Invalid NumPy dtype specified: 'ABCDEF'", excstr) + + def test_2d(self): + def func(m, n): + return np.full((m, n), 4.5) + self.check_2d(func) + + def test_2d_dtype_kwarg(self): + def func(m, n): + return np.full((m, n), 1 + 4.5j, dtype=np.complex64) + self.check_2d(func) + + def test_2d_dtype_from_type(self): + # tests issue #2862 + def func(m, n): + return np.full((m, n), np.int32(1)) + self.check_2d(func) + + # Complex uses `.real`, imaginary part dropped + def func(m, n): + return np.full((m, n), np.complex128(1)) + self.check_2d(func) + + # and that if a dtype is specified, this influences the return type + def func(m, n): + return np.full((m, n), 1, dtype=np.int8) + self.check_2d(func) + + def test_2d_shape_dtypes(self): + # Test for issue #4575 + def func1(m, n): + return np.full((np.int16(m), np.int32(n)), 4.5) + self.check_2d(func1) + # Using a 64-bit value checks that 32 bit systems will downcast to intp + def func2(m, n): + return np.full((np.int64(m), np.int8(n)), 4.5) + self.check_2d(func2) + # Make sure an error is thrown if we can't downcast safely + if config.IS_32BITS: + cfunc = nrtjit(lambda m, n: np.full((m, n), 4.5)) + with self.assertRaises(ValueError): + 
cfunc(np.int64(1 << (32 - 1)), 1) + + def test_alloc_size(self): + width = types.intp.bitwidth + def gen_func(shape, value): + return lambda : np.full(shape, value) + # Under these values numba will segfault, but that's another issue + self.check_alloc_size(gen_func(1 << width - 2, 1)) + self.check_alloc_size(gen_func((1 << width - 8, 64), 1)) + + +class ConstructorLikeBaseTest(object): + + def mutate_array(self, arr): + try: + arr.fill(42) + except (TypeError, ValueError): + # Try something else (e.g. Numpy 1.6 with structured dtypes) + fill_value = b'x' * arr.dtype.itemsize + arr.fill(fill_value) + + def check_like(self, pyfunc, dtype): + def check_arr(arr): + expected = pyfunc(arr) + ret = cfunc(arr) + self.assertEqual(ret.size, expected.size) + self.assertEqual(ret.dtype, expected.dtype) + self.assertStridesEqual(ret, expected) + self.check_result_value(ret, expected) + # test writability + self.mutate_array(ret) + self.mutate_array(expected) + np.testing.assert_equal(ret, expected) + + orig = np.linspace(0, 5, 6).astype(dtype) + cfunc = nrtjit(pyfunc) + + for shape in (6, (2, 3), (1, 2, 3), (3, 1, 2), ()): + if shape == (): + arr = orig[-1:].reshape(()) + else: + arr = orig.reshape(shape) + check_arr(arr) + # Non-contiguous array + if arr.ndim > 0: + check_arr(arr[::2]) + # Check new array doesn't inherit readonly flag + arr.flags['WRITEABLE'] = False + # verify read-only + with self.assertRaises(ValueError): + arr[0] = 1 + check_arr(arr) + + # Scalar argument => should produce a 0-d array + check_arr(orig[0]) + + +class TestNdEmptyLike(ConstructorLikeBaseTest, TestCase): + + def setUp(self): + super(TestNdEmptyLike, self).setUp() + self.pyfunc = np.empty_like + + def check_result_value(self, ret, expected): + pass + + def test_like(self): + pyfunc = self.pyfunc + def func(arr): + return pyfunc(arr) + self.check_like(func, np.float64) + + def test_like_structured(self): + dtype = np.dtype([('a', np.int16), ('b', np.float32)]) + pyfunc = self.pyfunc + def 
func(arr): + return pyfunc(arr) + self.check_like(func, dtype) + + def test_like_dtype(self): + pyfunc = self.pyfunc + def func(arr): + return pyfunc(arr, np.int32) + self.check_like(func, np.float64) + + def test_like_dtype_instance(self): + dtype = np.dtype('int32') + pyfunc = self.pyfunc + def func(arr): + return pyfunc(arr, dtype) + self.check_like(func, np.float64) + + def test_like_dtype_structured(self): + dtype = np.dtype([('a', np.int16), ('b', np.float32)]) + pyfunc = self.pyfunc + def func(arr): + return pyfunc(arr, dtype) + self.check_like(func, np.float64) + + def test_like_dtype_kwarg(self): + pyfunc = self.pyfunc + def func(arr): + return pyfunc(arr, dtype=np.int32) + self.check_like(func, np.float64) + + def test_like_dtype_str_kwarg(self): + pyfunc = self.pyfunc + def func(arr): + return pyfunc(arr, dtype='int32') + self.check_like(func, np.float64) + + def test_like_dtype_str_kwarg_alternative_spelling(self): + pyfunc = self.pyfunc + def func(arr): + return pyfunc(arr, dtype='i4') + self.check_like(func, np.float64) + + def test_like_dtype_non_const_str(self): + pyfunc = self.pyfunc + + @njit + def func(n, dt): + return pyfunc(n, dt) + + with self.assertRaises(TypingError) as raises: + func(np.ones(4), 'int32') + + excstr = str(raises.exception) + msg = (f"If np.{self.pyfunc.__name__} dtype is a string it must be a " + "string constant.") + self.assertIn(msg, excstr) + self.assertIn( + '{}(array(float64, 1d, C), unicode_type)'.format(pyfunc.__name__), + excstr) + + def test_like_dtype_invalid_str(self): + pyfunc = self.pyfunc + + @njit + def func(n): + return pyfunc(n, 'ABCDEF') + + with self.assertRaises(TypingError) as raises: + func(np.ones(4)) + + excstr = str(raises.exception) + self.assertIn("Invalid NumPy dtype specified: 'ABCDEF'", excstr) + + +class TestNdZerosLike(TestNdEmptyLike): + + def setUp(self): + super(TestNdZerosLike, self).setUp() + self.pyfunc = np.zeros_like + + def check_result_value(self, ret, expected): + 
np.testing.assert_equal(ret, expected) + + def test_like_structured(self): + super(TestNdZerosLike, self).test_like_structured() + + def test_like_dtype_structured(self): + super(TestNdZerosLike, self).test_like_dtype_structured() + + +class TestNdOnesLike(TestNdZerosLike): + + def setUp(self): + super(TestNdOnesLike, self).setUp() + self.pyfunc = np.ones_like + self.expected_value = 1 + + # Not supported yet. + + @unittest.expectedFailure + def test_like_structured(self): + super(TestNdOnesLike, self).test_like_structured() + + @unittest.expectedFailure + def test_like_dtype_structured(self): + super(TestNdOnesLike, self).test_like_dtype_structured() + + +class TestNdFullLike(ConstructorLikeBaseTest, TestCase): + + def check_result_value(self, ret, expected): + np.testing.assert_equal(ret, expected) + + def test_like(self): + def func(arr): + return np.full_like(arr, 3.5) + self.check_like(func, np.float64) + + # Not supported yet. + @unittest.expectedFailure + def test_like_structured(self): + dtype = np.dtype([('a', np.int16), ('b', np.float32)]) + def func(arr): + return np.full_like(arr, 4.5) + self.check_like(func, dtype) + + def test_like_dtype(self): + def func(arr): + return np.full_like(arr, 4.5, np.bool_) + self.check_like(func, np.float64) + + def test_like_dtype_instance(self): + dtype = np.dtype('bool') + def func(arr): + return np.full_like(arr, 4.5, dtype) + self.check_like(func, np.float64) + + def test_like_dtype_kwarg(self): + def func(arr): + return np.full_like(arr, 4.5, dtype=np.bool_) + self.check_like(func, np.float64) + + def test_like_dtype_str_kwarg(self): + def func(arr): + return np.full_like(arr, 4.5, 'bool_') + self.check_like(func, np.float64) + + def test_like_dtype_str_kwarg_alternative_spelling(self): + def func(arr): + return np.full_like(arr, 4.5, dtype='?') + self.check_like(func, np.float64) + + def test_like_dtype_non_const_str_kwarg(self): + + @njit + def func(arr, fv, dt): + return np.full_like(arr, fv, dt) + + with 
self.assertRaises(TypingError) as raises: + func(np.ones(3,), 4.5, 'int32') + + excstr = str(raises.exception) + msg = ("If np.full_like dtype is a string it must be a " + "string constant.") + self.assertIn(msg, excstr) + + def test_like_dtype_invalid_str(self): + + @njit + def func(arr, fv): + return np.full_like(arr, fv, "ABCDEF") + + with self.assertRaises(TypingError) as raises: + func(np.ones(4), 3.4) + + excstr = str(raises.exception) + self.assertIn("Invalid NumPy dtype specified: 'ABCDEF'", excstr) + + +class TestNdIdentity(BaseTest): + + def check_identity(self, pyfunc): + self.check_outputs(pyfunc, [(3,)]) + + def test_identity(self): + def func(n): + return np.identity(n) + self.check_identity(func) + + def test_identity_dtype(self): + for dtype in (np.complex64, np.int16, np.bool_, np.dtype('bool'), + 'bool_'): + def func(n): + return np.identity(n, dtype) + self.check_identity(func) + + def test_like_dtype_non_const_str_kwarg(self): + + @njit + def func(n, dt): + return np.identity(n, dt) + + with self.assertRaises(TypingError) as raises: + func(4, 'int32') + + excstr = str(raises.exception) + msg = ("If np.identity dtype is a string it must be a " + "string constant.") + self.assertIn(msg, excstr) + + + +class TestNdEye(BaseTest): + + def test_eye_n(self): + def func(n): + return np.eye(n) + self.check_outputs(func, [(1,), (3,)]) + + def test_eye_n_dtype(self): + # check None option, dtype class, instance of dtype class + for dt in (None, np.complex128, np.complex64(1)): + def func(n, dtype=dt): + return np.eye(n, dtype=dtype) + self.check_outputs(func, [(1,), (3,)]) + + def test_eye_n_m(self): + def func(n, m): + return np.eye(n, m) + self.check_outputs(func, [(1, 2), (3, 2), (0, 3)]) + + def check_eye_n_m_k(self, func): + self.check_outputs(func, [(1, 2, 0), + (3, 4, 1), + (3, 4, -1), + (4, 3, -2), + (4, 3, -5), + (4, 3, 5)]) + + def test_eye_n_m_k(self): + def func(n, m, k): + return np.eye(n, m, k) + self.check_eye_n_m_k(func) + + def 
test_eye_n_m_k_dtype(self): + def func(n, m, k): + return np.eye(N=n, M=m, k=k, dtype=np.int16) + self.check_eye_n_m_k(func) + + def test_eye_n_m_k_dtype_instance(self): + dtype = np.dtype('int16') + def func(n, m, k): + return np.eye(N=n, M=m, k=k, dtype=dtype) + self.check_eye_n_m_k(func) + + +class TestNdDiag(TestCase): + + def setUp(self): + v = np.array([1, 2, 3]) + hv = np.array([[1, 2, 3]]) + vv = np.transpose(hv) + self.vectors = [v, hv, vv] + a3x4 = np.arange(12).reshape(3, 4) + a4x3 = np.arange(12).reshape(4, 3) + self.matricies = [a3x4, a4x3] + def func(q): + return np.diag(q) + self.py = func + self.jit = nrtjit(func) + + def func_kwarg(q, k=0): + return np.diag(q, k=k) + self.py_kw = func_kwarg + self.jit_kw = nrtjit(func_kwarg) + + def check_diag(self, pyfunc, nrtfunc, *args, **kwargs): + expected = pyfunc(*args, **kwargs) + computed = nrtfunc(*args, **kwargs) + self.assertEqual(computed.size, expected.size) + self.assertEqual(computed.dtype, expected.dtype) + # NOTE: stride not tested as np returns a RO view, nb returns new data + np.testing.assert_equal(expected, computed) + + # create a diag matrix from a vector + def test_diag_vect_create(self): + for d in self.vectors: + self.check_diag(self.py, self.jit, d) + + # create a diag matrix from a vector at a given offset + def test_diag_vect_create_kwarg(self): + for k in range(-10, 10): + for d in self.vectors: + self.check_diag(self.py_kw, self.jit_kw, d, k=k) + + # extract the diagonal + def test_diag_extract(self): + for d in self.matricies: + self.check_diag(self.py, self.jit, d) + + # extract a diagonal at a given offset + def test_diag_extract_kwarg(self): + for k in range(-4, 4): + for d in self.matricies: + self.check_diag(self.py_kw, self.jit_kw, d, k=k) + + # check error handling + def test_error_handling(self): + d = np.array([[[1.]]]) + cfunc = nrtjit(self.py) + + # missing arg + with self.assertRaises(TypeError): + cfunc() + + # > 2d + with self.assertRaises(TypingError): + cfunc(d) + 
with self.assertRaises(TypingError): + dfunc = nrtjit(self.py_kw) + dfunc(d, k=3) + + def test_bad_shape(self): + cfunc = nrtjit(self.py) + msg = '.*The argument "v" must be array-like.*' + with self.assertRaisesRegex(TypingError, msg) as raises: + cfunc(None) + +class TestLinspace(BaseTest): + + def test_linspace_2(self): + def pyfunc(n, m): + return np.linspace(n, m) + self.check_outputs(pyfunc, + [(0, 4), (1, 100), (-3.5, 2.5), (-3j, 2+3j), + (2, 1), (1+0.5j, 1.5j)]) + + def test_linspace_3(self): + def pyfunc(n, m, p): + return np.linspace(n, m, p) + self.check_outputs(pyfunc, + [(0, 4, 9), (1, 4, 3), (-3.5, 2.5, 8), + (-3j, 2+3j, 7), (2, 1, 0), + (1+0.5j, 1.5j, 5), (1, 1e100, 1)]) + + def test_linspace_accuracy(self): + # Checking linspace reasonably replicates NumPy's algorithm + # see https://github.com/numba/numba/issues/6768 + @nrtjit + def foo(n, m, p): + return np.linspace(n, m, p) + + n, m, p = 0.0, 1.0, 100 + self.assertPreciseEqual(foo(n, m, p), foo.py_func(n, m, p)) + + +class TestNpyEmptyKeyword(TestCase): + def _test_with_dtype_kw(self, dtype): + def pyfunc(shape): + return np.empty(shape, dtype=dtype) + + shapes = [1, 5, 9] + + cfunc = nrtjit(pyfunc) + for s in shapes: + expected = pyfunc(s) + got = cfunc(s) + self.assertEqual(expected.dtype, got.dtype) + self.assertEqual(expected.shape, got.shape) + + def test_with_dtype_kws(self): + for dtype in [np.int32, np.float32, np.complex64, np.dtype('complex64')]: + self._test_with_dtype_kw(dtype) + + def _test_with_shape_and_dtype_kw(self, dtype): + def pyfunc(shape): + return np.empty(shape=shape, dtype=dtype) + + shapes = [1, 5, 9] + + cfunc = nrtjit(pyfunc) + for s in shapes: + expected = pyfunc(s) + got = cfunc(s) + self.assertEqual(expected.dtype, got.dtype) + self.assertEqual(expected.shape, got.shape) + + def test_with_shape_and_dtype_kws(self): + for dtype in [np.int32, np.float32, np.complex64, np.dtype('complex64')]: + self._test_with_shape_and_dtype_kw(dtype) + + def test_empty_no_args(self): 
+ + def pyfunc(): + return np.empty() + + cfunc = nrtjit(pyfunc) + + # Trigger the compilation + # That will cause a TypingError due to missing shape argument + with self.assertRaises(TypingError): + cfunc() + + +class TestNpArray(MemoryLeakMixin, BaseTest): + + def test_0d(self): + def pyfunc(arg): + return np.array(arg) + + cfunc = nrtjit(pyfunc) + got = cfunc(42) + self.assertPreciseEqual(got, np.array(42, dtype=np.intp)) + got = cfunc(2.5) + self.assertPreciseEqual(got, np.array(2.5)) + + def test_0d_with_dtype(self): + def pyfunc(arg): + return np.array(arg, dtype=np.int16) + + self.check_outputs(pyfunc, [(42,), (3.5,)]) + + def test_1d(self): + def pyfunc(arg): + return np.array(arg) + + cfunc = nrtjit(pyfunc) + # A list + got = cfunc([2, 3, 42]) + self.assertPreciseEqual(got, np.intp([2, 3, 42])) + # A heterogeneous tuple + got = cfunc((1.0, 2.5j, 42)) + self.assertPreciseEqual(got, np.array([1.0, 2.5j, 42])) + # An empty tuple + got = cfunc(()) + self.assertPreciseEqual(got, np.float64(())) + + def test_1d_with_dtype(self): + def pyfunc(arg): + return np.array(arg, dtype=np.float32) + + self.check_outputs(pyfunc, + [([2, 42],), + ([3.5, 1.0],), + ((1, 3.5, 42),), + ((),), + ]) + + def test_1d_with_str_dtype(self): + def pyfunc(arg): + return np.array(arg, dtype='float32') + + self.check_outputs(pyfunc, + [([2, 42],), + ([3.5, 1.0],), + ((1, 3.5, 42),), + ((),), + ]) + + def test_1d_with_non_const_str_dtype(self): + + @njit + def func(arg, dt): + return np.array(arg, dtype=dt) + + with self.assertRaises(TypingError) as raises: + func((5, 3), 'int32') + + excstr = str(raises.exception) + msg = (f"If np.array dtype is a string it must be a " + "string constant.") + self.assertIn(msg, excstr) + + def test_2d(self): + def pyfunc(arg): + return np.array(arg) + + cfunc = nrtjit(pyfunc) + # A list of tuples + got = cfunc([(1, 2), (3, 4)]) + self.assertPreciseEqual(got, np.intp([[1, 2], [3, 4]])) + got = cfunc([(1, 2.5), (3, 4.5)]) + self.assertPreciseEqual(got, 
np.float64([[1, 2.5], [3, 4.5]])) + # A tuple of lists + got = cfunc(([1, 2], [3, 4])) + self.assertPreciseEqual(got, np.intp([[1, 2], [3, 4]])) + got = cfunc(([1, 2], [3.5, 4.5])) + self.assertPreciseEqual(got, np.float64([[1, 2], [3.5, 4.5]])) + # A tuple of tuples + got = cfunc(((1.5, 2), (3.5, 4.5))) + self.assertPreciseEqual(got, np.float64([[1.5, 2], [3.5, 4.5]])) + got = cfunc(((), ())) + self.assertPreciseEqual(got, np.float64(((), ()))) + + def test_2d_with_dtype(self): + def pyfunc(arg): + return np.array(arg, dtype=np.int32) + + cfunc = nrtjit(pyfunc) + got = cfunc([(1, 2.5), (3, 4.5)]) + self.assertPreciseEqual(got, np.int32([[1, 2], [3, 4]])) + + def test_raises(self): + + def pyfunc(arg): + return np.array(arg) + + cfunc = nrtjit(pyfunc) + + @contextlib.contextmanager + def check_raises(msg): + with self.assertRaises(TypingError) as raises: + yield + self.assertIn(msg, str(raises.exception)) + + with check_raises(('array(float64, 1d, C) not allowed in a ' + 'homogeneous sequence')): + cfunc(np.array([1.])) + + with check_raises(('type Tuple(int64, reflected list(int64)) ' + 'does not have a regular shape')): + cfunc((np.int64(1), [np.int64(2)])) + + with check_raises( + "cannot convert Tuple(int64, Record(a[type=int32;offset=0]," + "b[type=float32;offset=4];8;False)) to a homogeneous type", + ): + st = np.dtype([('a', 'i4'), ('b', 'f4')]) + val = np.zeros(1, dtype=st)[0] + cfunc(((1, 2), (np.int64(1), val))) + + def test_bad_array(self): + @njit + def func(obj): + return np.array(obj) + + msg = '.*The argument "object" must be array-like.*' + with self.assertRaisesRegex(TypingError, msg) as raises: + func(None) + + def test_bad_dtype(self): + @njit + def func(obj, dt): + return np.array(obj, dt) + + msg = '.*The argument "dtype" must be a data-type if it is provided.*' + with self.assertRaisesRegex(TypingError, msg) as raises: + func(5, 4) + + +class TestNpConcatenate(MemoryLeakMixin, TestCase): + """ + Tests for np.concatenate(). 
+ """ + + def _3d_arrays(self): + a = np.arange(24).reshape((4, 3, 2)) + b = a + 10 + c = (b + 10).copy(order='F') + d = (c + 10)[::-1] + e = (d + 10)[...,::-1] + return a, b, c, d, e + + @contextlib.contextmanager + def assert_invalid_sizes_over_dim(self, axis): + with self.assertRaises(ValueError) as raises: + yield + self.assertIn("input sizes over dimension %d do not match" % axis, + str(raises.exception)) + + def test_3d(self): + pyfunc = np_concatenate2 + cfunc = nrtjit(pyfunc) + + def check(a, b, c, axis): + for ax in (axis, -3 + axis): + expected = pyfunc(a, b, c, axis=ax) + got = cfunc(a, b, c, axis=ax) + self.assertPreciseEqual(got, expected) + + def check_all_axes(a, b, c): + for axis in range(3): + check(a, b, c, axis) + + a, b, c, d, e = self._3d_arrays() + + # Inputs with equal sizes + # C, C, C + check_all_axes(a, b, b) + # C, C, F + check_all_axes(a, b, c) + # F, F, F + check_all_axes(a.T, b.T, a.T) + # F, F, C + check_all_axes(a.T, b.T, c.T) + # F, F, A + check_all_axes(a.T, b.T, d.T) + # A, A, A + # (note Numpy may select the layout differently for other inputs) + check_all_axes(d.T, e.T, d.T) + + # Inputs with compatible sizes + check(a[1:], b, c[::-1], axis=0) + check(a, b[:,1:], c, axis=1) + check(a, b, c[:,:,1:], axis=2) + + # Different but compatible dtypes + check_all_axes(a, b.astype(np.float64), b) + + # Exceptions leak references + self.disable_leak_check() + + # Incompatible sizes + for axis in (1, 2, -2, -1): + with self.assert_invalid_sizes_over_dim(0): + cfunc(a[1:], b, b, axis) + for axis in (0, 2, -3, -1): + with self.assert_invalid_sizes_over_dim(1): + cfunc(a, b[:,1:], b, axis) + + def test_3d_no_axis(self): + pyfunc = np_concatenate1 + cfunc = nrtjit(pyfunc) + + def check(a, b, c): + expected = pyfunc(a, b, c) + got = cfunc(a, b, c) + self.assertPreciseEqual(got, expected) + + a, b, c, d, e = self._3d_arrays() + + # Inputs with equal sizes + # C, C, C + check(a, b, b) + # C, C, F + check(a, b, c) + # F, F, F + check(a.T, b.T, 
a.T) + # F, F, C + check(a.T, b.T, c.T) + # F, F, A + check(a.T, b.T, d.T) + # A, A, A + # (note Numpy may select the layout differently for other inputs) + check(d.T, e.T, d.T) + + # Inputs with compatible sizes + check(a[1:], b, c[::-1]) + + # Exceptions leak references + self.disable_leak_check() + + # Incompatible sizes + with self.assert_invalid_sizes_over_dim(1): + cfunc(a, b[:,1:], b) + + def test_typing_errors(self): + pyfunc = np_concatenate1 + cfunc = nrtjit(pyfunc) + + a = np.arange(15) + b = a.reshape((3, 5)) + c = a.astype(np.dtype([('x', np.int8)])) + d = np.array(42) + + # Different dimensionalities + with self.assertTypingError() as raises: + cfunc(a, b, b) + self.assertIn("all the input arrays must have same number of dimensions", + str(raises.exception)) + + # Incompatible dtypes + with self.assertTypingError() as raises: + cfunc(a, c, c) + self.assertIn("input arrays must have compatible dtypes", + str(raises.exception)) + + # 0-d arrays + with self.assertTypingError() as raises: + cfunc(d, d, d) + self.assertIn("zero-dimensional arrays cannot be concatenated", + str(raises.exception)) + + # non-tuple input + with self.assertTypingError() as raises: + cfunc(c, 1, c) + self.assertIn('expecting a non-empty tuple of arrays', str(raises.exception)) + + +@unittest.skipUnless(hasattr(np, "stack"), "this Numpy doesn't have np.stack()") +class TestNpStack(MemoryLeakMixin, TestCase): + """ + Tests for np.stack(). + """ + + def _3d_arrays(self): + a = np.arange(24).reshape((4, 3, 2)) + b = a + 10 + c = (b + 10).copy(order='F') + d = (c + 10)[::-1] + e = (d + 10)[...,::-1] + return a, b, c, d, e + + @contextlib.contextmanager + def assert_invalid_sizes(self): + with self.assertRaises(ValueError) as raises: + yield + self.assertIn("all input arrays must have the same shape", + str(raises.exception)) + + def check_stack(self, pyfunc, cfunc, args): + expected = pyfunc(*args) + got = cfunc(*args) + # Numba doesn't choose the same layout as Numpy. 
+ # We would like to check the result is contiguous, but we can't + # rely on the "flags" attribute when there are 1-sized + # dimensions. + self.assertEqual(got.shape, expected.shape) + self.assertPreciseEqual(got.flatten(), expected.flatten()) + + def check_3d(self, pyfunc, cfunc, generate_starargs): + def check(a, b, c, args): + self.check_stack(pyfunc, cfunc, (a, b, c) + args) + + def check_all_axes(a, b, c): + for args in generate_starargs(): + check(a, b, c, args) + + a, b, c, d, e = self._3d_arrays() + + # C, C, C + check_all_axes(a, b, b) + # C, C, F + check_all_axes(a, b, c) + # F, F, F + check_all_axes(a.T, b.T, a.T) + # F, F, C + check_all_axes(a.T, b.T, c.T) + # F, F, A + check_all_axes(a.T, b.T, d.T) + # A, A, A + check_all_axes(d.T, e.T, d.T) + + # Different but compatible dtypes + check_all_axes(a, b.astype(np.float64), b) + + def check_runtime_errors(self, cfunc, generate_starargs): + # Exceptions leak references + self.assert_no_memory_leak() + self.disable_leak_check() + + # Inputs have different shapes + a, b, c, d, e = self._3d_arrays() + with self.assert_invalid_sizes(): + args = next(generate_starargs()) + cfunc(a[:-1], b, c, *args) + + def test_3d(self): + """ + stack(3d arrays, axis) + """ + pyfunc = np_stack2 + cfunc = nrtjit(pyfunc) + + def generate_starargs(): + for axis in range(3): + yield (axis,) + yield (-3 + axis,) + + self.check_3d(pyfunc, cfunc, generate_starargs) + self.check_runtime_errors(cfunc, generate_starargs) + + def test_3d_no_axis(self): + """ + stack(3d arrays) + """ + pyfunc = np_stack1 + cfunc = nrtjit(pyfunc) + + def generate_starargs(): + yield() + + self.check_3d(pyfunc, cfunc, generate_starargs) + self.check_runtime_errors(cfunc, generate_starargs) + + def test_0d(self): + """ + stack(0d arrays) + """ + pyfunc = np_stack1 + cfunc = nrtjit(pyfunc) + + a = np.array(42) + b = np.array(-5j) + c = np.array(True) + + self.check_stack(pyfunc, cfunc, (a, b, c)) + + def check_xxstack(self, pyfunc, cfunc): + """ + 3d and 0d 
tests for hstack(), vstack(), dstack(). + """ + def generate_starargs(): + yield() + + self.check_3d(pyfunc, cfunc, generate_starargs) + # 0d + a = np.array(42) + b = np.array(-5j) + c = np.array(True) + self.check_stack(pyfunc, cfunc, (a, b, a)) + + def test_hstack(self): + pyfunc = np_hstack + cfunc = nrtjit(pyfunc) + + self.check_xxstack(pyfunc, cfunc) + # 1d + a = np.arange(5) + b = np.arange(6) + 10 + self.check_stack(pyfunc, cfunc, (a, b, b)) + # 2d + a = np.arange(6).reshape((2, 3)) + b = np.arange(8).reshape((2, 4)) + 100 + self.check_stack(pyfunc, cfunc, (a, b, a)) + + def test_vstack(self): + # Since np.row_stack is an alias for np.vstack, it does not need a + # separate Numba implementation. For every test for np.vstack, the same + # test for np.row_stack has been added. + functions = [np_vstack, np_row_stack] + for pyfunc in functions: + cfunc = nrtjit(pyfunc) + + self.check_xxstack(pyfunc, cfunc) + # 1d + a = np.arange(5) + b = a + 10 + self.check_stack(pyfunc, cfunc, (a, b, b)) + # 2d + a = np.arange(6).reshape((3, 2)) + b = np.arange(8).reshape((4, 2)) + 100 + self.check_stack(pyfunc, cfunc, (a, b, b)) + + def test_dstack(self): + pyfunc = np_dstack + cfunc = nrtjit(pyfunc) + + self.check_xxstack(pyfunc, cfunc) + # 1d + a = np.arange(5) + b = a + 10 + self.check_stack(pyfunc, cfunc, (a, b, b)) + # 2d + a = np.arange(12).reshape((3, 4)) + b = a + 100 + self.check_stack(pyfunc, cfunc, (a, b, b)) + + def test_column_stack(self): + pyfunc = np_column_stack + cfunc = nrtjit(pyfunc) + + a = np.arange(4) + b = a + 10 + c = np.arange(12).reshape((4, 3)) + self.check_stack(pyfunc, cfunc, (a, b, c)) + + # Exceptions leak references + self.assert_no_memory_leak() + self.disable_leak_check() + + # Invalid dims + a = np.array(42) + with self.assertTypingError(): + cfunc((a, a, a)) + a = a.reshape((1, 1, 1)) + with self.assertTypingError(): + cfunc((a, a, a)) + + def test_bad_arrays(self): + for pyfunc in (np_stack1, np_hstack, np_vstack, np_dstack, 
np_column_stack): + cfunc = nrtjit(pyfunc) + c = np.arange(12).reshape((4, 3)) + + # non-tuple input + with self.assertTypingError() as raises: + cfunc(c, 1, c) + self.assertIn('expecting a non-empty tuple of arrays', str(raises.exception)) + + +def benchmark_refct_speed(): + def pyfunc(x, y, t): + """Swap array x and y for t number of times + """ + for i in range(t): + x, y = y, x + return x, y + + cfunc = nrtjit(pyfunc) + + x = np.random.random(100) + y = np.random.random(100) + t = 10000 + + def bench_pyfunc(): + pyfunc(x, y, t) + + def bench_cfunc(): + cfunc(x, y, t) + + python_time = utils.benchmark(bench_pyfunc) + numba_time = utils.benchmark(bench_cfunc) + print(python_time) + print(numba_time) + + +if __name__ == "__main__": + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_dyn_func.py b/venv/lib/python3.10/site-packages/numba/tests/test_dyn_func.py new file mode 100644 index 0000000000000000000000000000000000000000..df3b7aac1c1401a4d6ca9c205b2e002167003c08 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_dyn_func.py @@ -0,0 +1,43 @@ +import numpy as np + +import numba +from numba.tests.support import TestCase + + +class Issue455(object): + """ + Test code from issue 455. + """ + + def __init__(self): + self.f = [] + + def create_f(self): + code = """ + def f(x): + n = x.shape[0] + for i in range(n): + x[i] = 1. 
+ """ + d = {} + exec(code.strip(), d) + self.f.append(numba.jit("void(f8[:])", nopython=True)(d['f'])) + + def call_f(self): + a = np.zeros(10) + for f in self.f: + f(a) + return a + + +class TestDynFunc(TestCase): + + def test_issue_455(self): + inst = Issue455() + inst.create_f() + a = inst.call_f() + self.assertPreciseEqual(a, np.ones_like(a)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_entrypoints.py b/venv/lib/python3.10/site-packages/numba/tests/test_entrypoints.py new file mode 100644 index 0000000000000000000000000000000000000000..7391972af8954108e48696efd69459d7fc665264 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_entrypoints.py @@ -0,0 +1,227 @@ +import sys +from unittest import mock + +import types +import warnings +import unittest +import os +import subprocess +import threading + +from numba import njit +from numba.tests.support import TestCase +from numba.testing.main import _TIMEOUT as _RUNNER_TIMEOUT + +from importlib import metadata as importlib_metadata + +_TEST_TIMEOUT = _RUNNER_TIMEOUT - 60. 
+ + +class _DummyClass(object): + def __init__(self, value): + self.value = value + + def __repr__(self): + return '_DummyClass(%f, %f)' % self.value + + +class TestEntrypoints(TestCase): + """ + Test registration of init() functions from Numba extensions + """ + + def test_init_entrypoint(self): + # loosely based on Pandas test from: + # https://github.com/pandas-dev/pandas/pull/27488 + + mod = mock.Mock(__name__='_test_numba_extension') + + try: + # will remove this module at the end of the test + sys.modules[mod.__name__] = mod + + my_entrypoint = importlib_metadata.EntryPoint( + 'init', '_test_numba_extension:init_func', 'numba_extensions', + ) + + with mock.patch.object( + importlib_metadata, + 'entry_points', + return_value={'numba_extensions': (my_entrypoint,)}, + ): + + from numba.core import entrypoints + + # Allow reinitialization + entrypoints._already_initialized = False + + entrypoints.init_all() + + # was our init function called? + mod.init_func.assert_called_once() + + # ensure we do not initialize twice + entrypoints.init_all() + mod.init_func.assert_called_once() + finally: + # remove fake module + if mod.__name__ in sys.modules: + del sys.modules[mod.__name__] + + def test_entrypoint_tolerance(self): + # loosely based on Pandas test from: + # https://github.com/pandas-dev/pandas/pull/27488 + + mod = mock.Mock(__name__='_test_numba_bad_extension') + mod.configure_mock(**{'init_func.side_effect': ValueError('broken')}) + + try: + # will remove this module at the end of the test + sys.modules[mod.__name__] = mod + + my_entrypoint = importlib_metadata.EntryPoint( + 'init', + '_test_numba_bad_extension:init_func', + 'numba_extensions', + ) + + with mock.patch.object( + importlib_metadata, + 'entry_points', + return_value={'numba_extensions': (my_entrypoint,)}, + ): + + from numba.core import entrypoints + # Allow reinitialization + entrypoints._already_initialized = False + + with warnings.catch_warnings(record=True) as w: + entrypoints.init_all() + + 
bad_str = "Numba extension module '_test_numba_bad_extension'" + for x in w: + if bad_str in str(x): + break + else: + raise ValueError("Expected warning message not found") + + # was our init function called? + mod.init_func.assert_called_once() + + finally: + # remove fake module + if mod.__name__ in sys.modules: + del sys.modules[mod.__name__] + + _EP_MAGIC_TOKEN = 'RUN_ENTRY' + + @unittest.skipIf(os.environ.get('_EP_MAGIC_TOKEN', None) != _EP_MAGIC_TOKEN, + "needs token") + def test_entrypoint_handles_type_extensions(self): + # loosely based on Pandas test from: + # https://github.com/pandas-dev/pandas/pull/27488 + import numba + + def init_function(): + # This init function would normally just call a module init via + # import or similar, for the sake of testing, inline registration + # of how to handle the global "_DummyClass". + class DummyType(numba.types.Type): + def __init__(self): + super(DummyType, self).__init__(name='DummyType') + + @numba.extending.typeof_impl.register(_DummyClass) + def typer_DummyClass(val, c): + return DummyType() + + @numba.extending.register_model(DummyType) + class DummyModel(numba.extending.models.StructModel): + def __init__(self, dmm, fe_type): + members = [ + ('value', numba.types.float64), ] + super(DummyModel, self).__init__(dmm, fe_type, members) + + @numba.extending.unbox(DummyType) + def unbox_dummy(typ, obj, c): + value_obj = c.pyapi.object_getattr_string(obj, "value") + dummy_struct_proxy = numba.core.cgutils.create_struct_proxy(typ) + dummy_struct = dummy_struct_proxy(c.context, c.builder) + dummy_struct.value = c.pyapi.float_as_double(value_obj) + c.pyapi.decref(value_obj) + err_flag = c.pyapi.err_occurred() + is_error = numba.core.cgutils.is_not_null(c.builder, err_flag) + return numba.extending.NativeValue(dummy_struct._getvalue(), + is_error=is_error) + + @numba.extending.box(DummyType) + def box_dummy(typ, val, c): + dummy_struct_proxy = numba.core.cgutils.create_struct_proxy(typ) + dummy_struct = 
dummy_struct_proxy(c.context, c.builder) + value_obj = c.pyapi.float_from_double(dummy_struct.value) + serialized_clazz = c.pyapi.serialize_object(_DummyClass) + class_obj = c.pyapi.unserialize(serialized_clazz) + res = c.pyapi.call_function_objargs(class_obj, (value_obj,)) + c.pyapi.decref(value_obj) + c.pyapi.decref(class_obj) + return res + + mod = types.ModuleType("_test_numba_init_sequence") + mod.init_func = init_function + + try: + # will remove this module at the end of the test + sys.modules[mod.__name__] = mod + + my_entrypoint = importlib_metadata.EntryPoint( + 'init', + '_test_numba_init_sequence:init_func', + 'numba_extensions', + ) + + with mock.patch.object( + importlib_metadata, + 'entry_points', + return_value={'numba_extensions': (my_entrypoint,)}, + ): + @njit + def foo(x): + return x + + ival = _DummyClass(10) + foo(ival) + finally: + # remove fake module + if mod.__name__ in sys.modules: + del sys.modules[mod.__name__] + + def run_cmd(self, cmdline, env): + popen = subprocess.Popen(cmdline, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env) + # finish in _TEST_TIMEOUT seconds or kill it + timeout = threading.Timer(_TEST_TIMEOUT, popen.kill) + try: + timeout.start() + out, err = popen.communicate() + if popen.returncode != 0: + raise AssertionError( + "process failed with code %s: stderr follows\n%s\n" % + (popen.returncode, err.decode())) + return out.decode(), err.decode() + finally: + timeout.cancel() + return None, None + + def test_entrypoint_extension_sequence(self): + env_copy = os.environ.copy() + env_copy['_EP_MAGIC_TOKEN'] = str(self._EP_MAGIC_TOKEN) + themod = self.__module__ + thecls = type(self).__name__ + methname = 'test_entrypoint_handles_type_extensions' + injected_method = '%s.%s.%s' % (themod, thecls, methname) + cmdline = [sys.executable, "-m", "numba.runtests", injected_method] + out, err = self.run_cmd(cmdline, env_copy) + _DEBUG = False + if _DEBUG: + print(out, err) diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/test_enums.py b/venv/lib/python3.10/site-packages/numba/tests/test_enums.py new file mode 100644 index 0000000000000000000000000000000000000000..68734dcff595fd0126138838e4f78f33720033b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_enums.py @@ -0,0 +1,181 @@ +""" +Tests for enum support. +""" + + +import numpy as np +import unittest +from numba import jit, vectorize, int8, int16, int32 + +from numba.tests.support import TestCase +from numba.tests.enum_usecases import (Color, Shape, Shake, + Planet, RequestError, + IntEnumWithNegatives) + + +def compare_usecase(a, b): + return a == b, a != b, a is b, a is not b + + +def getattr_usecase(a): + # Lookup of a enum member on its class + return a is Color.red + + +def getitem_usecase(a): + """Lookup enum member by string name""" + return a is Color['red'] + + +def identity_usecase(a, b, c): + return (a is Shake.mint, + b is Shape.circle, + c is RequestError.internal_error, + ) + + +def make_constant_usecase(const): + def constant_usecase(a): + return a is const + return constant_usecase + + +def return_usecase(a, b, pred): + return a if pred else b + + +def int_coerce_usecase(x): + # Implicit coercion of intenums to ints + if x > RequestError.internal_error: + return x - RequestError.not_found + else: + return x + Shape.circle + +def int_cast_usecase(x): + # Explicit coercion of intenums to ints + if x > int16(RequestError.internal_error): + return x - int32(RequestError.not_found) + else: + return x + int16(Shape.circle) + + +def vectorize_usecase(x): + if x != RequestError.not_found: + return RequestError['internal_error'] + else: + return RequestError.dummy + + +class BaseEnumTest(object): + + def test_compare(self): + pyfunc = compare_usecase + cfunc = jit(nopython=True)(pyfunc) + + for args in self.pairs: + self.assertPreciseEqual(pyfunc(*args), cfunc(*args)) + + def test_return(self): + """ + Passing and returning enum members. 
+ """ + pyfunc = return_usecase + cfunc = jit(nopython=True)(pyfunc) + + for pair in self.pairs: + for pred in (True, False): + args = pair + (pred,) + self.assertIs(pyfunc(*args), cfunc(*args)) + + def check_constant_usecase(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + + for arg in self.values: + self.assertPreciseEqual(pyfunc(arg), cfunc(arg)) + + def test_constant(self): + self.check_constant_usecase(getattr_usecase) + self.check_constant_usecase(getitem_usecase) + self.check_constant_usecase(make_constant_usecase(self.values[0])) + + +class TestEnum(BaseEnumTest, TestCase): + """ + Tests for Enum classes and members. + """ + values = [Color.red, Color.green] + + pairs = [ + (Color.red, Color.red), + (Color.red, Color.green), + (Shake.mint, Shake.vanilla), + (Planet.VENUS, Planet.MARS), + (Planet.EARTH, Planet.EARTH), + ] + + def test_identity(self): + """ + Enum with equal values should not compare identical + """ + pyfunc = identity_usecase + cfunc = jit(nopython=True)(pyfunc) + args = (Color.blue, Color.green, Shape.square) + self.assertPreciseEqual(pyfunc(*args), cfunc(*args)) + + +class TestIntEnum(BaseEnumTest, TestCase): + """ + Tests for IntEnum classes and members. 
+ """ + values = [Shape.circle, Shape.square] + + pairs = [ + (Shape.circle, Shape.circle), + (Shape.circle, Shape.square), + (RequestError.not_found, RequestError.not_found), + (RequestError.internal_error, RequestError.not_found), + ] + + def test_int_coerce(self): + pyfunc = int_coerce_usecase + cfunc = jit(nopython=True)(pyfunc) + + for arg in [300, 450, 550]: + self.assertPreciseEqual(pyfunc(arg), cfunc(arg)) + + def test_int_cast(self): + pyfunc = int_cast_usecase + cfunc = jit(nopython=True)(pyfunc) + + for arg in [300, 450, 550]: + self.assertPreciseEqual(pyfunc(arg), cfunc(arg)) + + def test_vectorize(self): + cfunc = vectorize(nopython=True)(vectorize_usecase) + arg = np.array([2, 404, 500, 404]) + sol = np.array([vectorize_usecase(i) for i in arg], dtype=arg.dtype) + self.assertPreciseEqual(sol, cfunc(arg)) + + def test_hash(self): + def pyfun(x): + return hash(x) + cfunc = jit(nopython=True)(pyfun) + for member in IntEnumWithNegatives: + self.assertPreciseEqual(pyfun(member), cfunc(member)) + + def test_int_shape_cast(self): + def pyfun_empty(x): + return np.empty((x, x), dtype='int64').fill(-1) + def pyfun_zeros(x): + return np.zeros((x, x), dtype='int64') + def pyfun_ones(x): + return np.ones((x, x), dtype='int64') + for pyfun in [pyfun_empty, pyfun_zeros, pyfun_ones]: + cfunc = jit(nopython=True)(pyfun) + for member in IntEnumWithNegatives: + if member >= 0: + self.assertPreciseEqual(pyfun(member), cfunc(member)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_errorhandling.py b/venv/lib/python3.10/site-packages/numba/tests/test_errorhandling.py new file mode 100644 index 0000000000000000000000000000000000000000..3d8f0091c571793c0281ef9ff07c1ac448038f0d --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_errorhandling.py @@ -0,0 +1,469 @@ +""" +Unspecified error handling tests +""" + +import numpy as np +import os +import warnings + +from numba import jit, njit, 
types +from numba.core import errors +from numba.experimental import structref +from numba.extending import (overload, intrinsic, overload_method, + overload_attribute) +from numba.core.compiler import CompilerBase +from numba.core.untyped_passes import (TranslateByteCode, FixupArgs, + IRProcessing,) +from numba.core.typed_passes import (NopythonTypeInference, DeadCodeElimination, + NoPythonBackend, NativeLowering) +from numba.core.compiler_machinery import PassManager +from numba.core.types.functions import _err_reasons as error_reasons + +from numba.tests.support import (skip_parfors_unsupported, override_config, + SerialMixin, skip_unless_cffi, + skip_unless_scipy, TestCase) +import unittest + + +class TestErrorHandlingBeforeLowering(unittest.TestCase): + + def test_unsupported_make_function_return_inner_func(self): + def func(x): + """ return the closure """ + z = x + 1 + + def inner(x): + return x + z + return inner + + for pipeline in jit, njit: + with self.assertRaises(errors.TypingError) as raises: + pipeline(func)(1) + + expected = "Cannot capture the non-constant value" + self.assertIn(expected, str(raises.exception)) + + +class TestUnsupportedReporting(unittest.TestCase): + + def test_unsupported_numpy_function(self): + # np.asanyarray(list) currently unsupported + @njit + def func(): + np.asanyarray([1,2,3]) + + with self.assertRaises(errors.TypingError) as raises: + func() + + expected = "Use of unsupported NumPy function 'numpy.asanyarray'" + self.assertIn(expected, str(raises.exception)) + + +class TestMiscErrorHandling(unittest.TestCase): + + def test_use_of_exception_for_flow_control(self): + # constant inference uses exceptions with no Loc specified to determine + # flow control, this asserts that the construction of the lowering + # error context handler works in the case of an exception with no Loc + # specified. See issue #3135. 
+ @njit + def fn(x): + return 10**x + + a = np.array([1.0],dtype=np.float64) + fn(a) # should not raise + + def test_commented_func_definition_is_not_a_definition(self): + # See issue #4056, the commented def should not be found as the + # definition for reporting purposes when creating the synthetic + # traceback because it is commented! Use of def in docstring would also + # cause this issue hence is tested. + + def foo_commented(): + #def commented_definition() + raise Exception('test_string') + + def foo_docstring(): + """ def docstring containing def might match function definition!""" + raise Exception('test_string') + + for func in (foo_commented, foo_docstring): + with self.assertRaises(Exception) as raises: + func() + + self.assertIn("test_string", str(raises.exception)) + + def test_use_of_ir_unknown_loc(self): + # for context see # 3390 + class TestPipeline(CompilerBase): + def define_pipelines(self): + name = 'bad_DCE_pipeline' + pm = PassManager(name) + pm.add_pass(TranslateByteCode, "analyzing bytecode") + pm.add_pass(FixupArgs, "fix up args") + pm.add_pass(IRProcessing, "processing IR") + # remove dead before type inference so that the Arg node is + # removed and the location of the arg cannot be found + pm.add_pass(DeadCodeElimination, "DCE") + # typing + pm.add_pass(NopythonTypeInference, "nopython frontend") + pm.add_pass(NativeLowering, "native lowering") + pm.add_pass(NoPythonBackend, "nopython mode backend") + pm.finalize() + return [pm] + + @njit(pipeline_class=TestPipeline) + def f(a): + return 0 + + with self.assertRaises(errors.TypingError) as raises: + f(iter([1,2])) # use a type that Numba doesn't recognize + + expected = 'File "unknown location", line 0:' + self.assertIn(expected, str(raises.exception)) + + def check_write_to_globals(self, func): + with self.assertRaises(errors.TypingError) as raises: + func() + + expected = ["The use of a", "in globals, is not supported as globals"] + for ex in expected: + self.assertIn(ex, 
str(raises.exception)) + + def test_handling_of_write_to_reflected_global(self): + from numba.tests.errorhandling_usecases import global_reflected_write + self.check_write_to_globals(njit(global_reflected_write)) + + def test_handling_of_write_to_typed_dict_global(self): + from numba.tests.errorhandling_usecases import global_dict_write + self.check_write_to_globals(njit(global_dict_write)) + + @skip_parfors_unsupported + def test_handling_forgotten_numba_internal_import(self): + @njit(parallel=True) + def foo(): + for i in prange(10): # noqa: F821 prange is not imported + pass + + with self.assertRaises(errors.TypingError) as raises: + foo() + + expected = ("'prange' looks like a Numba internal function, " + "has it been imported") + self.assertIn(expected, str(raises.exception)) + + def test_handling_unsupported_generator_expression(self): + def foo(): + (x for x in range(10)) + + expected = "The use of yield in a closure is unsupported." + + for dec in jit(forceobj=True), njit: + with self.assertRaises(errors.UnsupportedError) as raises: + dec(foo)() + self.assertIn(expected, str(raises.exception)) + + def test_handling_undefined_variable(self): + @njit + def foo(): + return a # noqa: F821 + + expected = "NameError: name 'a' is not defined" + + with self.assertRaises(errors.TypingError) as raises: + foo() + self.assertIn(expected, str(raises.exception)) + + +class TestErrorMessages(unittest.TestCase): + + def test_specific_error(self): + + given_reason = "specific_reason" + + def foo(): + pass + + @overload(foo) + def ol_foo(): + raise errors.NumbaValueError(given_reason) + + @njit + def call_foo(): + foo() + + with self.assertRaises(errors.TypingError) as raises: + call_foo() + + excstr = str(raises.exception) + self.assertIn(error_reasons['specific_error'].splitlines()[0], excstr) + self.assertIn(given_reason, excstr) + + def test_no_match_error(self): + + def foo(): + pass + + @overload(foo) + def ol_foo(): + return None # emulate no impl available for type + 
+ @njit + def call_foo(): + foo() + + with self.assertRaises(errors.TypingError) as raises: + call_foo() + + excstr = str(raises.exception) + self.assertIn("No match", excstr) + + @skip_unless_scipy + def test_error_function_source_is_correct(self): + """ Checks that the reported source location for an overload is the + overload implementation source, not the actual function source from the + target library.""" + + @njit + def foo(): + np.linalg.svd("chars") + + with self.assertRaises(errors.TypingError) as raises: + foo() + + excstr = str(raises.exception) + self.assertIn(error_reasons['specific_error'].splitlines()[0], excstr) + expected_file = os.path.join("numba", "np", "linalg.py") + expected = f"Overload in function 'svd_impl': File: {expected_file}:" + self.assertIn(expected.format(expected_file), excstr) + + def test_concrete_template_source(self): + # hits ConcreteTemplate + @njit + def foo(): + return 'a' + 1 + + with self.assertRaises(errors.TypingError) as raises: + foo() + + excstr = str(raises.exception) + + self.assertIn("Overload of function 'add'", excstr) + # there'll be numerous matched templates that don't work but as they + # are mostly "overload"s they'll just appear as "No match". 
+ self.assertIn("No match.", excstr) + + def test_abstract_template_source(self): + # hits AbstractTemplate + @njit + def foo(): + return len(1) + + with self.assertRaises(errors.TypingError) as raises: + foo() + + excstr = str(raises.exception) + self.assertIn("Overload of function 'len'", excstr) + + def test_callable_template_source(self): + # hits CallableTemplate + @njit + def foo(): + return np.angle(None) + + with self.assertRaises(errors.TypingError) as raises: + foo() + + excstr = str(raises.exception) + self.assertIn("No implementation of function Function(", excstr) + expected_file = os.path.join("numba", "tests", + "test_errorhandling.py") + expected_ol = f"Overload of function 'bar': File: {expected_file}:" + self.assertIn(expected_ol.format(expected_file), excstr) + self.assertIn("No match.", excstr) + + def test_intrinsic_template_source(self): + # hits _IntrinsicTemplate + given_reason1 = "x must be literal" + given_reason2 = "array.ndim must be 1" + + @intrinsic + def myintrin(typingctx, x, arr): + if not isinstance(x, types.IntegerLiteral): + raise errors.RequireLiteralValue(given_reason1) + + if arr.ndim != 1: + raise errors.NumbaValueError(given_reason2) + + sig = types.intp(x, arr) + + def codegen(context, builder, signature, args): + pass + return sig, codegen + + @njit + def call_intrin(): + arr = np.zeros((2, 2)) + myintrin(1, arr) + + with self.assertRaises(errors.TypingError) as raises: + call_intrin() + + excstr = str(raises.exception) + self.assertIn(error_reasons['specific_error'].splitlines()[0], excstr) + self.assertIn(given_reason1, excstr) + self.assertIn(given_reason2, excstr) + self.assertIn("Intrinsic in function", excstr) + + def test_overloadmethod_template_source(self): + # doesn't hit _OverloadMethodTemplate for source as it's a nested + # exception + @overload_method(types.UnicodeType, 'isnonsense') + def ol_unicode_isnonsense(self): + pass + + @njit + def foo(): + "abc".isnonsense() + + with 
self.assertRaises(errors.TypingError) as raises: + foo() + + excstr = str(raises.exception) + self.assertIn("Overload of function 'ol_unicode_isnonsense'", excstr) + + def test_overloadattribute_template_source(self): + # doesn't hit _OverloadMethodTemplate for source as it's a nested + # exception + @overload_attribute(types.UnicodeType, 'isnonsense') + def ol_unicode_isnonsense(self): + pass + + @njit + def foo(): + "abc".isnonsense + + with self.assertRaises(errors.TypingError) as raises: + foo() + + excstr = str(raises.exception) + self.assertIn("Overload of function 'ol_unicode_isnonsense'", excstr) + + def test_external_function_pointer_template_source(self): + from numba.tests.ctypes_usecases import c_cos + + @njit + def foo(): + c_cos('a') + + with self.assertRaises(errors.TypingError) as raises: + foo() + + excstr = str(raises.exception) + self.assertIn("Type Restricted Function in function 'unknown'", excstr) + + @skip_unless_cffi + def test_cffi_function_pointer_template_source(self): + from numba.tests import cffi_usecases as mod + mod.init() + func = mod.cffi_cos + + @njit + def foo(): + func('a') + + with self.assertRaises(errors.TypingError) as raises: + foo() + + excstr = str(raises.exception) + self.assertIn("Type Restricted Function in function 'unknown'", excstr) + + def test_missing_source(self): + + @structref.register + class ParticleType(types.StructRef): + pass + + class Particle(structref.StructRefProxy): + def __new__(cls, pos, mass): + return structref.StructRefProxy.__new__(cls, pos) + # didn't provide the required mass argument ----^ + + structref.define_proxy(Particle, ParticleType, ["pos", "mass"]) + + with self.assertRaises(errors.TypingError) as raises: + Particle(pos=1, mass=2) + + excstr = str(raises.exception) + self.assertIn("missing a required argument: 'mass'", excstr) + + +class TestDeveloperSpecificErrorMessages(SerialMixin, unittest.TestCase): + + def test_bound_function_error_string(self): + # See PR #5952 + def foo(x): + 
x.max(-1) + + with override_config('DEVELOPER_MODE', 1): + with self.assertRaises(errors.TypingError) as raises: + njit("void(int64[:,:])")(foo) + + excstr = str(raises.exception) + self.assertIn("too many positional arguments", excstr) + + +class TestCapturedErrorHandling(SerialMixin, TestCase): + """Checks that the way errors are captured. + """ + + def test_error_in_overload(self): + + def bar(x): + pass + + @overload(bar) + def ol_bar(x): + x.some_invalid_attr # doesn't exist! + + def impl(x): + pass + return impl + + with warnings.catch_warnings(): + # Suppress error going into stdout + warnings.simplefilter("ignore", + errors.NumbaPendingDeprecationWarning) + + with self.assertRaises(AttributeError) as raises: + @njit('void(int64)') + def foo(x): + bar(x) + expected = "object has no attribute 'some_invalid_attr'" + self.assertIn(expected, str(raises.exception)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_errormodels.py b/venv/lib/python3.10/site-packages/numba/tests/test_errormodels.py new file mode 100644 index 0000000000000000000000000000000000000000..9635da294e0c52eca4ae55284160e01ea0ee5b6d --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_errormodels.py @@ -0,0 +1,28 @@ +""" +Test setting/overriding error models +""" + +from numba import jit +import unittest + + +class TestErrorModel(unittest.TestCase): + + def test_div_by_zero_python(self): + @jit # python model is the default + def model_python(val): + return 1 / val + + with self.assertRaises(ZeroDivisionError): + model_python(0) + + def test_div_by_zero_numpy(self): + @jit(error_model='numpy') + def model_numpy(val): + return 1 / val + + self.assertEqual(model_numpy(0), float('inf')) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_event.py b/venv/lib/python3.10/site-packages/numba/tests/test_event.py new file mode 100644 index 
0000000000000000000000000000000000000000..dce8b01226fe7f0fb4e48a442e91bbfc0098a0a4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_event.py @@ -0,0 +1,218 @@ +import unittest +import string + +import numpy as np + +from numba import njit, jit, literal_unroll +from numba.core import event as ev +from numba.tests.support import TestCase, override_config +from numba.core.utils import _lazy_pformat + + +class TestEvent(TestCase): + + def setUp(self): + # Trigger compilation to ensure all listeners are initialized + njit(lambda: None)() + self.__registered_listeners = len(ev._registered) + + def tearDown(self): + # Check there is no lingering listeners + self.assertEqual(len(ev._registered), self.__registered_listeners) + + def test_recording_listener(self): + @njit + def foo(x): + return x + x + + with ev.install_recorder("numba:compile") as rec: + foo(1) + + self.assertIsInstance(rec, ev.RecordingListener) + # Check there must be at least two events. + # Because there must be a START and END for the compilation of foo() + self.assertGreaterEqual(len(rec.buffer), 2) + + def test_compiler_lock_event(self): + @njit + def foo(x): + return x + x + + foo(1) + md = foo.get_metadata(foo.signatures[0]) + lock_duration = md['timers']['compiler_lock'] + self.assertIsInstance(lock_duration, float) + self.assertGreater(lock_duration, 0) + + def test_llvm_lock_event(self): + @njit + def foo(x): + return x + x + + foo(1) + md = foo.get_metadata(foo.signatures[0]) + lock_duration = md['timers']['llvm_lock'] + self.assertIsInstance(lock_duration, float) + self.assertGreater(lock_duration, 0) + + def test_run_pass_event(self): + @njit + def foo(x): + return x + x + + with ev.install_recorder("numba:run_pass") as recorder: + foo(2) + + self.assertGreater(len(recorder.buffer), 0) + for _, event in recorder.buffer: + # Check that all fields are there + data = event.data + self.assertIsInstance(data['name'], str) + self.assertIsInstance(data['qualname'], str) + 
self.assertIsInstance(data['module'], str) + self.assertIsInstance(data['flags'], _lazy_pformat) + self.assertIsInstance(data['args'], str) + self.assertIsInstance(data['return_type'], str) + + def test_install_listener(self): + ut = self + + class MyListener(ev.Listener): + def on_start(self, event): + ut.assertEqual(event.status, ev.EventStatus.START) + ut.assertEqual(event.kind, "numba:compile") + ut.assertIs(event.data["dispatcher"], foo) + dispatcher = event.data["dispatcher"] + ut.assertIs(dispatcher, foo) + # Check that the compiling signature is NOT in the overloads + ut.assertNotIn(event.data["args"], dispatcher.overloads) + + def on_end(self, event): + ut.assertEqual(event.status, ev.EventStatus.END) + ut.assertEqual(event.kind, "numba:compile") + dispatcher = event.data["dispatcher"] + ut.assertIs(dispatcher, foo) + # Check that the compiling signature is in the overloads + ut.assertIn(event.data["args"], dispatcher.overloads) + + @njit + def foo(x): + return x + + listener = MyListener() + with ev.install_listener("numba:compile", listener) as yielded: + foo(1) + + # Check that the yielded value is the same listener + self.assertIs(listener, yielded) + + def test_global_register(self): + ut = self + + class MyListener(ev.Listener): + def on_start(self, event): + ut.assertEqual(event.status, ev.EventStatus.START) + ut.assertEqual(event.kind, "numba:compile") + # Check it is the same dispatcher + dispatcher = event.data["dispatcher"] + ut.assertIs(dispatcher, foo) + # Check that the compiling signature is NOT in the overloads + ut.assertNotIn(event.data["args"], dispatcher.overloads) + + def on_end(self, event): + ut.assertEqual(event.status, ev.EventStatus.END) + ut.assertEqual(event.kind, "numba:compile") + # Check it is the same dispatcher + dispatcher = event.data["dispatcher"] + ut.assertIs(dispatcher, foo) + # Check that the compiling signature is in the overloads + ut.assertIn(event.data["args"], dispatcher.overloads) + + @njit + def foo(x): + 
return x + + listener = MyListener() + ev.register("numba:compile", listener) + foo(1) + ev.unregister("numba:compile", listener) + + def test_lifted_dispatcher(self): + @jit(forceobj=True) + def foo(): + object() # to trigger loop-lifting + c = 0 + for i in range(10): + c += i + return c + + with ev.install_recorder("numba:compile") as rec: + foo() + + # Check that there are 4 events. + # Two for `foo()` and two for the lifted loop. + self.assertGreaterEqual(len(rec.buffer), 4) + + cres = foo.overloads[foo.signatures[0]] + [ldisp] = cres.lifted + + lifted_cres = ldisp.overloads[ldisp.signatures[0]] + self.assertIsInstance( + lifted_cres.metadata["timers"]["compiler_lock"], + float, + ) + self.assertIsInstance( + lifted_cres.metadata["timers"]["llvm_lock"], + float, + ) + + def test_timing_properties(self): + a = tuple(string.ascii_lowercase) + + @njit + def bar(x): + acc = 0 + for i in literal_unroll(a): + if i in {'1': x}: + acc += 1 + else: + acc += np.sqrt(x[0, 0]) + return np.sin(x), acc + + @njit + def foo(x): + return bar(np.zeros((x, x))) + + with override_config('LLVM_PASS_TIMINGS', True): + foo(1) + + def get_timers(fn, prop): + md = fn.get_metadata(fn.signatures[0]) + return md[prop] + + foo_timers = get_timers(foo, 'timers') + bar_timers = get_timers(bar, 'timers') + foo_llvm_timer = get_timers(foo, 'llvm_pass_timings') + bar_llvm_timer = get_timers(bar, 'llvm_pass_timings') + + # Check: time spent in bar() must be longer than in foo() + self.assertLess(bar_timers['llvm_lock'], + foo_timers['llvm_lock']) + self.assertLess(bar_timers['compiler_lock'], + foo_timers['compiler_lock']) + + # Check: time spent in LLVM itself must be less than in the LLVM lock + self.assertLess(foo_llvm_timer.get_total_time(), + foo_timers['llvm_lock']) + self.assertLess(bar_llvm_timer.get_total_time(), + bar_timers['llvm_lock']) + + # Check: time spent in LLVM lock must be less than in compiler + self.assertLess(foo_timers['llvm_lock'], + foo_timers['compiler_lock']) + 
self.assertLess(bar_timers['llvm_lock'], + bar_timers['compiler_lock']) + + +if __name__ == "__main__": + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_exceptions.py b/venv/lib/python3.10/site-packages/numba/tests/test_exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..195c963e7af5e3c71f67e9c51bf6aaf5bb253956 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_exceptions.py @@ -0,0 +1,475 @@ +import numpy as np +import sys +import traceback + +from numba import jit, njit +from numba.core import types, errors, utils +from numba.tests.support import (TestCase, expected_failure_py311, + expected_failure_py312, + expected_failure_py313, + ) +import unittest + + +force_pyobj_flags = {'nopython': False, 'forceobj': True} +no_pyobj_flags = {'nopython': True, '_nrt': False} +no_pyobj_flags_w_nrt = {'nopython': True, '_nrt': True} +no_gil_flags = {'nopython': True, 'nogil': True, '_nrt': True} + + +class MyError(Exception): + pass + + +class OtherError(Exception): + pass + + +class UDEArgsToSuper(Exception): + def __init__(self, arg, value0): + super(UDEArgsToSuper, self).__init__(arg) + self.value0 = value0 + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + same = True + same |= self.args == other.args + same |= self.value0 == other.value0 + return same + + def __hash__(self): + return hash((super(UDEArgsToSuper).__hash__(), self.value0)) + + +class UDENoArgSuper(Exception): + def __init__(self, arg, value0): + super(UDENoArgSuper, self).__init__() + self.deferarg = arg + self.value0 = value0 + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + same = True + same |= self.args == other.args + same |= self.deferarg == other.deferarg + same |= self.value0 == other.value0 + return same + + def __hash__(self): + return hash((super(UDENoArgSuper).__hash__(), self.deferarg, + self.value0)) + + +def raise_class(exc): + def 
raiser(i): + if i == 1: + raise exc + elif i == 2: + raise ValueError + elif i == 3: + # The exception type is looked up on a module (issue #1624) + raise np.linalg.LinAlgError + return i + return raiser + + +def raise_instance(exc, arg): + def raiser(i): + if i == 1: + raise exc(arg, 1) + elif i == 2: + raise ValueError(arg, 2) + elif i == 3: + raise np.linalg.LinAlgError(arg, 3) + return i + return raiser + + +def raise_instance_runtime_args(exc): + def raiser(i, arg): + if i == 1: + raise exc(arg, 1) + elif i == 2: + raise ValueError(arg, 2) + elif i == 3: + raise np.linalg.LinAlgError(arg, 3) + return i + return raiser + + +def reraise(): + raise + + +def outer_function(inner): + def outer(i): + if i == 3: + raise OtherError("bar", 3) + return inner(i) + return outer + + +def assert_usecase(i): + assert i == 1, "bar" + + +def ude_bug_usecase(): + raise UDEArgsToSuper() # oops user forgot args to exception ctor + + +def raise_runtime_value(arg): + raise ValueError(arg) + + +class TestRaising(TestCase): + + def test_unituple_index_error(self): + def pyfunc(a, i): + return a.shape[i] + + cfunc = njit((types.Array(types.int32, 1, 'A'), types.int32),)(pyfunc) + + a = np.empty(2, dtype=np.int32) + + self.assertEqual(cfunc(a, 0), pyfunc(a, 0)) + + with self.assertRaises(IndexError) as cm: + cfunc(a, 2) + self.assertEqual(str(cm.exception), "tuple index out of range") + + def check_against_python(self, exec_mode, pyfunc, cfunc, + expected_error_class, *args): + + assert exec_mode in (force_pyobj_flags, no_pyobj_flags, + no_pyobj_flags_w_nrt, no_gil_flags) + + # invariant of mode, check the error class and args are the same + with self.assertRaises(expected_error_class) as pyerr: + pyfunc(*args) + with self.assertRaises(expected_error_class) as jiterr: + cfunc(*args) + self.assertEqual(pyerr.exception.args, jiterr.exception.args) + + # special equality check for UDEs + if isinstance(pyerr.exception, (UDEArgsToSuper, UDENoArgSuper)): + self.assertTrue(pyerr.exception == 
jiterr.exception) + + # in npm check bottom of traceback matches as frame injection with + # location info should ensure this + if exec_mode is no_pyobj_flags: + + # we only care about the bottom two frames, the error and the + # location it was raised. + try: + pyfunc(*args) + except Exception: + py_frames = traceback.format_exception(*sys.exc_info()) + expected_frames = py_frames[-2:] + + try: + cfunc(*args) + except Exception: + c_frames = traceback.format_exception(*sys.exc_info()) + got_frames = c_frames[-2:] + + # check exception and the injected frame are the same + for expf, gotf in zip(expected_frames, got_frames): + # Note use of assertIn not assertEqual, Py 3.11 has markers (^) + # that point to the variable causing the problem, Numba doesn't + # do this so only the start of the string will match. + self.assertIn(gotf, expf) + + def check_raise_class(self, flags): + pyfunc = raise_class(MyError) + cfunc = jit((types.int32,), **flags)(pyfunc) + self.assertEqual(cfunc(0), 0) + self.check_against_python(flags, pyfunc, cfunc, MyError, 1) + self.check_against_python(flags, pyfunc, cfunc, ValueError, 2) + self.check_against_python(flags, pyfunc, cfunc, + np.linalg.linalg.LinAlgError, 3) + + def test_raise_class_nopython(self): + self.check_raise_class(flags=no_pyobj_flags) + + def test_raise_class_objmode(self): + self.check_raise_class(flags=force_pyobj_flags) + + def check_raise_instance(self, flags): + for clazz in [MyError, UDEArgsToSuper, + UDENoArgSuper]: + pyfunc = raise_instance(clazz, "some message") + cfunc = jit((types.int32,), **flags)(pyfunc) + + self.assertEqual(cfunc(0), 0) + self.check_against_python(flags, pyfunc, cfunc, clazz, 1) + self.check_against_python(flags, pyfunc, cfunc, ValueError, 2) + self.check_against_python(flags, pyfunc, cfunc, + np.linalg.linalg.LinAlgError, 3) + + def test_raise_instance_objmode(self): + self.check_raise_instance(flags=force_pyobj_flags) + + def test_raise_instance_nopython(self): + 
self.check_raise_instance(flags=no_pyobj_flags) + + def check_raise_nested(self, flags, **jit_args): + """ + Check exception propagation from nested functions. + """ + for clazz in [MyError, UDEArgsToSuper, + UDENoArgSuper]: + inner_pyfunc = raise_instance(clazz, "some message") + pyfunc = outer_function(inner_pyfunc) + inner_cfunc = jit(**jit_args)(inner_pyfunc) + cfunc = jit(**jit_args)(outer_function(inner_cfunc)) + + self.check_against_python(flags, pyfunc, cfunc, clazz, 1) + self.check_against_python(flags, pyfunc, cfunc, ValueError, 2) + self.check_against_python(flags, pyfunc, cfunc, OtherError, 3) + + def test_raise_nested_objmode(self): + self.check_raise_nested(force_pyobj_flags, forceobj=True) + + def test_raise_nested_nopython(self): + self.check_raise_nested(no_pyobj_flags, nopython=True) + + def check_reraise(self, flags): + def raise_exc(exc): + raise exc + pyfunc = reraise + cfunc = jit((), **flags)(pyfunc) + for op, err in [(lambda : raise_exc(ZeroDivisionError), + ZeroDivisionError), + (lambda : raise_exc(UDEArgsToSuper("msg", 1)), + UDEArgsToSuper), + (lambda : raise_exc(UDENoArgSuper("msg", 1)), + UDENoArgSuper)]: + def gen_impl(fn): + def impl(): + try: + op() + except err: + fn() + return impl + pybased = gen_impl(pyfunc) + cbased = gen_impl(cfunc) + self.check_against_python(flags, pybased, cbased, err,) + + def test_reraise_objmode(self): + self.check_reraise(flags=force_pyobj_flags) + + def test_reraise_nopython(self): + self.check_reraise(flags=no_pyobj_flags) + + def check_raise_invalid_class(self, cls, flags): + pyfunc = raise_class(cls) + cfunc = jit((types.int32,), **flags)(pyfunc) + with self.assertRaises(TypeError) as cm: + cfunc(1) + self.assertEqual(str(cm.exception), + "exceptions must derive from BaseException") + + def test_raise_invalid_class_objmode(self): + self.check_raise_invalid_class(int, flags=force_pyobj_flags) + self.check_raise_invalid_class(1, flags=force_pyobj_flags) + + def test_raise_invalid_class_nopython(self): 
+ msg = "Encountered unsupported constant type used for exception" + with self.assertRaises(errors.UnsupportedError) as raises: + self.check_raise_invalid_class(int, flags=no_pyobj_flags) + self.assertIn(msg, str(raises.exception)) + with self.assertRaises(errors.UnsupportedError) as raises: + self.check_raise_invalid_class(1, flags=no_pyobj_flags) + self.assertIn(msg, str(raises.exception)) + + def test_raise_bare_string_nopython(self): + @njit + def foo(): + raise "illegal" + msg = ("Directly raising a string constant as an exception is not " + "supported") + with self.assertRaises(errors.UnsupportedError) as raises: + foo() + self.assertIn(msg, str(raises.exception)) + + def check_assert_statement(self, flags): + pyfunc = assert_usecase + cfunc = jit((types.int32,), **flags)(pyfunc) + cfunc(1) + self.check_against_python(flags, pyfunc, cfunc, AssertionError, 2) + + def test_assert_statement_objmode(self): + self.check_assert_statement(flags=force_pyobj_flags) + + def test_assert_statement_nopython(self): + self.check_assert_statement(flags=no_pyobj_flags) + + def check_raise_from_exec_string(self, flags): + # issue #3428 + simple_raise = "def f(a):\n raise exc('msg', 10)" + assert_raise = "def f(a):\n assert a != 1" + py312_pep695_raise = "def f[T: int](a: T) -> T:\n assert a != 1" + py312_pep695_raise_2 = "def f[T: int\n](a: T) -> T:\n assert a != 1" + test_cases = [ + (assert_raise, AssertionError), + (simple_raise, UDEArgsToSuper), + (simple_raise, UDENoArgSuper), + ] + if utils.PYVERSION >= (3, 12): + # Added for https://github.com/numba/numba/issues/9443 + test_cases.append((py312_pep695_raise, AssertionError)) + test_cases.append((py312_pep695_raise_2, AssertionError)) + for f_text, exc in test_cases: + loc = {} + exec(f_text, {'exc': exc}, loc) + pyfunc = loc['f'] + cfunc = jit((types.int32,), **flags)(pyfunc) + self.check_against_python(flags, pyfunc, cfunc, exc, 1) + + def test_assert_from_exec_string_objmode(self): + 
self.check_raise_from_exec_string(flags=force_pyobj_flags) + + def test_assert_from_exec_string_nopython(self): + self.check_raise_from_exec_string(flags=no_pyobj_flags) + + def check_user_code_error_traceback(self, flags): + # this test checks that if a user tries to compile code that contains + # a bug in exception initialisation (e.g. missing arg) then this also + # has a frame injected with the location information. + pyfunc = ude_bug_usecase + cfunc = jit((), **flags)(pyfunc) + self.check_against_python(flags, pyfunc, cfunc, TypeError) + + def test_user_code_error_traceback_objmode(self): + self.check_user_code_error_traceback(flags=force_pyobj_flags) + + def test_user_code_error_traceback_nopython(self): + self.check_user_code_error_traceback(flags=no_pyobj_flags) + + def check_raise_runtime_value(self, flags): + pyfunc = raise_runtime_value + cfunc = jit((types.string,), **flags)(pyfunc) + self.check_against_python(flags, pyfunc, cfunc, ValueError, 'hello') + + def test_raise_runtime_value_objmode(self): + self.check_raise_runtime_value(flags=force_pyobj_flags) + + def test_raise_runtime_value_nopython(self): + self.check_raise_runtime_value(flags=no_pyobj_flags_w_nrt) + + def test_raise_runtime_value_nogil(self): + self.check_raise_runtime_value(flags=no_gil_flags) + + def check_raise_instance_with_runtime_args(self, flags): + for clazz in [MyError, UDEArgsToSuper, + UDENoArgSuper]: + pyfunc = raise_instance_runtime_args(clazz) + cfunc = jit((types.int32, types.string), **flags)(pyfunc) + + self.assertEqual(cfunc(0, 'test'), 0) + self.check_against_python(flags, pyfunc, cfunc, clazz, 1, 'hello') + self.check_against_python(flags, pyfunc, cfunc, ValueError, 2, + 'world') + self.check_against_python(flags, pyfunc, cfunc, + np.linalg.linalg.LinAlgError, 3, 'linalg') + + def test_raise_instance_with_runtime_args_objmode(self): + self.check_raise_instance_with_runtime_args(flags=force_pyobj_flags) + + def test_raise_instance_with_runtime_args_nopython(self): + 
self.check_raise_instance_with_runtime_args(flags=no_pyobj_flags_w_nrt) + + def test_raise_instance_with_runtime_args_nogil(self): + self.check_raise_instance_with_runtime_args(flags=no_gil_flags) + + def test_dynamic_raise_bad_args(self): + def raise_literal_dict(): + raise ValueError({'a': 1, 'b': np.ones(4)}) + + def raise_range(): + raise ValueError(range(3)) + + def raise_rng(rng): + raise ValueError(rng.bit_generator) + + funcs = [ + (raise_literal_dict, ()), + (raise_range, ()), + (raise_rng, (types.npy_rng,)), + ] + + for pyfunc, argtypes in funcs: + msg = '.*Cannot convert native .* to a Python object.*' + with self.assertRaisesRegex(errors.TypingError, msg): + njit(argtypes)(pyfunc) + + def test_dynamic_raise_dict(self): + @njit + def raise_literal_dict2(): + raise ValueError({'a': 1, 'b': 3}) + + msg = "{a: 1, b: 3}" + with self.assertRaisesRegex(ValueError, msg): + raise_literal_dict2() + + def test_disable_nrt(self): + @njit(_nrt=False) + def raise_with_no_nrt(i): + raise ValueError(i) + + msg = 'NRT required but not enabled' + with self.assertRaisesRegex(errors.NumbaRuntimeError, msg): + raise_with_no_nrt(123) + + def test_try_raise(self): + + @njit + def raise_(a): + raise ValueError(a) + + @njit + def try_raise(a): + try: + raise_(a) + except Exception: + pass + return a + 1 + + self.assertEqual(try_raise.py_func(3), try_raise(3)) + + @expected_failure_py311 + @expected_failure_py312 + @expected_failure_py313 + def test_dynamic_raise(self): + + @njit + def raise_(a): + raise ValueError(a) + + @njit + def try_raise_(a): + try: + raise_(a) + except Exception: + raise ValueError(a) + + args = [ + 1, + 1.1, + 'hello', + np.ones(3), + [1, 2], + (1, 2), + set([1, 2]), + ] + for fn in (raise_, try_raise_): + for arg in args: + with self.assertRaises(ValueError) as e: + fn(arg) + self.assertEqual((arg,), e.exception.args) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_extended_arg.py 
b/venv/lib/python3.10/site-packages/numba/tests/test_extended_arg.py new file mode 100644 index 0000000000000000000000000000000000000000..535d8f89d1b0660da9502f14c99017bf8bd3ed01 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_extended_arg.py @@ -0,0 +1,51 @@ +import unittest + +import dis +import struct + +from numba import jit +from numba.core import utils +from numba.tests.support import TestCase + + +class TestExtendedArg(TestCase): + """ + Test support for the EXTENDED_ARG opcode. + """ + bytecode_len = 0xff + + def get_extended_arg_load_const(self): + """ + Get a function with a EXTENDED_ARG opcode before a LOAD_CONST opcode. + """ + def f(): + x = 5 + return x + + b = bytearray(f.__code__.co_code) + consts = f.__code__.co_consts + bytecode_format = "= (3, 11): + # Python 3.11 has a RESUME op code at the start of a function, need + # to inject the EXTENDED_ARG after this to influence the LOAD_CONST + offset = 2 # 2 byte op code + else: + offset = 0 + + packed_extend_arg = struct.pack(bytecode_format, dis.EXTENDED_ARG, 1) + b[:] = b[:offset] + packed_extend_arg + b[offset:] + f.__code__ = f.__code__.replace(co_code=bytes(b), co_consts=consts) + return f + + def test_extended_arg_load_const(self): + pyfunc = self.get_extended_arg_load_const() + # make sure that the pyfunc contains the expected modification + self.assertGreater(len(pyfunc.__code__.co_consts), self.bytecode_len) + self.assertPreciseEqual(pyfunc(), 42) + cfunc = jit(nopython=True)(pyfunc) + self.assertPreciseEqual(cfunc(), 42) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_extending.py b/venv/lib/python3.10/site-packages/numba/tests/test_extending.py new file mode 100644 index 0000000000000000000000000000000000000000..323e98367c5cec5183bf0484a613971be3b97a78 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_extending.py @@ -0,0 +1,2248 @@ +import inspect +import math +import operator 
+import sys +import pickle +import multiprocessing +import ctypes +import warnings +import re + +import numpy as np +from llvmlite import ir + +import numba +from numba import njit, jit, vectorize, guvectorize, objmode +from numba.core import types, errors, typing, compiler, cgutils +from numba.core.typed_passes import type_inference_stage +from numba.core.registry import cpu_target +from numba.core.imputils import lower_constant +from numba.tests.support import ( + TestCase, + captured_stdout, + temp_directory, + override_config, + run_in_new_process_in_cache_dir, + skip_if_typeguard, +) +from numba.core.errors import LoweringError +import unittest + +from numba.extending import ( + typeof_impl, + type_callable, + lower_builtin, + lower_cast, + overload, + overload_attribute, + overload_method, + models, + register_model, + box, + unbox, + NativeValue, + intrinsic, + _Intrinsic, + register_jitable, + get_cython_function_address, + is_jitted, + overload_classmethod, +) +from numba.core.typing.templates import ( + ConcreteTemplate, + signature, + infer, + infer_global, + AbstractTemplate, +) + + +# Pandas-like API implementation +from .pdlike_usecase import Index, Series + + +try: + import scipy.special.cython_special as sc +except ImportError: + sc = None + + +# ----------------------------------------------------------------------- +# Define a custom type and an implicit cast on it + + +class MyDummy(object): + pass + + +class MyDummyType(types.Opaque): + def can_convert_to(self, context, toty): + if isinstance(toty, types.Number): + from numba.core.typeconv import Conversion + + return Conversion.safe + + +mydummy_type = MyDummyType("mydummy") +mydummy = MyDummy() + + +@typeof_impl.register(MyDummy) +def typeof_mydummy(val, c): + return mydummy_type + + +@lower_cast(MyDummyType, types.Number) +def mydummy_to_number(context, builder, fromty, toty, val): + """ + Implicit conversion from MyDummy to int. 
+ """ + return context.get_constant(toty, 42) + + +def get_dummy(): + return mydummy + + +register_model(MyDummyType)(models.OpaqueModel) + + +@unbox(MyDummyType) +def unbox_index(typ, obj, c): + return NativeValue(c.context.get_dummy_value()) + + +# ----------------------------------------------------------------------- +# Define a second custom type but w/o implicit cast to Number + + +def base_dummy_type_factory(name): + class DynType(object): + pass + + class DynTypeType(types.Opaque): + pass + + dyn_type_type = DynTypeType(name) + + @typeof_impl.register(DynType) + def typeof_mydummy(val, c): + return dyn_type_type + + register_model(DynTypeType)(models.OpaqueModel) + return DynTypeType, DynType, dyn_type_type + + +MyDummyType2, MyDummy2, mydummy_type_2 = base_dummy_type_factory("mydummy2") + + +@unbox(MyDummyType2) +def unbox_index2(typ, obj, c): + return NativeValue(c.context.get_dummy_value()) + + +# ----------------------------------------------------------------------- +# Define a function's typing and implementation using the classical +# two-step API + + +def func1(x=None): + raise NotImplementedError + + +def type_func1_(context): + def typer(x=None): + if x in (None, types.none): + # 0-arg or 1-arg with None + return types.int32 + elif isinstance(x, types.Float): + # 1-arg with float + return x + + return typer + + +type_func1 = type_callable(func1)(type_func1_) + + +@lower_builtin(func1) +@lower_builtin(func1, types.none) +def func1_nullary(context, builder, sig, args): + return context.get_constant(sig.return_type, 42) + + +@lower_builtin(func1, types.Float) +def func1_unary(context, builder, sig, args): + def func1_impl(x): + return math.sqrt(2 * x) + + return context.compile_internal(builder, func1_impl, sig, args) + + +# We can do the same for a known internal operation, here "print_item" +# which we extend to support MyDummyType. 
+ + +@infer +class PrintDummy(ConcreteTemplate): + key = "print_item" + cases = [signature(types.none, mydummy_type)] + + +@lower_builtin("print_item", MyDummyType) +def print_dummy(context, builder, sig, args): + [x] = args + pyapi = context.get_python_api(builder) + strobj = pyapi.unserialize(pyapi.serialize_object("hello!")) + pyapi.print_object(strobj) + pyapi.decref(strobj) + return context.get_dummy_value() + + +# ----------------------------------------------------------------------- +# Define an overloaded function (combined API) + + +def where(cond, x, y): + raise NotImplementedError + + +def np_where(cond, x, y): + """ + Wrap np.where() to allow for keyword arguments + """ + return np.where(cond, x, y) + + +def call_where(cond, x, y): + return where(cond, y=y, x=x) + + +@overload(where) +def overload_where_arrays(cond, x, y): + """ + Implement where() for arrays. + """ + # Choose implementation based on argument types. + if isinstance(cond, types.Array): + if x.dtype != y.dtype: + raise errors.TypingError("x and y should have the same dtype") + + # Array where() => return an array of the same shape + if all(ty.layout == "C" for ty in (cond, x, y)): + + def where_impl(cond, x, y): + """ + Fast implementation for C-contiguous arrays + """ + shape = cond.shape + if x.shape != shape or y.shape != shape: + raise ValueError("all inputs should have the same shape") + res = np.empty_like(x) + cf = cond.flat + xf = x.flat + yf = y.flat + rf = res.flat + for i in range(cond.size): + rf[i] = xf[i] if cf[i] else yf[i] + return res + + else: + + def where_impl(cond, x, y): + """ + Generic implementation for other arrays + """ + shape = cond.shape + if x.shape != shape or y.shape != shape: + raise ValueError("all inputs should have the same shape") + res = np.empty_like(x) + for idx, c in np.ndenumerate(cond): + res[idx] = x[idx] if c else y[idx] + return res + + return where_impl + + +# We can define another overload function for the same function, they +# will be 
tried in turn until one succeeds. + + +@overload(where) +def overload_where_scalars(cond, x, y): + """ + Implement where() for scalars. + """ + if not isinstance(cond, types.Array): + if x != y: + raise errors.TypingError("x and y should have the same type") + + def where_impl(cond, x, y): + """ + Scalar where() => return a 0-dim array + """ + scal = x if cond else y + # Can't use full_like() on Numpy < 1.8 + arr = np.empty_like(scal) + arr[()] = scal + return arr + + return where_impl + + +# ----------------------------------------------------------------------- +# Overload an already defined built-in function, extending it for new types. + + +@overload(len) +def overload_len_dummy(arg): + if isinstance(arg, MyDummyType): + + def len_impl(arg): + return 13 + + return len_impl + + +@overload(operator.add) +def overload_add_dummy(arg1, arg2): + if isinstance(arg1, (MyDummyType, MyDummyType2)) and isinstance( + arg2, (MyDummyType, MyDummyType2) + ): + + def dummy_add_impl(arg1, arg2): + return 42 + + return dummy_add_impl + + +@overload(operator.delitem) +def overload_dummy_delitem(obj, idx): + if isinstance(obj, MyDummyType) and isinstance(idx, types.Integer): + + def dummy_delitem_impl(obj, idx): + print("del", obj, idx) + + return dummy_delitem_impl + + +@overload(operator.getitem) +def overload_dummy_getitem(obj, idx): + if isinstance(obj, MyDummyType) and isinstance(idx, types.Integer): + + def dummy_getitem_impl(obj, idx): + return idx + 123 + + return dummy_getitem_impl + + +@overload(operator.setitem) +def overload_dummy_setitem(obj, idx, val): + if all( + [ + isinstance(obj, MyDummyType), + isinstance(idx, types.Integer), + isinstance(val, types.Integer), + ] + ): + + def dummy_setitem_impl(obj, idx, val): + print(idx, val) + + return dummy_setitem_impl + + +def call_add_operator(arg1, arg2): + return operator.add(arg1, arg2) + + +def call_add_binop(arg1, arg2): + return arg1 + arg2 + + +@overload(operator.iadd) +def overload_iadd_dummy(arg1, arg2): + if 
isinstance(arg1, (MyDummyType, MyDummyType2)) and isinstance( + arg2, (MyDummyType, MyDummyType2) + ): + + def dummy_iadd_impl(arg1, arg2): + return 42 + + return dummy_iadd_impl + + +def call_iadd_operator(arg1, arg2): + return operator.add(arg1, arg2) + + +def call_iadd_binop(arg1, arg2): + arg1 += arg2 + + return arg1 + + +def call_delitem(obj, idx): + del obj[idx] + + +def call_getitem(obj, idx): + return obj[idx] + + +def call_setitem(obj, idx, val): + obj[idx] = val + + +@overload_method(MyDummyType, "length") +def overload_method_length(arg): + def imp(arg): + return len(arg) + + return imp + + +def cache_overload_method_usecase(x): + return x.length() + + +def call_func1_nullary(): + return func1() + + +def call_func1_unary(x): + return func1(x) + + +def len_usecase(x): + return len(x) + + +def print_usecase(x): + print(x) + + +def getitem_usecase(x, key): + return x[key] + + +def npyufunc_usecase(x): + return np.cos(np.sin(x)) + + +def get_data_usecase(x): + return x._data + + +def get_index_usecase(x): + return x._index + + +def is_monotonic_usecase(x): + return x.is_monotonic_increasing + + +def make_series_usecase(data, index): + return Series(data, index) + + +def clip_usecase(x, lo, hi): + return x.clip(lo, hi) + + +# ----------------------------------------------------------------------- + + +def return_non_boxable(): + return np + + +@overload(return_non_boxable) +def overload_return_non_boxable(): + def imp(): + return np + + return imp + + +def non_boxable_ok_usecase(sz): + mod = return_non_boxable() + return mod.arange(sz) + + +def non_boxable_bad_usecase(): + return return_non_boxable() + + +def mk_func_input(f): + pass + + +@infer_global(mk_func_input) +class MkFuncTyping(AbstractTemplate): + def generic(self, args, kws): + assert isinstance(args[0], types.MakeFunctionLiteral) + return signature(types.none, *args) + + +def mk_func_test_impl(): + mk_func_input(lambda a: a) + + +# 
----------------------------------------------------------------------- +# Define a types derived from types.Callable and overloads for them + + +class MyClass(object): + pass + + +class CallableTypeRef(types.Callable): + + def __init__(self, instance_type): + self.instance_type = instance_type + self.sig_to_impl_key = {} + self.compiled_templates = [] + super(CallableTypeRef, self).__init__('callable_type_ref' + '[{}]'.format(self.instance_type)) + + def get_call_type(self, context, args, kws): + + res_sig = None + for template in context._functions[type(self)]: + try: + res_sig = template.apply(args, kws) + except Exception: + pass # for simplicity assume args must match exactly + else: + compiled_ovlds = getattr(template, '_compiled_overloads', {}) + if args in compiled_ovlds: + self.sig_to_impl_key[res_sig] = compiled_ovlds[args] + self.compiled_templates.append(template) + break + + return res_sig + + def get_call_signatures(self): + sigs = list(self.sig_to_impl_key.keys()) + return sigs, True + + def get_impl_key(self, sig): + return self.sig_to_impl_key[sig] + + +@register_model(CallableTypeRef) +class CallableTypeModel(models.OpaqueModel): + + def __init__(self, dmm, fe_type): + + models.OpaqueModel.__init__(self, dmm, fe_type) + + +infer_global(MyClass, CallableTypeRef(MyClass)) + + +@lower_constant(CallableTypeRef) +def constant_callable_typeref(context, builder, ty, pyval): + return context.get_dummy_value() + + +# ----------------------------------------------------------------------- + + +@overload(np.exp) +def overload_np_exp(obj): + if isinstance(obj, MyDummyType): + + def imp(obj): + # Returns a constant if a MyDummyType is seen + return 0xDEADBEEF + + return imp + + +class TestLowLevelExtending(TestCase): + """ + Test the low-level two-tier extension API. + """ + + # Check with `@jit` from within the test process and also in a new test + # process so as to check the registration mechanism. 
+ + def test_func1(self): + pyfunc = call_func1_nullary + cfunc = jit(nopython=True)(pyfunc) + self.assertPreciseEqual(cfunc(), 42) + pyfunc = call_func1_unary + cfunc = jit(nopython=True)(pyfunc) + self.assertPreciseEqual(cfunc(None), 42) + self.assertPreciseEqual(cfunc(18.0), 6.0) + + @TestCase.run_test_in_subprocess + def test_func1_isolated(self): + self.test_func1() + + def test_type_callable_keeps_function(self): + self.assertIs(type_func1, type_func1_) + self.assertIsNotNone(type_func1) + + @TestCase.run_test_in_subprocess + def test_cast_mydummy(self): + pyfunc = get_dummy + cfunc = njit(types.float64(),)(pyfunc) + self.assertPreciseEqual(cfunc(), 42.0) + + def test_mk_func_literal(self): + """make sure make_function is passed to typer class as a literal + """ + test_ir = compiler.run_frontend(mk_func_test_impl) + typingctx = cpu_target.typing_context + targetctx = cpu_target.target_context + typingctx.refresh() + targetctx.refresh() + typing_res = type_inference_stage(typingctx, targetctx, test_ir, (), + None) + self.assertTrue( + any( + isinstance(a, types.MakeFunctionLiteral) + for a in typing_res.typemap.values() + ) + ) + + +class TestPandasLike(TestCase): + """ + Test implementing a pandas-like Index object. + Also stresses most of the high-level API. + """ + + def test_index_len(self): + i = Index(np.arange(3)) + cfunc = jit(nopython=True)(len_usecase) + self.assertPreciseEqual(cfunc(i), 3) + + def test_index_getitem(self): + i = Index(np.int32([42, 8, -5])) + cfunc = jit(nopython=True)(getitem_usecase) + self.assertPreciseEqual(cfunc(i, 1), 8) + ii = cfunc(i, slice(1, None)) + self.assertIsInstance(ii, Index) + self.assertEqual(list(ii), [8, -5]) + + def test_index_ufunc(self): + """ + Check Numpy ufunc on an Index object. 
+ """ + i = Index(np.int32([42, 8, -5])) + cfunc = jit(nopython=True)(npyufunc_usecase) + ii = cfunc(i) + self.assertIsInstance(ii, Index) + self.assertPreciseEqual(ii._data, np.cos(np.sin(i._data))) + + def test_index_get_data(self): + # The _data attribute is exposed with make_attribute_wrapper() + i = Index(np.int32([42, 8, -5])) + cfunc = jit(nopython=True)(get_data_usecase) + data = cfunc(i) + self.assertIs(data, i._data) + + def test_index_is_monotonic(self): + # The is_monotonic_increasing attribute is exposed with + # overload_attribute() + cfunc = jit(nopython=True)(is_monotonic_usecase) + for values, expected in [ + ([8, 42, 5], False), + ([5, 8, 42], True), + ([], True), + ]: + i = Index(np.int32(values)) + got = cfunc(i) + self.assertEqual(got, expected) + + def test_series_len(self): + i = Index(np.int32([2, 4, 3])) + s = Series(np.float64([1.5, 4.0, 2.5]), i) + cfunc = jit(nopython=True)(len_usecase) + self.assertPreciseEqual(cfunc(s), 3) + + def test_series_get_index(self): + i = Index(np.int32([2, 4, 3])) + s = Series(np.float64([1.5, 4.0, 2.5]), i) + cfunc = jit(nopython=True)(get_index_usecase) + got = cfunc(s) + self.assertIsInstance(got, Index) + self.assertIs(got._data, i._data) + + def test_series_ufunc(self): + """ + Check Numpy ufunc on an Series object. 
+ """ + i = Index(np.int32([42, 8, -5])) + s = Series(np.int64([1, 2, 3]), i) + cfunc = jit(nopython=True)(npyufunc_usecase) + ss = cfunc(s) + self.assertIsInstance(ss, Series) + self.assertIsInstance(ss._index, Index) + self.assertIs(ss._index._data, i._data) + self.assertPreciseEqual(ss._values, np.cos(np.sin(s._values))) + + def test_series_constructor(self): + i = Index(np.int32([42, 8, -5])) + d = np.float64([1.5, 4.0, 2.5]) + cfunc = jit(nopython=True)(make_series_usecase) + got = cfunc(d, i) + self.assertIsInstance(got, Series) + self.assertIsInstance(got._index, Index) + self.assertIs(got._index._data, i._data) + self.assertIs(got._values, d) + + def test_series_clip(self): + i = Index(np.int32([42, 8, -5])) + s = Series(np.float64([1.5, 4.0, 2.5]), i) + cfunc = jit(nopython=True)(clip_usecase) + ss = cfunc(s, 1.6, 3.0) + self.assertIsInstance(ss, Series) + self.assertIsInstance(ss._index, Index) + self.assertIs(ss._index._data, i._data) + self.assertPreciseEqual(ss._values, np.float64([1.6, 3.0, 2.5])) + + +class TestHighLevelExtending(TestCase): + """ + Test the high-level combined API. + """ + + def test_where(self): + """ + Test implementing a function with @overload. + """ + pyfunc = call_where + cfunc = jit(nopython=True)(pyfunc) + + def check(*args, **kwargs): + expected = np_where(*args, **kwargs) + got = cfunc(*args, **kwargs) + self.assertPreciseEqual(expected, got) + + check(x=3, cond=True, y=8) + check(True, 3, 8) + check( + np.bool_([True, False, True]), + np.int32([1, 2, 3]), + np.int32([4, 5, 5]), + ) + + # The typing error is propagated + with self.assertRaises(errors.TypingError) as raises: + cfunc(np.bool_([]), np.int32([]), np.int64([])) + self.assertIn( + "x and y should have the same dtype", str(raises.exception) + ) + + def test_len(self): + """ + Test re-implementing len() for a custom type with @overload. 
+ """ + cfunc = jit(nopython=True)(len_usecase) + self.assertPreciseEqual(cfunc(MyDummy()), 13) + self.assertPreciseEqual(cfunc([4, 5]), 2) + + def test_print(self): + """ + Test re-implementing print() for a custom type with @overload. + """ + cfunc = jit(nopython=True)(print_usecase) + with captured_stdout(): + cfunc(MyDummy()) + self.assertEqual(sys.stdout.getvalue(), "hello!\n") + + def test_add_operator(self): + """ + Test re-implementing operator.add() for a custom type with @overload. + """ + pyfunc = call_add_operator + cfunc = jit(nopython=True)(pyfunc) + + self.assertPreciseEqual(cfunc(1, 2), 3) + self.assertPreciseEqual(cfunc(MyDummy2(), MyDummy2()), 42) + + # this will call add(Number, Number) as MyDummy implicitly casts to + # Number + self.assertPreciseEqual(cfunc(MyDummy(), MyDummy()), 84) + + def test_add_binop(self): + """ + Test re-implementing '+' for a custom type via @overload(operator.add). + """ + pyfunc = call_add_binop + cfunc = jit(nopython=True)(pyfunc) + + self.assertPreciseEqual(cfunc(1, 2), 3) + self.assertPreciseEqual(cfunc(MyDummy2(), MyDummy2()), 42) + + # this will call add(Number, Number) as MyDummy implicitly casts to + # Number + self.assertPreciseEqual(cfunc(MyDummy(), MyDummy()), 84) + + def test_iadd_operator(self): + """ + Test re-implementing operator.add() for a custom type with @overload. + """ + pyfunc = call_iadd_operator + cfunc = jit(nopython=True)(pyfunc) + + self.assertPreciseEqual(cfunc(1, 2), 3) + self.assertPreciseEqual(cfunc(MyDummy2(), MyDummy2()), 42) + + # this will call add(Number, Number) as MyDummy implicitly casts to + # Number + self.assertPreciseEqual(cfunc(MyDummy(), MyDummy()), 84) + + def test_iadd_binop(self): + """ + Test re-implementing '+' for a custom type via @overload(operator.add). 
+ """ + pyfunc = call_iadd_binop + cfunc = jit(nopython=True)(pyfunc) + + self.assertPreciseEqual(cfunc(1, 2), 3) + self.assertPreciseEqual(cfunc(MyDummy2(), MyDummy2()), 42) + + # this will call add(Number, Number) as MyDummy implicitly casts to + # Number + self.assertPreciseEqual(cfunc(MyDummy(), MyDummy()), 84) + + def test_delitem(self): + pyfunc = call_delitem + cfunc = jit(nopython=True)(pyfunc) + obj = MyDummy() + e = None + + with captured_stdout() as out: + try: + cfunc(obj, 321) + except Exception as exc: + e = exc + + if e is not None: + raise e + self.assertEqual(out.getvalue(), "del hello! 321\n") + + def test_getitem(self): + pyfunc = call_getitem + cfunc = jit(nopython=True)(pyfunc) + self.assertPreciseEqual(cfunc(MyDummy(), 321), 321 + 123) + + def test_setitem(self): + pyfunc = call_setitem + cfunc = jit(nopython=True)(pyfunc) + obj = MyDummy() + e = None + + with captured_stdout() as out: + try: + cfunc(obj, 321, 123) + except Exception as exc: + e = exc + + if e is not None: + raise e + self.assertEqual(out.getvalue(), "321 123\n") + + def test_no_cpython_wrapper(self): + """ + Test overloading whose return value cannot be represented in CPython. + """ + # Test passing Module type from a @overload implementation to ensure + # that the *no_cpython_wrapper* flag works + ok_cfunc = jit(nopython=True)(non_boxable_ok_usecase) + n = 10 + got = ok_cfunc(n) + expect = non_boxable_ok_usecase(n) + np.testing.assert_equal(expect, got) + # Verify that the Module type cannot be returned to CPython + bad_cfunc = jit(nopython=True)(non_boxable_bad_usecase) + with self.assertRaises(TypeError) as raises: + bad_cfunc() + errmsg = str(raises.exception) + expectmsg = "cannot convert native Module" + self.assertIn(expectmsg, errmsg) + + def test_typing_vs_impl_signature_mismatch_handling(self): + """ + Tests that an overload which has a differing typing and implementing + signature raises an exception. 
+ """ + + def gen_ol(impl=None): + def myoverload(a, b, c, kw=None): + pass + + @overload(myoverload) + def _myoverload_impl(a, b, c, kw=None): + return impl + + @jit(nopython=True) + def foo(a, b, c, d): + myoverload(a, b, c, kw=d) + + return foo + + sentinel = "Typing and implementation arguments differ in" + + # kwarg value is different + def impl1(a, b, c, kw=12): + if a > 10: + return 1 + else: + return -1 + + with self.assertRaises(errors.TypingError) as e: + gen_ol(impl1)(1, 2, 3, 4) + msg = str(e.exception) + self.assertIn(sentinel, msg) + self.assertIn("keyword argument default values", msg) + self.assertIn('', msg) + self.assertIn('', msg) + + # kwarg name is different + def impl2(a, b, c, kwarg=None): + if a > 10: + return 1 + else: + return -1 + + with self.assertRaises(errors.TypingError) as e: + gen_ol(impl2)(1, 2, 3, 4) + msg = str(e.exception) + self.assertIn(sentinel, msg) + self.assertIn("keyword argument names", msg) + self.assertIn('', msg) + self.assertIn('', msg) + + # arg name is different + def impl3(z, b, c, kw=None): + if a > 10: # noqa: F821 + return 1 + else: + return -1 + + with self.assertRaises(errors.TypingError) as e: + gen_ol(impl3)(1, 2, 3, 4) + msg = str(e.exception) + self.assertIn(sentinel, msg) + self.assertIn("argument names", msg) + self.assertFalse("keyword" in msg) + self.assertIn('', msg) + self.assertIn('', msg) + + from .overload_usecases import impl4, impl5 + + with self.assertRaises(errors.TypingError) as e: + gen_ol(impl4)(1, 2, 3, 4) + msg = str(e.exception) + self.assertIn(sentinel, msg) + self.assertIn("argument names", msg) + self.assertFalse("keyword" in msg) + self.assertIn("First difference: 'z'", msg) + + with self.assertRaises(errors.TypingError) as e: + gen_ol(impl5)(1, 2, 3, 4) + msg = str(e.exception) + self.assertIn(sentinel, msg) + self.assertIn("argument names", msg) + self.assertFalse("keyword" in msg) + self.assertIn('', msg) + self.assertIn('', msg) + + # too many args + def impl6(a, b, c, d, e, 
kw=None): + if a > 10: + return 1 + else: + return -1 + + with self.assertRaises(errors.TypingError) as e: + gen_ol(impl6)(1, 2, 3, 4) + msg = str(e.exception) + self.assertIn(sentinel, msg) + self.assertIn("argument names", msg) + self.assertFalse("keyword" in msg) + self.assertIn('', msg) + self.assertIn('', msg) + + # too few args + def impl7(a, b, kw=None): + if a > 10: + return 1 + else: + return -1 + + with self.assertRaises(errors.TypingError) as e: + gen_ol(impl7)(1, 2, 3, 4) + msg = str(e.exception) + self.assertIn(sentinel, msg) + self.assertIn("argument names", msg) + self.assertFalse("keyword" in msg) + self.assertIn('', msg) + + # too many kwargs + def impl8(a, b, c, kw=None, extra_kwarg=None): + if a > 10: + return 1 + else: + return -1 + + with self.assertRaises(errors.TypingError) as e: + gen_ol(impl8)(1, 2, 3, 4) + msg = str(e.exception) + self.assertIn(sentinel, msg) + self.assertIn("keyword argument names", msg) + self.assertIn('', msg) + + # too few kwargs + def impl9(a, b, c): + if a > 10: + return 1 + else: + return -1 + + with self.assertRaises(errors.TypingError) as e: + gen_ol(impl9)(1, 2, 3, 4) + msg = str(e.exception) + self.assertIn(sentinel, msg) + self.assertIn("keyword argument names", msg) + self.assertIn('', msg) + + def test_typing_vs_impl_signature_mismatch_handling_var_positional(self): + """ + Tests that an overload which has a differing typing and implementing + signature raises an exception and uses VAR_POSITIONAL (*args) in typing + """ + + def myoverload(a, kw=None): + pass + + from .overload_usecases import var_positional_impl + + overload(myoverload)(var_positional_impl) + + @jit(nopython=True) + def foo(a, b): + return myoverload(a, b, 9, kw=11) + + with self.assertRaises(errors.TypingError) as e: + foo(1, 5) + msg = str(e.exception) + self.assertIn("VAR_POSITIONAL (e.g. 
*args) argument kind", msg) + self.assertIn("offending argument name is '*star_args_token'", msg) + + def test_typing_vs_impl_signature_mismatch_handling_var_keyword(self): + """ + Tests that an overload which uses **kwargs (VAR_KEYWORD) + """ + + def gen_ol(impl, strict=True): + def myoverload(a, kw=None): + pass + + overload(myoverload, strict=strict)(impl) + + @jit(nopython=True) + def foo(a, b): + return myoverload(a, kw=11) + + return foo + + # **kwargs in typing + def ol1(a, **kws): + def impl(a, kw=10): + return a + + return impl + + gen_ol(ol1, False)(1, 2) # no error if strictness not enforced + with self.assertRaises(errors.TypingError) as e: + gen_ol(ol1)(1, 2) + msg = str(e.exception) + self.assertIn("use of VAR_KEYWORD (e.g. **kwargs) is unsupported", msg) + self.assertIn("offending argument name is '**kws'", msg) + + # **kwargs in implementation + def ol2(a, kw=0): + def impl(a, **kws): + return a + + return impl + + with self.assertRaises(errors.TypingError) as e: + gen_ol(ol2)(1, 2) + msg = str(e.exception) + self.assertIn("use of VAR_KEYWORD (e.g. **kwargs) is unsupported", msg) + self.assertIn("offending argument name is '**kws'", msg) + + def test_overload_method_kwargs(self): + # Issue #3489 + @overload_method(types.Array, "foo") + def fooimpl(arr, a_kwarg=10): + def impl(arr, a_kwarg=10): + return a_kwarg + + return impl + + @njit + def bar(A): + return A.foo(), A.foo(20), A.foo(a_kwarg=30) + + Z = np.arange(5) + + self.assertEqual(bar(Z), (10, 20, 30)) + + def test_overload_method_literal_unpack(self): + # Issue #3683 + @overload_method(types.Array, "litfoo") + def litfoo(arr, val): + # Must be an integer + if isinstance(val, types.Integer): + # Must not be literal + if not isinstance(val, types.Literal): + + def impl(arr, val): + return val + + return impl + + @njit + def bar(A): + return A.litfoo(0xCAFE) + + A = np.zeros(1) + bar(A) + self.assertEqual(bar(A), 0xCAFE) + + def test_overload_ufunc(self): + # Issue #4133. 
+ # Use an extended type (MyDummyType) to use with a customized + # ufunc (np.exp). + @njit + def test(): + return np.exp(mydummy) + + self.assertEqual(test(), 0xDEADBEEF) + + def test_overload_method_stararg(self): + @overload_method(MyDummyType, "method_stararg") + def _ov_method_stararg(obj, val, val2, *args): + def get(obj, val, val2, *args): + return (val, val2, args) + + return get + + @njit + def foo(obj, *args): + # Test with expanding stararg + return obj.method_stararg(*args) + + obj = MyDummy() + self.assertEqual(foo(obj, 1, 2), (1, 2, ())) + self.assertEqual(foo(obj, 1, 2, 3), (1, 2, (3,))) + self.assertEqual(foo(obj, 1, 2, 3, 4), (1, 2, (3, 4))) + + @njit + def bar(obj): + # Test with explicit argument + return ( + obj.method_stararg(1, 2), + obj.method_stararg(1, 2, 3), + obj.method_stararg(1, 2, 3, 4), + ) + + self.assertEqual( + bar(obj), ((1, 2, ()), (1, 2, (3,)), (1, 2, (3, 4))), + ) + + # Check cases that put tuple type into stararg + # NOTE: the expected result has an extra tuple because of stararg. 
+ self.assertEqual( + foo(obj, 1, 2, (3,)), (1, 2, ((3,),)), + ) + self.assertEqual( + foo(obj, 1, 2, (3, 4)), (1, 2, ((3, 4),)), + ) + self.assertEqual( + foo(obj, 1, 2, (3, (4, 5))), (1, 2, ((3, (4, 5)),)), + ) + + def test_overload_classmethod(self): + # Add classmethod to a subclass of Array + class MyArray(types.Array): + pass + + @overload_classmethod(MyArray, "array_alloc") + def ol_array_alloc(cls, nitems): + def impl(cls, nitems): + arr = np.arange(nitems) + return arr + return impl + + @njit + def foo(nitems): + return MyArray.array_alloc(nitems) + + nitems = 13 + self.assertPreciseEqual(foo(nitems), np.arange(nitems)) + + # Check that the base type doesn't get the classmethod + + @njit + def no_classmethod_in_base(nitems): + return types.Array.array_alloc(nitems) + + with self.assertRaises(errors.TypingError) as raises: + no_classmethod_in_base(nitems) + self.assertIn( + "Unknown attribute 'array_alloc' of", + str(raises.exception), + ) + + def test_overload_callable_typeref(self): + + @overload(CallableTypeRef) + def callable_type_call_ovld1(x): + if isinstance(x, types.Integer): + def impl(x): + return 42.5 + x + return impl + + @overload(CallableTypeRef) + def callable_type_call_ovld2(x): + if isinstance(x, types.UnicodeType): + def impl(x): + return '42.5' + x + + return impl + + @njit + def foo(a, b): + return MyClass(a), MyClass(b) + + args = (4, '4') + expected = (42.5 + args[0], '42.5' + args[1]) + self.assertPreciseEqual(foo(*args), expected) + + +def _assert_cache_stats(cfunc, expect_hit, expect_misses): + hit = cfunc._cache_hits[cfunc.signatures[0]] + if hit != expect_hit: + raise AssertionError("cache not used") + miss = cfunc._cache_misses[cfunc.signatures[0]] + if miss != expect_misses: + raise AssertionError("cache not used") + + +@skip_if_typeguard +class TestOverloadMethodCaching(TestCase): + # Nested multiprocessing.Pool raises AssertionError: + # "daemonic processes are not allowed to have children" + _numba_parallel_test_ = False + + 
def test_caching_overload_method(self): + self._cache_dir = temp_directory(self.__class__.__name__) + with override_config("CACHE_DIR", self._cache_dir): + self.run_caching_overload_method() + + def run_caching_overload_method(self): + cfunc = jit(nopython=True, cache=True)(cache_overload_method_usecase) + self.assertPreciseEqual(cfunc(MyDummy()), 13) + _assert_cache_stats(cfunc, 0, 1) + llvmir = cfunc.inspect_llvm((mydummy_type,)) + # Ensure the inner method is not a declaration + decls = [ + ln + for ln in llvmir.splitlines() + if ln.startswith("declare") and "overload_method_length" in ln + ] + self.assertEqual(len(decls), 0) + # Test in a separate process + try: + ctx = multiprocessing.get_context("spawn") + except AttributeError: + ctx = multiprocessing + q = ctx.Queue() + p = ctx.Process( + target=run_caching_overload_method, args=(q, self._cache_dir) + ) + p.start() + q.put(MyDummy()) + p.join() + # Ensure subprocess exited normally + self.assertEqual(p.exitcode, 0) + res = q.get(timeout=1) + self.assertEqual(res, 13) + + +def run_caching_overload_method(q, cache_dir): + """ + Used by TestOverloadMethodCaching.test_caching_overload_method + """ + with override_config("CACHE_DIR", cache_dir): + arg = q.get() + cfunc = jit(nopython=True, cache=True)(cache_overload_method_usecase) + res = cfunc(arg) + q.put(res) + # Check cache stat + _assert_cache_stats(cfunc, 1, 0) + + +class TestIntrinsic(TestCase): + def test_void_return(self): + """ + Verify that returning a None from codegen function is handled + automatically for void functions, otherwise raise exception. 
+ """ + + @intrinsic + def void_func(typingctx, a): + sig = types.void(types.int32) + + def codegen(context, builder, signature, args): + pass # do nothing, return None, should be turned into + # dummy value + + return sig, codegen + + @intrinsic + def non_void_func(typingctx, a): + sig = types.int32(types.int32) + + def codegen(context, builder, signature, args): + pass # oops, should be returning a value here, raise exception + + return sig, codegen + + @jit(nopython=True) + def call_void_func(): + void_func(1) + return 0 + + @jit(nopython=True) + def call_non_void_func(): + non_void_func(1) + return 0 + + # void func should work + self.assertEqual(call_void_func(), 0) + # not void function should raise exception + with self.assertRaises(LoweringError) as e: + call_non_void_func() + self.assertIn("non-void function returns None", e.exception.msg) + + def test_ll_pointer_cast(self): + """ + Usecase test: custom reinterpret cast to turn int values to pointers + """ + from ctypes import CFUNCTYPE, POINTER, c_float, c_int + + # Use intrinsic to make a reinterpret_cast operation + def unsafe_caster(result_type): + assert isinstance(result_type, types.CPointer) + + @intrinsic + def unsafe_cast(typingctx, src): + self.assertIsInstance(typingctx, typing.Context) + if isinstance(src, types.Integer): + sig = result_type(types.uintp) + + # defines the custom code generation + def codegen(context, builder, signature, args): + [src] = args + rtype = signature.return_type + llrtype = context.get_value_type(rtype) + return builder.inttoptr(src, llrtype) + + return sig, codegen + + return unsafe_cast + + # make a nopython function to use our cast op. + # this is not usable from cpython due to the returning of a pointer. 
+ def unsafe_get_ctypes_pointer(src): + raise NotImplementedError("not callable from python") + + @overload(unsafe_get_ctypes_pointer, strict=False) + def array_impl_unsafe_get_ctypes_pointer(arrtype): + if isinstance(arrtype, types.Array): + unsafe_cast = unsafe_caster(types.CPointer(arrtype.dtype)) + + def array_impl(arr): + return unsafe_cast(src=arr.ctypes.data) + + return array_impl + + # the ctype wrapped function for use in nopython mode + def my_c_fun_raw(ptr, n): + for i in range(n): + print(ptr[i]) + + prototype = CFUNCTYPE(None, POINTER(c_float), c_int) + my_c_fun = prototype(my_c_fun_raw) + + # Call our pointer-cast in a @jit compiled function and use + # the pointer in a ctypes function + @jit(nopython=True) + def foo(arr): + ptr = unsafe_get_ctypes_pointer(arr) + my_c_fun(ptr, arr.size) + + # Test + arr = np.arange(10, dtype=np.float32) + with captured_stdout() as buf: + foo(arr) + got = buf.getvalue().splitlines() + buf.close() + expect = list(map(str, arr)) + self.assertEqual(expect, got) + + def test_serialization(self): + """ + Test serialization of intrinsic objects + """ + # define a intrinsic + @intrinsic + def identity(context, x): + def codegen(context, builder, signature, args): + return args[0] + + sig = x(x) + return sig, codegen + + # use in a jit function + @jit(nopython=True) + def foo(x): + return identity(x) + + self.assertEqual(foo(1), 1) + + # get serialization memo + memo = _Intrinsic._memo + memo_size = len(memo) + + # pickle foo and check memo size + serialized_foo = pickle.dumps(foo) + # increases the memo size + memo_size += 1 + self.assertEqual(memo_size, len(memo)) + # unpickle + foo_rebuilt = pickle.loads(serialized_foo) + self.assertEqual(memo_size, len(memo)) + # check rebuilt foo + self.assertEqual(foo(1), foo_rebuilt(1)) + + # pickle identity directly + serialized_identity = pickle.dumps(identity) + # memo size unchanged + self.assertEqual(memo_size, len(memo)) + # unpickle + identity_rebuilt = 
pickle.loads(serialized_identity) + # must be the same object + self.assertIs(identity, identity_rebuilt) + # memo size unchanged + self.assertEqual(memo_size, len(memo)) + + def test_deserialization(self): + """ + Test deserialization of intrinsic + """ + + def defn(context, x): + def codegen(context, builder, signature, args): + return args[0] + + return x(x), codegen + + memo = _Intrinsic._memo + memo_size = len(memo) + # invoke _Intrinsic indirectly to avoid registration which keeps an + # internal reference inside the compiler + original = _Intrinsic("foo", defn) + self.assertIs(original._defn, defn) + pickled = pickle.dumps(original) + # by pickling, a new memo entry is created + memo_size += 1 + self.assertEqual(memo_size, len(memo)) + del original # remove original before unpickling + + # by deleting, the memo entry is NOT removed due to recent + # function queue + self.assertEqual(memo_size, len(memo)) + + # Manually force clear of _recent queue + _Intrinsic._recent.clear() + memo_size -= 1 + self.assertEqual(memo_size, len(memo)) + + rebuilt = pickle.loads(pickled) + # verify that the rebuilt object is different + self.assertIsNot(rebuilt._defn, defn) + + # the second rebuilt object is the same as the first + second = pickle.loads(pickled) + self.assertIs(rebuilt._defn, second._defn) + + def test_docstring(self): + + @intrinsic + def void_func(typingctx, a: int): + """void_func docstring""" + sig = types.void(types.int32) + + def codegen(context, builder, signature, args): + pass # do nothing, return None, should be turned into + # dummy value + + return sig, codegen + + self.assertEqual("numba.tests.test_extending", void_func.__module__) + self.assertEqual("void_func", void_func.__name__) + self.assertEqual("TestIntrinsic.test_docstring..void_func", + void_func.__qualname__) + self.assertDictEqual({'a': int}, void_func.__annotations__) + self.assertEqual("void_func docstring", void_func.__doc__) + + +class TestRegisterJitable(unittest.TestCase): + def 
test_no_flags(self): + @register_jitable + def foo(x, y): + return x + y + + def bar(x, y): + return foo(x, y) + + cbar = jit(nopython=True)(bar) + + expect = bar(1, 2) + got = cbar(1, 2) + self.assertEqual(expect, got) + + def test_flags_no_nrt(self): + @register_jitable(_nrt=False) + def foo(n): + return np.arange(n) + + def bar(n): + return foo(n) + + self.assertEqual(bar(3).tolist(), [0, 1, 2]) + + cbar = jit(nopython=True)(bar) + with self.assertRaises(errors.TypingError) as raises: + cbar(2) + msg = ( + "Only accept returning of array passed into the function as " + "argument" + ) + self.assertIn(msg, str(raises.exception)) + + +class TestImportCythonFunction(unittest.TestCase): + @unittest.skipIf(sc is None, "Only run if SciPy >= 0.19 is installed") + def test_getting_function(self): + addr = get_cython_function_address( + "scipy.special.cython_special", "j0" + ) + functype = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double) + _j0 = functype(addr) + j0 = jit(nopython=True)(lambda x: _j0(x)) + self.assertEqual(j0(0), 1) + + def test_missing_module(self): + with self.assertRaises(ImportError) as raises: + get_cython_function_address("fakemodule", "fakefunction") + # The quotes are not there in Python 2 + msg = "No module named '?fakemodule'?" 
+ match = re.match(msg, str(raises.exception)) + self.assertIsNotNone(match) + + @unittest.skipIf(sc is None, "Only run if SciPy >= 0.19 is installed") + def test_missing_function(self): + with self.assertRaises(ValueError) as raises: + get_cython_function_address( + "scipy.special.cython_special", "foo" + ) + msg = ( + "No function 'foo' found in __pyx_capi__ of " + "'scipy.special.cython_special'" + ) + self.assertEqual(msg, str(raises.exception)) + + +@overload_method( + MyDummyType, "method_jit_option_check_nrt", jit_options={"_nrt": True} +) +def ov_method_jit_option_check_nrt(obj): + def imp(obj): + return np.arange(10) + + return imp + + +@overload_method( + MyDummyType, "method_jit_option_check_no_nrt", jit_options={"_nrt": False} +) +def ov_method_jit_option_check_no_nrt(obj): + def imp(obj): + return np.arange(10) + + return imp + + +@overload_attribute( + MyDummyType, "attr_jit_option_check_nrt", jit_options={"_nrt": True} +) +def ov_attr_jit_option_check_nrt(obj): + def imp(obj): + return np.arange(10) + + return imp + + +@overload_attribute( + MyDummyType, "attr_jit_option_check_no_nrt", jit_options={"_nrt": False} +) +def ov_attr_jit_option_check_no_nrt(obj): + def imp(obj): + return np.arange(10) + + return imp + + +class TestJitOptionsNoNRT(TestCase): + # Test overload*(jit_options={...}) by turning off _nrt + + def check_error_no_nrt(self, func, *args, **kwargs): + # Check that the compilation fails with a complaint about dynamic array + msg = ( + "Only accept returning of array passed into " + "the function as argument" + ) + with self.assertRaises(errors.TypingError) as raises: + func(*args, **kwargs) + self.assertIn(msg, str(raises.exception)) + + def no_nrt_overload_check(self, flag): + def dummy(): + return np.arange(10) + + @overload(dummy, jit_options={"_nrt": flag}) + def ov_dummy(): + def dummy(): + return np.arange(10) + + return dummy + + @njit + def foo(): + return dummy() + + if flag: + self.assertPreciseEqual(foo(), np.arange(10)) + 
else: + self.check_error_no_nrt(foo) + + def test_overload_no_nrt(self): + self.no_nrt_overload_check(True) + self.no_nrt_overload_check(False) + + def test_overload_method_no_nrt(self): + @njit + def udt(x): + return x.method_jit_option_check_nrt() + + self.assertPreciseEqual(udt(mydummy), np.arange(10)) + + @njit + def udt(x): + return x.method_jit_option_check_no_nrt() + + self.check_error_no_nrt(udt, mydummy) + + def test_overload_attribute_no_nrt(self): + @njit + def udt(x): + return x.attr_jit_option_check_nrt + + self.assertPreciseEqual(udt(mydummy), np.arange(10)) + + @njit + def udt(x): + return x.attr_jit_option_check_no_nrt + + self.check_error_no_nrt(udt, mydummy) + + +class TestBoxingCallingJIT(TestCase): + def setUp(self): + super().setUp() + many = base_dummy_type_factory("mydummy2") + self.DynTypeType, self.DynType, self.dyn_type_type = many + self.dyn_type = self.DynType() + + def test_unboxer_basic(self): + # Implements an unboxer on DynType that calls an intrinsic into the + # unboxer code. 
+ magic_token = 0xCAFE + magic_offset = 123 + + @intrinsic + def my_intrinsic(typingctx, val): + # An intrinsic that returns `val + magic_offset` + def impl(context, builder, sig, args): + [val] = args + return builder.add(val, val.type(magic_offset)) + + sig = signature(val, val) + return sig, impl + + @unbox(self.DynTypeType) + def unboxer(typ, obj, c): + # The unboxer that calls some jitcode + def bridge(x): + # proof that this is a jit'ed context by calling jit only + # intrinsic + return my_intrinsic(x) + + args = [c.context.get_constant(types.intp, magic_token)] + sig = signature(types.voidptr, types.intp) + is_error, res = c.pyapi.call_jit_code(bridge, sig, args) + return NativeValue(res, is_error=is_error) + + @box(self.DynTypeType) + def boxer(typ, val, c): + # The boxer that returns an integer representation + res = c.builder.ptrtoint(val, cgutils.intp_t) + return c.pyapi.long_from_ssize_t(res) + + @njit + def passthru(x): + return x + + out = passthru(self.dyn_type) + self.assertEqual(out, magic_token + magic_offset) + + def test_unboxer_raise(self): + # Testing exception raising in jitcode called from unboxing. 
+ @unbox(self.DynTypeType) + def unboxer(typ, obj, c): + # The unboxer that calls some jitcode + def bridge(x): + if x > 0: + raise ValueError("cannot be x > 0") + return x + + args = [c.context.get_constant(types.intp, 1)] + sig = signature(types.voidptr, types.intp) + is_error, res = c.pyapi.call_jit_code(bridge, sig, args) + return NativeValue(res, is_error=is_error) + + @box(self.DynTypeType) + def boxer(typ, val, c): + # The boxer that returns an integer representation + res = c.builder.ptrtoint(val, cgutils.intp_t) + return c.pyapi.long_from_ssize_t(res) + + @njit + def passthru(x): + return x + + with self.assertRaises(ValueError) as raises: + passthru(self.dyn_type) + self.assertIn( + "cannot be x > 0", str(raises.exception), + ) + + def test_boxer(self): + # Call jitcode inside the boxer + magic_token = 0xCAFE + magic_offset = 312 + + @intrinsic + def my_intrinsic(typingctx, val): + # An intrinsic that returns `val + magic_offset` + def impl(context, builder, sig, args): + [val] = args + return builder.add(val, val.type(magic_offset)) + + sig = signature(val, val) + return sig, impl + + @unbox(self.DynTypeType) + def unboxer(typ, obj, c): + return NativeValue(c.context.get_dummy_value()) + + @box(self.DynTypeType) + def boxer(typ, val, c): + # Note: this doesn't do proper error handling + def bridge(x): + return my_intrinsic(x) + + args = [c.context.get_constant(types.intp, magic_token)] + sig = signature(types.intp, types.intp) + is_error, res = c.pyapi.call_jit_code(bridge, sig, args) + return c.pyapi.long_from_ssize_t(res) + + @njit + def passthru(x): + return x + + r = passthru(self.dyn_type) + self.assertEqual(r, magic_token + magic_offset) + + def test_boxer_raise(self): + # Call jitcode inside the boxer + @unbox(self.DynTypeType) + def unboxer(typ, obj, c): + return NativeValue(c.context.get_dummy_value()) + + @box(self.DynTypeType) + def boxer(typ, val, c): + def bridge(x): + if x > 0: + raise ValueError("cannot do x > 0") + return x + + args = 
[c.context.get_constant(types.intp, 1)] + sig = signature(types.intp, types.intp) + is_error, res = c.pyapi.call_jit_code(bridge, sig, args) + # The error handling + retval = cgutils.alloca_once(c.builder, c.pyapi.pyobj, zfill=True) + with c.builder.if_then(c.builder.not_(is_error)): + obj = c.pyapi.long_from_ssize_t(res) + c.builder.store(obj, retval) + return c.builder.load(retval) + + @njit + def passthru(x): + return x + + with self.assertRaises(ValueError) as raises: + passthru(self.dyn_type) + self.assertIn( + "cannot do x > 0", str(raises.exception), + ) + + +def with_objmode_cache_ov_example(x): + # This is the function stub for overloading inside + # TestCachingOverloadObjmode.test_caching_overload_objmode + pass + + +@skip_if_typeguard +class TestCachingOverloadObjmode(TestCase): + """Test caching of the use of overload implementations that use + `with objmode` + """ + _numba_parallel_test_ = False + + def setUp(self): + warnings.simplefilter("error", errors.NumbaWarning) + + def tearDown(self): + warnings.resetwarnings() + + def test_caching_overload_objmode(self): + cache_dir = temp_directory(self.__class__.__name__) + with override_config("CACHE_DIR", cache_dir): + + def realwork(x): + # uses numpy code + arr = np.arange(x) / x + return np.linalg.norm(arr) + + def python_code(x): + # create indirections + return realwork(x) + + @overload(with_objmode_cache_ov_example) + def _ov_with_objmode_cache_ov_example(x): + def impl(x): + with objmode(y="float64"): + y = python_code(x) + return y + + return impl + + @njit(cache=True) + def testcase(x): + return with_objmode_cache_ov_example(x) + + expect = realwork(123) + got = testcase(123) + self.assertEqual(got, expect) + + testcase_cached = njit(cache=True)(testcase.py_func) + got = testcase_cached(123) + self.assertEqual(got, expect) + + @classmethod + def check_objmode_cache_ndarray(cls): + def do_this(a, b): + return np.sum(a + b) + + def do_something(a, b): + return np.sum(a + b) + + 
@overload(do_something) + def overload_do_something(a, b): + def _do_something_impl(a, b): + with objmode(y='float64'): + y = do_this(a, b) + return y + return _do_something_impl + + @njit(cache=True) + def test_caching(): + a = np.arange(20) + b = np.arange(20) + return do_something(a, b) + + got = test_caching() + expect = test_caching.py_func() + + # Check result + if got != expect: + raise AssertionError("incorrect result") + return test_caching + + @classmethod + def populate_objmode_cache_ndarray_check_cache(cls): + cls.check_objmode_cache_ndarray() + + @classmethod + def check_objmode_cache_ndarray_check_cache(cls): + disp = cls.check_objmode_cache_ndarray() + if len(disp.stats.cache_misses) != 0: + raise AssertionError('unexpected cache miss') + if len(disp.stats.cache_hits) <= 0: + raise AssertionError("unexpected missing cache hit") + + def test_check_objmode_cache_ndarray(self): + # See issue #6130. + # Env is missing after cache load. + cache_dir = temp_directory(self.__class__.__name__) + with override_config("CACHE_DIR", cache_dir): + # Run in new process to populate the cache + run_in_new_process_in_cache_dir( + self.populate_objmode_cache_ndarray_check_cache, cache_dir + ) + # Run in new process to use the cache in a fresh process. + res = run_in_new_process_in_cache_dir( + self.check_objmode_cache_ndarray_check_cache, cache_dir + ) + self.assertEqual(res['exitcode'], 0) + + +class TestMisc(TestCase): + def test_is_jitted(self): + def foo(x): + pass + + self.assertFalse(is_jitted(foo)) + self.assertTrue(is_jitted(njit(foo))) + self.assertFalse(is_jitted(vectorize(foo))) + self.assertFalse(is_jitted(vectorize(parallel=True)(foo))) + self.assertFalse( + is_jitted(guvectorize("void(float64[:])", "(m)")(foo)) + ) + + def test_overload_arg_binding(self): + # See issue #7982, checks that calling a function with named args works + # correctly irrespective of the order in which the names are supplied. 
+ @njit + def standard_order(): + return np.full(shape=123, fill_value=456).shape + + @njit + def reversed_order(): + return np.full(fill_value=456, shape=123).shape + + self.assertPreciseEqual(standard_order(), standard_order.py_func()) + self.assertPreciseEqual(reversed_order(), reversed_order.py_func()) + + +class TestOverloadPreferLiteral(TestCase): + def test_overload(self): + def prefer_lit(x): + pass + + def non_lit(x): + pass + + def ov(x): + if isinstance(x, types.IntegerLiteral): + # With prefer_literal=False, this branch will not be reached. + if x.literal_value == 1: + def impl(x): + return 0xcafe + return impl + else: + raise errors.TypingError('literal value') + else: + def impl(x): + return x * 100 + return impl + + overload(prefer_lit, prefer_literal=True)(ov) + overload(non_lit)(ov) + + @njit + def check_prefer_lit(x): + return prefer_lit(1), prefer_lit(2), prefer_lit(x) + + a, b, c = check_prefer_lit(3) + self.assertEqual(a, 0xcafe) + self.assertEqual(b, 200) + self.assertEqual(c, 300) + + @njit + def check_non_lit(x): + return non_lit(1), non_lit(2), non_lit(x) + + a, b, c = check_non_lit(3) + self.assertEqual(a, 100) + self.assertEqual(b, 200) + self.assertEqual(c, 300) + + def test_overload_method(self): + def ov(self, x): + if isinstance(x, types.IntegerLiteral): + # With prefer_literal=False, this branch will not be reached. 
+ if x.literal_value == 1: + def impl(self, x): + return 0xcafe + return impl + else: + raise errors.TypingError('literal value') + else: + def impl(self, x): + return x * 100 + return impl + + overload_method( + MyDummyType, "method_prefer_literal", + prefer_literal=True, + )(ov) + + overload_method( + MyDummyType, "method_non_literal", + prefer_literal=False, + )(ov) + + @njit + def check_prefer_lit(dummy, x): + return ( + dummy.method_prefer_literal(1), + dummy.method_prefer_literal(2), + dummy.method_prefer_literal(x), + ) + + a, b, c = check_prefer_lit(MyDummy(), 3) + self.assertEqual(a, 0xcafe) + self.assertEqual(b, 200) + self.assertEqual(c, 300) + + @njit + def check_non_lit(dummy, x): + return ( + dummy.method_non_literal(1), + dummy.method_non_literal(2), + dummy.method_non_literal(x), + ) + + a, b, c = check_non_lit(MyDummy(), 3) + self.assertEqual(a, 100) + self.assertEqual(b, 200) + self.assertEqual(c, 300) + + +class TestIntrinsicPreferLiteral(TestCase): + def test_intrinsic(self): + def intrin(context, x): + # This intrinsic will return 0xcafe if `x` is a literal `1`. 
+ sig = signature(types.intp, x) + if isinstance(x, types.IntegerLiteral): + # With prefer_literal=False, this branch will not be reached + if x.literal_value == 1: + def codegen(context, builder, signature, args): + atype = signature.args[0] + llrtype = context.get_value_type(atype) + return ir.Constant(llrtype, 0xcafe) + return sig, codegen + else: + raise errors.TypingError('literal value') + else: + def codegen(context, builder, signature, args): + atype = signature.return_type + llrtype = context.get_value_type(atype) + int_100 = ir.Constant(llrtype, 100) + return builder.mul(args[0], int_100) + return sig, codegen + + prefer_lit = intrinsic(prefer_literal=True)(intrin) + non_lit = intrinsic(prefer_literal=False)(intrin) + + @njit + def check_prefer_lit(x): + return prefer_lit(1), prefer_lit(2), prefer_lit(x) + + a, b, c = check_prefer_lit(3) + self.assertEqual(a, 0xcafe) + self.assertEqual(b, 200) + self.assertEqual(c, 300) + + @njit + def check_non_lit(x): + return non_lit(1), non_lit(2), non_lit(x) + + a, b, c = check_non_lit(3) + self.assertEqual(a, 100) + self.assertEqual(b, 200) + self.assertEqual(c, 300) + + +class TestNumbaInternalOverloads(TestCase): + + def test_signatures_match_overloaded_api(self): + # This is a "best-effort" test to try and ensure that Numba's internal + # overload declarations have signatures with argument names that match + # the API they are overloading. The purpose of ensuring there is a + # match is so that users can use call-by-name for positional arguments. + + # Set this to: + # 0 to make violations raise a ValueError (default). + # 1 to get violations reported to STDOUT + # 2 to get a verbose output of everything that was checked and its state + # reported to STDOUT. + DEBUG = 0 + + # np.random.* does not have a signature exposed to `inspect`... so + # custom parse the docstrings. 
+ def sig_from_np_random(x): + if not x.startswith('_'): + thing = getattr(np.random, x) + if inspect.isbuiltin(thing): + docstr = thing.__doc__.splitlines() + for l in docstr: + if l: + sl = l.strip() + if sl.startswith(x): # its the signature + # special case np.random.seed, it has `self` in + # the signature whereas all the other functions + # do not!? + if x == "seed": + sl = "seed(seed)" + + fake_impl = f"def {sl}:\n\tpass" + l = {} + try: + exec(fake_impl, {}, l) + except SyntaxError: + # likely elipsis, e.g. rand(d0, d1, ..., dn) + if DEBUG == 2: + print("... skipped as cannot parse " + "signature") + return None + else: + fn = l.get(x) + return inspect.signature(fn) + + def checker(func, overload_func): + if DEBUG == 2: + print(f"Checking: {func}") + + def create_message(func, overload_func, func_sig, ol_sig): + msg = [] + s = (f"{func} from module '{getattr(func, '__module__')}' " + "has mismatched sig.") + msg.append(s) + msg.append(f" - expected: {func_sig}") + msg.append(f" - got: {ol_sig}") + lineno = inspect.getsourcelines(overload_func)[1] + tmpsrcfile = inspect.getfile(overload_func) + srcfile = tmpsrcfile.replace(numba.__path__[0], '') + msg.append(f"from {srcfile}:{lineno}") + msgstr = '\n' + '\n'.join(msg) + return msgstr + + func_sig = None + try: + func_sig = inspect.signature(func) + except ValueError: + # probably a built-in/C code, see if it's a np.random function + if fname := getattr(func, '__name__', False): + if maybe_func := getattr(np.random, fname, False): + if maybe_func == func: + # it's a built-in from np.random + func_sig = sig_from_np_random(fname) + + if func_sig is not None: + ol_sig = inspect.signature(overload_func) + x = list(func_sig.parameters.keys()) + y = list(ol_sig.parameters.keys()) + for a, b in zip(x[:len(y)], y): + if a != b: + p = func_sig.parameters[a] + if p.kind == p.POSITIONAL_ONLY: + # probably a built-in/C code + if DEBUG == 2: + print("... 
skipped as positional only " + "arguments found") + break + elif '*' in str(p): # probably *args or similar + if DEBUG == 2: + print("... skipped as contains *args") + break + else: + # Only error/report on functions that have a module + # or are from somewhere other than Numba. + if (not func.__module__ or + not func.__module__.startswith("numba")): + msgstr = create_message(func, overload_func, + func_sig, ol_sig) + if DEBUG != 0: + if DEBUG == 2: + print("... INVALID") + if msgstr: + print(msgstr) + break + else: + raise ValueError(msgstr) + else: + if DEBUG == 2: + if not func.__module__: + print("... skipped as no __module__ " + "present") + else: + print("... skipped as Numba internal") + break + else: + if DEBUG == 2: + print("... OK") + + # Compile something to make sure that the typing context registries + # are populated with everything from the CPU target. + njit(lambda : None).compile(()) + tyctx = numba.core.typing.context.Context() + tyctx.refresh() + + # Walk the registries and check each function that is an overload + regs = tyctx._registries + for k, v in regs.items(): + for item in k.functions: + if getattr(item, '_overload_func', False): + checker(item.key, item._overload_func) + + +if __name__ == "__main__": + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_extending_types.py b/venv/lib/python3.10/site-packages/numba/tests/test_extending_types.py new file mode 100644 index 0000000000000000000000000000000000000000..540472e37a01d799f7a7e318c63ed69be167ea54 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_extending_types.py @@ -0,0 +1,170 @@ +""" +Test extending types via the numba.extending.* API. 
+""" +import operator + +from numba import njit, literally +from numba.core import types, cgutils +from numba.core.errors import TypingError, NumbaTypeError +from numba.core.extending import lower_builtin +from numba.core.extending import models, register_model +from numba.core.extending import make_attribute_wrapper +from numba.core.extending import type_callable +from numba.core.extending import overload +from numba.core.extending import typeof_impl + +import unittest + + +def gen_mock_float(): + # Stub to overload, pretending to be `float`. The real `float` function is + # not used as multiple registrations can collide. + def mock_float(x): + pass + return mock_float + + +class TestExtTypDummy(unittest.TestCase): + + def setUp(self): + class Dummy(object): + def __init__(self, value): + self.value = value + + class DummyType(types.Type): + def __init__(self): + super(DummyType, self).__init__(name='Dummy') + + dummy_type = DummyType() + + @register_model(DummyType) + class DummyModel(models.StructModel): + def __init__(self, dmm, fe_type): + members = [ + ('value', types.intp), + ] + models.StructModel.__init__(self, dmm, fe_type, members) + + make_attribute_wrapper(DummyType, 'value', 'value') + + @type_callable(Dummy) + def type_dummy(context): + def typer(value): + return dummy_type + return typer + + @lower_builtin(Dummy, types.intp) + def impl_dummy(context, builder, sig, args): + typ = sig.return_type + [value] = args + dummy = cgutils.create_struct_proxy(typ)(context, builder) + dummy.value = value + return dummy._getvalue() + + @typeof_impl.register(Dummy) + def typeof_dummy(val, c): + return DummyType() + + # Store attributes + self.Dummy = Dummy + self.DummyType = DummyType + + def _add_float_overload(self, mock_float_inst): + @overload(mock_float_inst) + def dummy_to_float(x): + if isinstance(x, self.DummyType): + def codegen(x): + return float(x.value) + return codegen + else: + raise NumbaTypeError('cannot type float({})'.format(x)) + + def 
test_overload_float(self): + mock_float = gen_mock_float() + self._add_float_overload(mock_float) + Dummy = self.Dummy + + @njit + def foo(x): + return mock_float(Dummy(x)) + + self.assertEqual(foo(123), float(123)) + + def test_overload_float_error_msg(self): + mock_float = gen_mock_float() + self._add_float_overload(mock_float) + + @njit + def foo(x): + return mock_float(x) + + with self.assertRaises(TypingError) as raises: + foo(1j) + + self.assertIn("cannot type float(complex128)", + str(raises.exception)) + + def test_unboxing(self): + """A test for the unboxing logic on unknown type + """ + Dummy = self.Dummy + + @njit + def foo(x): + # pass a dummy object into another function + bar(Dummy(x)) + + # make sure a cpython wrapper is created + @njit(no_cpython_wrapper=False) + def bar(dummy_obj): + pass + + foo(123) + with self.assertRaises(TypeError) as raises: + bar(Dummy(123)) + self.assertIn("can't unbox Dummy type", str(raises.exception)) + + def test_boxing(self): + """A test for the boxing logic on unknown type + """ + Dummy = self.Dummy + + @njit + def foo(x): + return Dummy(x) + + with self.assertRaises(TypeError) as raises: + foo(123) + self.assertIn("cannot convert native Dummy to Python object", + str(raises.exception)) + + def test_issue5565_literal_getitem(self): + # the following test is adapted from + # https://github.com/numba/numba/issues/5565 + Dummy, DummyType = self.Dummy, self.DummyType + + MAGIC_NUMBER = 12321 + + @overload(operator.getitem) + def dummy_getitem_ovld(self, idx): + if not isinstance(self, DummyType): + return None + # suppose we can only support idx as literal argument + if isinstance(idx, types.StringLiteral): + def dummy_getitem_impl(self, idx): + return MAGIC_NUMBER + return dummy_getitem_impl + + if isinstance(idx, types.UnicodeType): + def dummy_getitem_impl(self, idx): + return literally(idx) + return dummy_getitem_impl + + return None + + @njit + def test_impl(x, y): + return Dummy(x)[y] + + var = 'abc' + 
self.assertEqual(test_impl(1, var), MAGIC_NUMBER) diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_fancy_indexing.py b/venv/lib/python3.10/site-packages/numba/tests/test_fancy_indexing.py new file mode 100644 index 0000000000000000000000000000000000000000..aad68349e8b058ab9285eec5a05893a80471d52d --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_fancy_indexing.py @@ -0,0 +1,547 @@ +import collections +import itertools + +import numpy as np + +import unittest +from numba import jit, typeof, njit +from numba.core import types +from numba.core.errors import TypingError +from numba.tests.support import MemoryLeakMixin, TestCase + + +def getitem_usecase(a, b): + return a[b] + +def setitem_usecase(a, idx, b): + a[idx] = b + +def np_take(A, indices): + return np.take(A, indices) + +def np_take_kws(A, indices, axis): + return np.take(A, indices, axis=axis) + +class TestFancyIndexing(MemoryLeakMixin, TestCase): + + def generate_advanced_indices(self, N, many=True): + choices = [np.int16([0, N - 1, -2])] + if many: + choices += [np.uint16([0, 1, N - 1]), + np.bool_([0, 1, 1, 0])] + return choices + + def generate_basic_index_tuples(self, N, maxdim, many=True): + """ + Generate basic index tuples with 0 to *maxdim* items. + """ + # Note integers can be considered advanced indices in certain + # cases, so we avoid them here. 
+ # See "Combining advanced and basic indexing" + # in http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html + if many: + choices = [slice(None, None, None), + slice(1, N - 1, None), + slice(0, None, 2), + slice(N - 1, None, -2), + slice(-N + 1, -1, None), + slice(-1, -N, -2), + ] + else: + choices = [slice(0, N - 1, None), + slice(-1, -N, -2)] + for ndim in range(maxdim + 1): + for tup in itertools.product(choices, repeat=ndim): + yield tup + + def generate_advanced_index_tuples(self, N, maxdim, many=True): + """ + Generate advanced index tuples by generating basic index tuples + and adding a single advanced index item. + """ + # (Note Numba doesn't support advanced indices with more than + # one advanced index array at the moment) + choices = list(self.generate_advanced_indices(N, many=many)) + for i in range(maxdim + 1): + for tup in self.generate_basic_index_tuples(N, maxdim - 1, many): + for adv in choices: + yield tup[:i] + (adv,) + tup[i:] + + def generate_advanced_index_tuples_with_ellipsis(self, N, maxdim, many=True): + """ + Same as generate_advanced_index_tuples(), but also insert an + ellipsis at various points. + """ + for tup in self.generate_advanced_index_tuples(N, maxdim, many): + for i in range(len(tup) + 1): + yield tup[:i] + (Ellipsis,) + tup[i:] + + def check_getitem_indices(self, arr, indices): + pyfunc = getitem_usecase + cfunc = jit(nopython=True)(pyfunc) + orig = arr.copy() + orig_base = arr.base or arr + + for index in indices: + expected = pyfunc(arr, index) + # Sanity check: if a copy wasn't made, this wasn't advanced + # but basic indexing, and shouldn't be tested here. 
+ assert expected.base is not orig_base + got = cfunc(arr, index) + # Note Numba may not return the same array strides and + # contiguity as Numpy + self.assertEqual(got.shape, expected.shape) + self.assertEqual(got.dtype, expected.dtype) + np.testing.assert_equal(got, expected) + # Check a copy was *really* returned by Numba + if got.size: + got.fill(42) + np.testing.assert_equal(arr, orig) + + def test_getitem_tuple(self): + # Test many variations of advanced indexing with a tuple index + N = 4 + ndim = 3 + arr = np.arange(N ** ndim).reshape((N,) * ndim).astype(np.int32) + indices = self.generate_advanced_index_tuples(N, ndim) + + self.check_getitem_indices(arr, indices) + + def test_getitem_tuple_and_ellipsis(self): + # Same, but also insert an ellipsis at a random point + N = 4 + ndim = 3 + arr = np.arange(N ** ndim).reshape((N,) * ndim).astype(np.int32) + indices = self.generate_advanced_index_tuples_with_ellipsis(N, ndim, + many=False) + + self.check_getitem_indices(arr, indices) + + def test_ellipsis_getsetitem(self): + # See https://github.com/numba/numba/issues/3225 + @jit(nopython=True) + def foo(arr, v): + arr[..., 0] = arr[..., 1] + + arr = np.arange(2) + foo(arr, 1) + self.assertEqual(arr[0], arr[1]) + + def test_getitem_array(self): + # Test advanced indexing with a single array index + N = 4 + ndim = 3 + arr = np.arange(N ** ndim).reshape((N,) * ndim).astype(np.int32) + indices = self.generate_advanced_indices(N) + self.check_getitem_indices(arr, indices) + + def check_setitem_indices(self, arr, indices): + pyfunc = setitem_usecase + cfunc = jit(nopython=True)(pyfunc) + + for index in indices: + src = arr[index] + expected = np.zeros_like(arr) + got = np.zeros_like(arr) + pyfunc(expected, index, src) + cfunc(got, index, src) + # Note Numba may not return the same array strides and + # contiguity as Numpy + self.assertEqual(got.shape, expected.shape) + self.assertEqual(got.dtype, expected.dtype) + np.testing.assert_equal(got, expected) + + def 
test_setitem_tuple(self): + # Test many variations of advanced indexing with a tuple index + N = 4 + ndim = 3 + arr = np.arange(N ** ndim).reshape((N,) * ndim).astype(np.int32) + indices = self.generate_advanced_index_tuples(N, ndim) + self.check_setitem_indices(arr, indices) + + def test_setitem_tuple_and_ellipsis(self): + # Same, but also insert an ellipsis at a random point + N = 4 + ndim = 3 + arr = np.arange(N ** ndim).reshape((N,) * ndim).astype(np.int32) + indices = self.generate_advanced_index_tuples_with_ellipsis(N, ndim, + many=False) + + self.check_setitem_indices(arr, indices) + + def test_setitem_array(self): + # Test advanced indexing with a single array index + N = 4 + ndim = 3 + arr = np.arange(N ** ndim).reshape((N,) * ndim).astype(np.int32) + 10 + indices = self.generate_advanced_indices(N) + self.check_setitem_indices(arr, indices) + + def test_setitem_0d(self): + # Test setitem with a 0d-array + pyfunc = setitem_usecase + cfunc = jit(nopython=True)(pyfunc) + + inps = [ + (np.zeros(3), np.array(3.14)), + (np.zeros(2), np.array(2)), + (np.zeros(3, dtype=np.int64), np.array(3, dtype=np.int64)), + (np.zeros(3, dtype=np.float64), np.array(1, dtype=np.int64)), + (np.zeros(5, dtype=' y: + break + return result + + +def for_loop_usecase6(x, y): + result = 0 + for i in range(x): + if i > y: + continue + result += 1 + return result + + +def for_loop_usecase7(x, y): + for i in range(x): + x = 0 + for j in range(x): + return 1 + else: + pass + return 0 + + +def for_loop_usecase8(x, y): + result = 0 + for i in range(x, y, y - x + 1): + result += 1 + return result + + +def for_loop_usecase9(x, y): + z = 0 + for i in range(x): + x = 0 + for j in range(x): + if j == x / 2: + z += j + break + else: + z += y + + return z + + +def for_loop_usecase10(x, y): + for i in range(x): + if i == y: + z = y + break + else: + z = i * 2 + return z + + +def while_loop_usecase1(x, y): + result = 0 + i = 0 + while i < x: + result += i + i += 1 + return result + + +def 
while_loop_usecase2(x, y): + result = 0 + while result != x: + result += 1 + return result + + +def while_loop_usecase3(x, y): + result = 0 + i = 0 + j = 0 + while i < x: + while j < y: + result += i + j + i += 1 + j += 1 + return result + + +def while_loop_usecase4(x, y): + result = 0 + while True: + result += 1 + if result > x: + break + return result + + +def while_loop_usecase5(x, y): + result = 0 + while result < x: + if result > y: + result += 2 + continue + result += 1 + return result + + +def ifelse_usecase1(x, y): + if x > 0: + pass + elif y > 0: + pass + else: + pass + return True + + +def ifelse_usecase2(x, y): + if x > y: + return 1 + elif x == 0 or y == 0: + return 2 + else: + return 3 + + +def ifelse_usecase3(x, y): + if x > 0: + if y > 0: + return 1 + elif y < 0: + return 1 + else: + return 0 + elif x < 0: + return 1 + else: + return 0 + + +def ifelse_usecase4(x, y): + if x == y: + return 1 + + +def ternary_ifelse_usecase1(x, y): + return True if x > y else False + + +def double_infinite_loop(x, y): + L = x + i = y + + while True: + while True: + if i == L - 1: + break + i += 1 + i += 1 + if i >= L: + break + + return i, L + + +def try_except_usecase(): + try: + pass + except Exception: + pass + + +class TestFlowControl(TestCase): + + def run_test(self, pyfunc, x_operands, y_operands, + flags=enable_pyobj_flags): + cfunc = jit((types.intp, types.intp), **flags)(pyfunc) + for x, y in itertools.product(x_operands, y_operands): + pyerr = None + cerr = None + try: + pyres = pyfunc(x, y) + except Exception as e: + pyerr = e + + try: + cres = cfunc(x, y) + except Exception as e: + if pyerr is None: + raise + cerr = e + self.assertEqual(type(pyerr), type(cerr)) + else: + if pyerr is not None: + self.fail("Invalid for pure-python but numba works\n" + + pyerr) + self.assertEqual(pyres, cres) + + def test_for_loop1(self, flags=enable_pyobj_flags): + self.run_test(for_loop_usecase1, [-10, 0, 10], [0], flags=flags) + + def test_for_loop1_npm(self): + 
self.test_for_loop1(flags=no_pyobj_flags) + + def test_for_loop2(self, flags=enable_pyobj_flags): + self.run_test(for_loop_usecase2, [-10, 0, 10], [-10, 0, 10], + flags=flags) + + def test_for_loop2_npm(self): + self.test_for_loop2(flags=no_pyobj_flags) + + def test_for_loop3(self, flags=enable_pyobj_flags): + """ + List requires pyobject + """ + self.run_test(for_loop_usecase3, [1], [2], + flags=flags) + + def test_for_loop3_npm(self): + self.test_for_loop3(flags=no_pyobj_flags) + + def test_for_loop4(self, flags=enable_pyobj_flags): + self.run_test(for_loop_usecase4, [10], [10], flags=flags) + + def test_for_loop4_npm(self): + self.test_for_loop4(flags=no_pyobj_flags) + + def test_for_loop5(self, flags=enable_pyobj_flags): + self.run_test(for_loop_usecase5, [100], [50], flags=flags) + + def test_for_loop5_npm(self): + self.test_for_loop5(flags=no_pyobj_flags) + + def test_for_loop6(self, flags=enable_pyobj_flags): + self.run_test(for_loop_usecase6, [100], [50], flags=flags) + + def test_for_loop6_npm(self): + self.test_for_loop6(flags=no_pyobj_flags) + + def test_for_loop7(self, flags=enable_pyobj_flags): + self.run_test(for_loop_usecase7, [5], [0], flags=flags) + + def test_for_loop7_npm(self): + self.test_for_loop7(flags=no_pyobj_flags) + + def test_for_loop8(self, flags=enable_pyobj_flags): + self.run_test(for_loop_usecase8, [0, 1], [0, 2, 10], flags=flags) + + def test_for_loop8_npm(self): + self.test_for_loop8(flags=no_pyobj_flags) + + def test_for_loop9(self, flags=enable_pyobj_flags): + self.run_test(for_loop_usecase9, [0, 1], [0, 2, 10], flags=flags) + + def test_for_loop9_npm(self): + self.test_for_loop9(flags=no_pyobj_flags) + + def test_for_loop10(self, flags=enable_pyobj_flags): + self.run_test(for_loop_usecase10, [5], [2, 7], flags=flags) + + def test_for_loop10_npm(self): + self.test_for_loop10(flags=no_pyobj_flags) + + def test_while_loop1(self, flags=enable_pyobj_flags): + self.run_test(while_loop_usecase1, [10], [0], flags=flags) + + def 
test_while_loop1_npm(self): + self.test_while_loop1(flags=no_pyobj_flags) + + def test_while_loop2(self, flags=enable_pyobj_flags): + self.run_test(while_loop_usecase2, [10], [0], flags=flags) + + def test_while_loop2_npm(self): + self.test_while_loop2(flags=no_pyobj_flags) + + def test_while_loop3(self, flags=enable_pyobj_flags): + self.run_test(while_loop_usecase3, [10], [10], flags=flags) + + def test_while_loop3_npm(self): + self.test_while_loop3(flags=no_pyobj_flags) + + def test_while_loop4(self, flags=enable_pyobj_flags): + self.run_test(while_loop_usecase4, [10], [0], flags=flags) + + def test_while_loop4_npm(self): + self.test_while_loop4(flags=no_pyobj_flags) + + def test_while_loop5(self, flags=enable_pyobj_flags): + self.run_test(while_loop_usecase5, [0, 5, 10], [0, 5, 10], flags=flags) + + def test_while_loop5_npm(self): + self.test_while_loop5(flags=no_pyobj_flags) + + def test_ifelse1(self, flags=enable_pyobj_flags): + self.run_test(ifelse_usecase1, [-1, 0, 1], [-1, 0, 1], flags=flags) + + def test_ifelse1_npm(self): + self.test_ifelse1(flags=no_pyobj_flags) + + def test_ifelse2(self, flags=enable_pyobj_flags): + self.run_test(ifelse_usecase2, [-1, 0, 1], [-1, 0, 1], flags=flags) + + def test_ifelse2_npm(self): + self.test_ifelse2(flags=no_pyobj_flags) + + def test_ifelse3(self, flags=enable_pyobj_flags): + self.run_test(ifelse_usecase3, [-1, 0, 1], [-1, 0, 1], flags=flags) + + def test_ifelse3_npm(self): + self.test_ifelse3(flags=no_pyobj_flags) + + def test_ifelse4(self, flags=enable_pyobj_flags): + self.run_test(ifelse_usecase4, [-1, 0, 1], [-1, 0, 1], flags=flags) + + def test_ifelse4_npm(self): + self.test_ifelse4(flags=no_pyobj_flags) + + def test_ternary_ifelse1(self, flags=enable_pyobj_flags): + self.run_test(ternary_ifelse_usecase1, [-1, 0, 1], [-1, 0, 1], + flags=flags) + + def test_ternary_ifelse1_npm(self): + self.test_ternary_ifelse1(flags=no_pyobj_flags) + + def test_double_infinite_loop(self, flags=enable_pyobj_flags): + 
self.run_test(double_infinite_loop, [10], [0], + flags=flags) + + def test_double_infinite_loop_npm(self): + self.test_double_infinite_loop(flags=no_pyobj_flags) + + +class TestCFGraph(TestCase): + """ + Test the numba.controlflow.CFGraph class. + """ + + def from_adj_list(self, d, entry_point=0): + """ + Build a CFGraph class from a dict of adjacency lists. + """ + g = CFGraph() + # Need to add all nodes before adding edges + for node in d: + g.add_node(node) + for node, dests in d.items(): + for dest in dests: + g.add_edge(node, dest) + return g + + def loopless1(self): + """ + A simple CFG corresponding to the following code structure: + + c = (... if ... else ...) + ... + return b + c + """ + g = self.from_adj_list({0: [18, 12], 12: [21], 18: [21], 21: []}) + g.set_entry_point(0) + g.process() + return g + + def loopless1_dead_nodes(self): + """ + Same as loopless1(), but with added dead blocks (some of them + in a loop). + """ + g = self.from_adj_list( + {0: [18, 12], + 12: [21], + 18: [21], + 21: [], + 91: [12, 0], + 92: [91, 93], + 93: [92], + 94: [], + }) + g.set_entry_point(0) + g.process() + return g + + def loopless2(self): + """ + A loopless CFG corresponding to the following code structure: + + c = (... if ... else ...) + ... + if c: + return ... + else: + return ... + + Note there are two exit points, and the entry point has been + changed to a non-zero value. + """ + g = self.from_adj_list( + {99: [18, 12], 12: [21], 18: [21], 21: [42, 34], 34: [], 42: []}) + g.set_entry_point(99) + g.process() + return g + + def multiple_loops(self): + """ + A CFG with multiple nested loops: + + for y in b: + for x in a: + # This loop has two back edges + if b: + continue + else: + continue + for z in c: + if z: + return ... 
+ """ + g = self.from_adj_list({0: [7], + 7: [10, 60], + 10: [13], + 13: [20], + 20: [56, 23], + 23: [32, 44], + 32: [20], + 44: [20], + 56: [57], + 57: [7], + 60: [61], + 61: [68], + 68: [87, 71], + 71: [80, 68], + 80: [], + 87: [88], + 88: []} + ) + g.set_entry_point(0) + g.process() + return g + + def multiple_exits(self): + """ + A CFG with three loop exits, one of which is also a function + exit point, and another function exit point: + + for x in a: + if a: + return b + elif b: + break + return c + """ + g = self.from_adj_list( + {0: [7], + 7: [10, 36], + 10: [19, 23], + 19: [], + 23: [29, 7], + 29: [37], + 36: [37], + 37: [] + }) + g.set_entry_point(0) + g.process() + return g + + def infinite_loop1(self): + """ + A CFG with a infinite loop and an alternate exit point: + + if c: + return + while True: + if a: + ... + else: + ... + """ + g = self.from_adj_list( + {0: [10, 6], 6: [], 10: [13], 13: [26, 19], 19: [13], 26: [13]}) + g.set_entry_point(0) + g.process() + return g + + def infinite_loop2(self): + """ + A CFG with no exit point at all: + + while True: + if a: + ... + else: + ... 
+ """ + g = self.from_adj_list({0: [3], 3: [16, 9], 9: [3], 16: [3]}) + g.set_entry_point(0) + g.process() + return g + + def test_simple_properties(self): + g = self.loopless1() + self.assertEqual(sorted(g.successors(0)), [(12, None), (18, None)]) + self.assertEqual(sorted(g.successors(21)), []) + self.assertEqual(sorted(g.predecessors(0)), []) + self.assertEqual(sorted(g.predecessors(21)), [(12, None), (18, None)]) + + def test_exit_points(self): + g = self.loopless1() + self.assertEqual(sorted(g.exit_points()), [21]) + g = self.loopless1_dead_nodes() + self.assertEqual(sorted(g.exit_points()), [21]) + g = self.loopless2() + self.assertEqual(sorted(g.exit_points()), [34, 42]) + g = self.multiple_loops() + self.assertEqual(sorted(g.exit_points()), [80, 88]) + g = self.infinite_loop1() + self.assertEqual(sorted(g.exit_points()), [6]) + g = self.infinite_loop2() + self.assertEqual(sorted(g.exit_points()), []) + g = self.multiple_exits() + self.assertEqual(sorted(g.exit_points()), [19, 37]) + + def test_dead_nodes(self): + g = self.loopless1() + self.assertEqual(len(g.dead_nodes()), 0) + self.assertEqual(sorted(g.nodes()), + [0, 12, 18, 21]) + g = self.loopless2() + self.assertEqual(len(g.dead_nodes()), 0) + self.assertEqual(sorted(g.nodes()), + [12, 18, 21, 34, 42, 99]) + g = self.multiple_loops() + self.assertEqual(len(g.dead_nodes()), 0) + g = self.infinite_loop1() + self.assertEqual(len(g.dead_nodes()), 0) + g = self.multiple_exits() + self.assertEqual(len(g.dead_nodes()), 0) + # Only this example has dead nodes + g = self.loopless1_dead_nodes() + self.assertEqual(sorted(g.dead_nodes()), + [91, 92, 93, 94]) + self.assertEqual(sorted(g.nodes()), + [0, 12, 18, 21]) + + def test_descendents(self): + g = self.loopless2() + d = g.descendents(34) + self.assertEqual(sorted(d), []) + d = g.descendents(42) + self.assertEqual(sorted(d), []) + d = g.descendents(21) + self.assertEqual(sorted(d), [34, 42]) + d = g.descendents(99) + self.assertEqual(sorted(d), [12, 18, 21, 34, 
42]) + g = self.infinite_loop1() + d = g.descendents(26) + self.assertEqual(sorted(d), []) + d = g.descendents(19) + self.assertEqual(sorted(d), []) + d = g.descendents(13) + self.assertEqual(sorted(d), [19, 26]) + d = g.descendents(10) + self.assertEqual(sorted(d), [13, 19, 26]) + d = g.descendents(6) + self.assertEqual(sorted(d), []) + d = g.descendents(0) + self.assertEqual(sorted(d), [6, 10, 13, 19, 26]) + + def test_topo_order(self): + g = self.loopless1() + self.assertIn(g.topo_order(), + ([0, 12, 18, 21], [0, 18, 12, 21])) + g = self.loopless2() + self.assertIn(g.topo_order(), + ([99, 18, 12, 21, 34, 42], [99, 12, 18, 21, 34, 42])) + g = self.infinite_loop2() + self.assertIn(g.topo_order(), + ([0, 3, 9, 16], [0, 3, 16, 9])) + g = self.infinite_loop1() + self.assertIn(g.topo_order(), + ([0, 6, 10, 13, 19, 26], [0, 6, 10, 13, 26, 19], + [0, 10, 13, 19, 26, 6], [0, 10, 13, 26, 19, 6])) + + def test_topo_sort(self): + def check_topo_sort(nodes, expected): + self.assertIn(list(g.topo_sort(nodes)), expected) + self.assertIn(list(g.topo_sort(nodes[::-1])), expected) + self.assertIn(list(g.topo_sort(nodes, reverse=True))[::-1], + expected) + self.assertIn(list(g.topo_sort(nodes[::-1], reverse=True))[::-1], + expected) + self.random.shuffle(nodes) + self.assertIn(list(g.topo_sort(nodes)), expected) + self.assertIn(list(g.topo_sort(nodes, reverse=True))[::-1], + expected) + + g = self.loopless2() + check_topo_sort([21, 99, 12, 34], ([99, 12, 21, 34],)) + # NOTE: topo_sort() is not stable + check_topo_sort([18, 12, 42, 99], + ([99, 12, 18, 42], [99, 18, 12, 42])) + g = self.multiple_exits() + check_topo_sort([19, 10, 7, 36], + ([7, 10, 19, 36], [7, 10, 36, 19], [7, 36, 10, 19])) + + def check_dominators(self, got, expected): + self.assertEqual(sorted(got), sorted(expected)) + for node in sorted(got): + self.assertEqual(sorted(got[node]), sorted(expected[node]), + "mismatch for %r" % (node,)) + + def test_dominators_loopless(self): + def eq_(d, l): + 
self.assertEqual(sorted(doms[d]), l) + for g in [self.loopless1(), self.loopless1_dead_nodes()]: + doms = g.dominators() + eq_(0, [0]) + eq_(12, [0, 12]) + eq_(18, [0, 18]) + eq_(21, [0, 21]) + g = self.loopless2() + doms = g.dominators() + eq_(99, [99]) + eq_(12, [12, 99]) + eq_(18, [18, 99]) + eq_(21, [21, 99]) + eq_(34, [21, 34, 99]) + eq_(42, [21, 42, 99]) + + def test_dominators_loops(self): + g = self.multiple_exits() + doms = g.dominators() + self.check_dominators(doms, + {0: [0], + 7: [0, 7], + 10: [0, 7, 10], + 19: [0, 7, 10, 19], + 23: [0, 7, 10, 23], + 29: [0, 7, 10, 23, 29], + 36: [0, 7, 36], + 37: [0, 7, 37], + }) + g = self.multiple_loops() + doms = g.dominators() + self.check_dominators(doms, + {0: [0], + 7: [0, 7], + 10: [0, 10, 7], + 13: [0, 10, 13, 7], + 20: [0, 10, 20, 13, 7], + 23: [0, 20, 23, 7, 10, 13], + 32: [32, 0, 20, 23, 7, 10, 13], + 44: [0, 20, 23, 7, 10, 44, 13], + 56: [0, 20, 7, 56, 10, 13], + 57: [0, 20, 7, 56, 57, 10, 13], + 60: [0, 60, 7], + 61: [0, 60, 61, 7], + 68: [0, 68, 60, 61, 7], + 71: [0, 68, 71, 7, 60, 61], + 80: [80, 0, 68, 71, 7, 60, 61], + 87: [0, 68, 87, 7, 60, 61], + 88: [0, 68, 87, 88, 7, 60, 61] + }) + g = self.infinite_loop1() + doms = g.dominators() + self.check_dominators(doms, + {0: [0], + 6: [0, 6], + 10: [0, 10], + 13: [0, 10, 13], + 19: [0, 10, 19, 13], + 26: [0, 10, 13, 26], + }) + + def test_post_dominators_loopless(self): + def eq_(d, l): + self.assertEqual(sorted(doms[d]), l) + for g in [self.loopless1(), self.loopless1_dead_nodes()]: + doms = g.post_dominators() + eq_(0, [0, 21]) + eq_(12, [12, 21]) + eq_(18, [18, 21]) + eq_(21, [21]) + g = self.loopless2() + doms = g.post_dominators() + eq_(34, [34]) + eq_(42, [42]) + eq_(21, [21]) + eq_(18, [18, 21]) + eq_(12, [12, 21]) + eq_(99, [21, 99]) + + def test_post_dominators_loops(self): + g = self.multiple_exits() + doms = g.post_dominators() + self.check_dominators(doms, + {0: [0, 7], + 7: [7], + 10: [10], + 19: [19], + 23: [23], + 29: [29, 37], + 36: [36, 
37], + 37: [37], + }) + g = self.multiple_loops() + doms = g.post_dominators() + self.check_dominators(doms, + {0: [0, 60, 68, 61, 7], + 7: [60, 68, 61, 7], + 10: [68, 7, 10, 13, 20, 56, 57, 60, 61], + 13: [68, 7, 13, 20, 56, 57, 60, 61], + 20: [20, 68, 7, 56, 57, 60, 61], + 23: [68, 7, 20, 23, 56, 57, 60, 61], + 32: [32, 68, 7, 20, 56, 57, 60, 61], + 44: [68, 7, 44, 20, 56, 57, 60, 61], + 56: [68, 7, 56, 57, 60, 61], + 57: [57, 60, 68, 61, 7], + 60: [60, 68, 61], + 61: [68, 61], + 68: [68], + 71: [71], + 80: [80], + 87: [88, 87], + 88: [88] + }) + + def test_post_dominators_infinite_loops(self): + # Post-dominators with infinite loops need special care + # (the ordinary algorithm won't work). + g = self.infinite_loop1() + doms = g.post_dominators() + self.check_dominators(doms, + {0: [0], + 6: [6], + 10: [10, 13], + 13: [13], + 19: [19], + 26: [26], + }) + g = self.infinite_loop2() + doms = g.post_dominators() + self.check_dominators(doms, + {0: [0, 3], + 3: [3], + 9: [9], + 16: [16], + }) + + def test_dominator_tree(self): + def check(graph, expected): + domtree = graph.dominator_tree() + self.assertEqual(domtree, expected) + + check(self.loopless1(), + {0: {12, 18, 21}, 12: set(), 18: set(), 21: set()}) + check(self.loopless2(), + {12: set(), 18: set(), 21: {34, 42}, 34: set(), 42: set(), + 99: {18, 12, 21}}) + check(self.loopless1_dead_nodes(), + {0: {12, 18, 21}, 12: set(), 18: set(), 21: set()}) + check(self.multiple_loops(), + {0: {7}, 7: {10, 60}, 60: {61}, 61: {68}, 68: {71, 87}, + 87: {88}, 88: set(), 71: {80}, 80: set(), 10: {13}, + 13: {20}, 20: {56, 23}, 23: {32, 44}, 44: set(), + 32: set(), 56: {57}, 57: set()}) + check(self.multiple_exits(), + {0: {7}, 7: {10, 36, 37}, 36: set(), 10: {19, 23}, + 23: {29}, 29: set(), 37: set(), 19: set()}) + check(self.infinite_loop1(), + {0: {10, 6}, 6: set(), 10: {13}, 13: {26, 19}, 19: set(), + 26: set()}) + check(self.infinite_loop2(), + {0: {3}, 3: {16, 9}, 9: set(), 16: set()}) + + def 
test_immediate_dominators(self): + def check(graph, expected): + idoms = graph.immediate_dominators() + self.assertEqual(idoms, expected) + + check(self.loopless1(), + {0: 0, 12: 0, 18: 0, 21: 0}) + check(self.loopless2(), + {18: 99, 12: 99, 21: 99, 42: 21, 34: 21, 99: 99}) + check(self.loopless1_dead_nodes(), + {0: 0, 12: 0, 18: 0, 21: 0}) + check(self.multiple_loops(), + {0: 0, 7: 0, 10: 7, 13: 10, 20: 13, 23: 20, + 32: 23, 44: 23, 56: 20, 57: 56, 60: 7, 61: 60, + 68: 61, 71: 68, 80: 71, 87: 68, 88: 87}) + check(self.multiple_exits(), + {0:0, 7: 0, 10: 7, 19: 10, 23: 10, 29: 23, 36: 7, 37: 7}) + check(self.infinite_loop1(), + {0: 0, 6: 0, 10: 0, 13: 10, 19: 13, 26: 13}) + check(self.infinite_loop2(), + {0: 0, 3: 0, 9: 3, 16: 3}) + + def test_dominance_frontier(self): + def check(graph, expected): + df = graph.dominance_frontier() + self.assertEqual(df, expected) + + check(self.loopless1(), + {0: set(), 12: {21}, 18: {21}, 21: set()}) + check(self.loopless2(), + {18: {21}, 12: {21}, 21: set(), 42: set(), 34: set(), 99: set()}) + check(self.loopless1_dead_nodes(), + {0: set(), 12: {21}, 18: {21}, 21: set()}) + check(self.multiple_loops(), + {0: set(), 7: {7}, 10: {7}, 13: {7}, 20: {20, 7}, 23: {20}, + 32: {20}, 44: {20}, 56: {7}, 57: {7}, 60: set(), 61: set(), + 68: {68}, 71: {68}, 80: set(), 87: set(), 88: set()}) + check(self.multiple_exits(), + {0: set(), 7: {7}, 10: {37, 7}, 19: set(), + 23: {37, 7}, 29: {37}, 36: {37}, 37: set()}) + check(self.infinite_loop1(), + {0: set(), 6: set(), 10: set(), 13: {13}, 19: {13}, 26: {13}}) + check(self.infinite_loop2(), + {0: set(), 3: {3}, 9: {3}, 16: {3}}) + + def test_backbone_loopless(self): + for g in [self.loopless1(), self.loopless1_dead_nodes()]: + self.assertEqual(sorted(g.backbone()), [0, 21]) + g = self.loopless2() + self.assertEqual(sorted(g.backbone()), [21, 99]) + + def test_backbone_loops(self): + g = self.multiple_loops() + self.assertEqual(sorted(g.backbone()), [0, 7, 60, 61, 68]) + g = self.infinite_loop1() 
+ self.assertEqual(sorted(g.backbone()), [0]) + g = self.infinite_loop2() + self.assertEqual(sorted(g.backbone()), [0, 3]) + + def test_loops(self): + for g in [self.loopless1(), self.loopless1_dead_nodes(), + self.loopless2()]: + self.assertEqual(len(g.loops()), 0) + + g = self.multiple_loops() + # Loop headers + self.assertEqual(sorted(g.loops()), [7, 20, 68]) + outer1 = g.loops()[7] + inner1 = g.loops()[20] + outer2 = g.loops()[68] + self.assertEqual(outer1.header, 7) + self.assertEqual(sorted(outer1.entries), [0]) + self.assertEqual(sorted(outer1.exits), [60]) + self.assertEqual(sorted(outer1.body), + [7, 10, 13, 20, 23, 32, 44, 56, 57]) + self.assertEqual(inner1.header, 20) + self.assertEqual(sorted(inner1.entries), [13]) + self.assertEqual(sorted(inner1.exits), [56]) + self.assertEqual(sorted(inner1.body), [20, 23, 32, 44]) + self.assertEqual(outer2.header, 68) + self.assertEqual(sorted(outer2.entries), [61]) + self.assertEqual(sorted(outer2.exits), [80, 87]) + self.assertEqual(sorted(outer2.body), [68, 71]) + for node in [0, 60, 61, 80, 87, 88]: + self.assertEqual(g.in_loops(node), []) + for node in [7, 10, 13, 56, 57]: + self.assertEqual(g.in_loops(node), [outer1]) + for node in [20, 23, 32, 44]: + self.assertEqual(g.in_loops(node), [inner1, outer1]) + for node in [68, 71]: + self.assertEqual(g.in_loops(node), [outer2]) + + g = self.infinite_loop1() + # Loop headers + self.assertEqual(sorted(g.loops()), [13]) + loop = g.loops()[13] + self.assertEqual(loop.header, 13) + self.assertEqual(sorted(loop.entries), [10]) + self.assertEqual(sorted(loop.exits), []) + self.assertEqual(sorted(loop.body), [13, 19, 26]) + for node in [0, 6, 10]: + self.assertEqual(g.in_loops(node), []) + for node in [13, 19, 26]: + self.assertEqual(g.in_loops(node), [loop]) + + g = self.infinite_loop2() + # Loop headers + self.assertEqual(sorted(g.loops()), [3]) + loop = g.loops()[3] + self.assertEqual(loop.header, 3) + self.assertEqual(sorted(loop.entries), [0]) + 
self.assertEqual(sorted(loop.exits), []) + self.assertEqual(sorted(loop.body), [3, 9, 16]) + for node in [0]: + self.assertEqual(g.in_loops(node), []) + for node in [3, 9, 16]: + self.assertEqual(g.in_loops(node), [loop]) + + g = self.multiple_exits() + # Loop headers + self.assertEqual(sorted(g.loops()), [7]) + loop = g.loops()[7] + self.assertEqual(loop.header, 7) + self.assertEqual(sorted(loop.entries), [0]) + self.assertEqual(sorted(loop.exits), [19, 29, 36]) + self.assertEqual(sorted(loop.body), [7, 10, 23]) + for node in [0, 19, 29, 36]: + self.assertEqual(g.in_loops(node), []) + for node in [7, 10, 23]: + self.assertEqual(g.in_loops(node), [loop]) + + def test_loop_dfs_pathological(self): + # The follow adjlist is an export from the reproducer in #6186 + g = self.from_adj_list({ + 0: {38, 14}, + 14: {38, 22}, + 22: {38, 30}, + 30: {42, 38}, + 38: {42}, + 42: {64, 50}, + 50: {64, 58}, + 58: {128}, + 64: {72, 86}, + 72: {80, 86}, + 80: {128}, + 86: {108, 94}, + 94: {108, 102}, + 102: {128}, + 108: {128, 116}, + 116: {128, 124}, + 124: {128}, + 128: {178, 174}, + 174: {178}, + 178: {210, 206}, + 206: {210}, + 210: {248, 252}, + 248: {252}, + 252: {282, 286}, + 282: {286}, + 286: {296, 326}, + 296: {330}, + 326: {330}, + 330: {370, 340}, + 340: {374}, + 370: {374}, + 374: {380, 382}, + 380: {382}, + 382: {818, 390}, + 390: {456, 458}, + 456: {458}, + 458: {538, 566}, + 538: {548, 566}, + 548: set(), + 566: {586, 572}, + 572: {586}, + 586: {708, 596}, + 596: {608}, + 608: {610}, + 610: {704, 620}, + 620: {666, 630}, + 630: {636, 646}, + 636: {666, 646}, + 646: {666}, + 666: {610}, + 704: {706}, + 706: {818}, + 708: {720}, + 720: {722}, + 722: {816, 732}, + 732: {778, 742}, + 742: {748, 758}, + 748: {778, 758}, + 758: {778}, + 778: {722}, + 816: {818}, + 818: set(), + }) + g.set_entry_point(0) + g.process() + stats = {} + # Compute backedges and store the iteration count for testing + back_edges = g._find_back_edges(stats=stats) + self.assertEqual(back_edges, 
{(666, 610), (778, 722)}) + self.assertEqual(stats['iteration_count'], 155) + + def test_equals(self): + + def get_new(): + g = self.from_adj_list({0: [18, 12], 12: [21], 18: [21], 21: []}) + g.set_entry_point(0) + g.process() + return g + + x = get_new() + y = get_new() + + # identical + self.assertEqual(x, y) + + # identical but defined in a different order + g = self.from_adj_list({0: [12, 18], 18: [21], 21: [], 12: [21]}) + g.set_entry_point(0) + g.process() + self.assertEqual(x, g) + + # different entry point + z = get_new() + z.set_entry_point(18) + z.process() + self.assertNotEqual(x, z) + + # extra node/edge, same entry point + z = self.from_adj_list({0: [18, 12], 12: [21], 18: [21], 21: [22], + 22: []}) + z.set_entry_point(0) + z.process() + self.assertNotEqual(x, z) + + # same nodes, different edges + a = self.from_adj_list({0: [18, 12], 12: [0], 18: []}) + a.set_entry_point(0) + a.process() + z = self.from_adj_list({0: [18, 12], 12: [18], 18: []}) + z.set_entry_point(0) + z.process() + self.assertNotEqual(a, z) + + +class TestRealCodeDomFront(TestCase): + """Test IDOM and DOMFRONT computation on real python bytecode. + Note: there will be less testing on IDOM (esp in loop) because of + the extra blocks inserted by the interpreter. But, testing on DOMFRONT + (which depends on IDOM) is easier. + + Testing is done by associating names to basicblock by using globals of + the pattern "SET_BLOCK_", which are scanned by + `.get_cfa_and_namedblocks` into *namedblocks* dictionary. That way, we + can check that a block of a certain name is a IDOM or DOMFRONT of another + named block. 
+ """ + def cfa(self, bc): + cfa = ControlFlowAnalysis(bc) + cfa.run() + return cfa + + def get_cfa_and_namedblocks(self, fn): + fid = FunctionIdentity.from_function(fn) + bc = ByteCode(func_id=fid) + cfa = self.cfa(bc) + namedblocks = self._scan_namedblocks(bc, cfa) + + #### To debug, uncomment below + # print(bc.dump()) + # print("IDOMS") + # for k, v in sorted(cfa.graph.immediate_dominators().items()): + # print('{} -> {}'.format(k, v)) + # print("DOMFRONT") + # for k, vs in sorted(cfa.graph.dominance_frontier().items()): + # print('{} -> {}'.format(k, vs)) + # print(namedblocks) + # cfa.graph.render_dot().view() + + return cfa, namedblocks + + def _scan_namedblocks(self, bc, cfa): + """Scan namedblocks as denoted by a LOAD_GLOBAL bytecode referring + to global variables with the pattern "SET_BLOCK_", where "" + would be the name for the current block. + """ + namedblocks = {} + blocks = sorted([x.offset for x in cfa.iterblocks()]) + prefix = 'SET_BLOCK_' + + for inst in bc: + # Find LOAD_GLOBAL that refers to "SET_BLOCK_" + if inst.opname == 'LOAD_GLOBAL': + gv = bc.co_names[_fix_LOAD_GLOBAL_arg(inst.arg)] + if gv.startswith(prefix): + name = gv[len(prefix):] + # Find the block where this instruction resides + for s, e in zip(blocks, blocks[1:] + [blocks[-1] + 1]): + if s <= inst.offset < e: + break + else: + raise AssertionError('unreachable loop') + blkno = s + namedblocks[name] = blkno + return namedblocks + + def test_loop(self): + def foo(n): + c = 0 + SET_BLOCK_A # noqa: F821 + i = 0 + while SET_BLOCK_B0: # noqa: F821 + SET_BLOCK_B1 # noqa: F821 + c += 1 + i += 1 + SET_BLOCK_C # noqa: F821 + return c + + cfa, blkpts = self.get_cfa_and_namedblocks(foo) + + # Py3.10 turns while loop into if(...) { do {...} while(...) }. + # Also, `SET_BLOCK_B0` is duplicated. As a result, the second B0 + # is picked up by `blkpts`. 
+ domfront = cfa.graph.dominance_frontier() + self.assertFalse(domfront[blkpts['A']]) + self.assertFalse(domfront[blkpts['C']]) + + def test_loop_nested_and_break(self): + def foo(n): + SET_BLOCK_A # noqa: F821 + while SET_BLOCK_B0: # noqa: F821 + SET_BLOCK_B1 # noqa: F821 + while SET_BLOCK_C0: # noqa: F821 + SET_BLOCK_C1 # noqa: F821 + if SET_BLOCK_D0: # noqa: F821 + SET_BLOCK_D1 # noqa: F821 + break + elif n: + SET_BLOCK_D2 # noqa: F821 + SET_BLOCK_E # noqa: F821 + SET_BLOCK_F # noqa: F821 + SET_BLOCK_G # noqa: F821 + + cfa, blkpts = self.get_cfa_and_namedblocks(foo) + self.assertEqual(blkpts['D0'], blkpts['C1']) + + # Py3.10 changes while loop into if-do-while + domfront = cfa.graph.dominance_frontier() + self.assertFalse(domfront[blkpts['A']]) + self.assertFalse(domfront[blkpts['G']]) + # 2 domfront members for C1 + # C0 because of the loop; F because of the break. + self.assertEqual({blkpts['F']}, domfront[blkpts['D1']]) + self.assertEqual({blkpts['E']}, domfront[blkpts['D2']]) + + def test_if_else(self): + def foo(a, b): + c = 0 + SET_BLOCK_A # noqa: F821 + if a < b: + SET_BLOCK_B # noqa: F821 + c = 1 + elif SET_BLOCK_C0: # noqa: F821 + SET_BLOCK_C1 # noqa: F821 + c = 2 + else: + SET_BLOCK_D # noqa: F821 + c = 3 + + SET_BLOCK_E # noqa: F821 + if a % b == 0: + SET_BLOCK_F # noqa: F821 + c += 1 + SET_BLOCK_G # noqa: F821 + return c + + cfa, blkpts = self.get_cfa_and_namedblocks(foo) + + idoms = cfa.graph.immediate_dominators() + self.assertEqual(blkpts['A'], idoms[blkpts['B']]) + self.assertEqual(blkpts['A'], idoms[blkpts['C0']]) + self.assertEqual(blkpts['C0'], idoms[blkpts['C1']]) + self.assertEqual(blkpts['C0'], idoms[blkpts['D']]) + self.assertEqual(blkpts['A'], idoms[blkpts['E']]) + self.assertEqual(blkpts['E'], idoms[blkpts['F']]) + self.assertEqual(blkpts['E'], idoms[blkpts['G']]) + + domfront = cfa.graph.dominance_frontier() + self.assertFalse(domfront[blkpts['A']]) + self.assertFalse(domfront[blkpts['E']]) + self.assertFalse(domfront[blkpts['G']]) + 
self.assertEqual({blkpts['E']}, domfront[blkpts['B']]) + self.assertEqual({blkpts['E']}, domfront[blkpts['C0']]) + self.assertEqual({blkpts['E']}, domfront[blkpts['C1']]) + self.assertEqual({blkpts['E']}, domfront[blkpts['D']]) + self.assertEqual({blkpts['G']}, domfront[blkpts['F']]) + + def test_if_else_nested(self): + def foo(): + if SET_BLOCK_A0: # noqa: F821 + SET_BLOCK_A1 # noqa: F821 + if SET_BLOCK_B0: # noqa: F821 + SET_BLOCK_B1 # noqa: F821 + a = 0 + else: + if SET_BLOCK_C0: # noqa: F821 + SET_BLOCK_C1 # noqa: F821 + a = 1 + else: + SET_BLOCK_C2 # noqa: F821 + a = 2 + SET_BLOCK_D # noqa: F821 + SET_BLOCK_E # noqa: F821 + SET_BLOCK_F # noqa: F821 + return a + + cfa, blkpts = self.get_cfa_and_namedblocks(foo) + + idoms = cfa.graph.immediate_dominators() + self.assertEqual(blkpts['A0'], idoms[blkpts['A1']]) + self.assertEqual(blkpts['A1'], idoms[blkpts['B1']]) + self.assertEqual(blkpts['A1'], idoms[blkpts['C0']]) + self.assertEqual(blkpts['C0'], idoms[blkpts['D']]) + self.assertEqual(blkpts['A1'], idoms[blkpts['E']]) + self.assertEqual(blkpts['A0'], idoms[blkpts['F']]) + + domfront = cfa.graph.dominance_frontier() + self.assertFalse(domfront[blkpts['A0']]) + self.assertFalse(domfront[blkpts['F']]) + self.assertEqual({blkpts['E']}, domfront[blkpts['B1']]) + self.assertEqual({blkpts['D']}, domfront[blkpts['C1']]) + self.assertEqual({blkpts['E']}, domfront[blkpts['D']]) + self.assertEqual({blkpts['F']}, domfront[blkpts['E']]) + + def test_infinite_loop(self): + def foo(): + SET_BLOCK_A # noqa: F821 + while True: # infinite loop + if SET_BLOCK_B: # noqa: F821 + SET_BLOCK_C # noqa: F821 + return + SET_BLOCK_D # noqa: F821 + SET_BLOCK_E # noqa: F821 + + cfa, blkpts = self.get_cfa_and_namedblocks(foo) + + idoms = cfa.graph.immediate_dominators() + # Py3.10 optimizes away the infinite loop and removes SET_BLOCK_E from + # the bytecode. 
+ self.assertNotIn('E', blkpts) + self.assertEqual(blkpts['B'], idoms[blkpts['C']]) + self.assertEqual(blkpts['B'], idoms[blkpts['D']]) + + domfront = cfa.graph.dominance_frontier() + self.assertFalse(domfront[blkpts['A']]) + self.assertFalse(domfront[blkpts['C']]) + self.assertEqual({blkpts['B']}, domfront[blkpts['B']]) + self.assertEqual({blkpts['B']}, domfront[blkpts['D']]) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_func_interface.py b/venv/lib/python3.10/site-packages/numba/tests/test_func_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..546862858c4a9a0e6febb8fcbc6da881bdf1cb7f --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_func_interface.py @@ -0,0 +1,43 @@ +import unittest +from numba import jit + + +class TestFuncInterface(unittest.TestCase): + def test_jit_function_docstring(self): + + def add(x, y): + '''Return sum of two numbers''' + return x + y + + c_add = jit(add) + self.assertEqual(c_add.__doc__, 'Return sum of two numbers') + + def test_jit_function_name(self): + + def add(x, y): + return x + y + + c_add = jit(add) + self.assertEqual(c_add.__name__, 'add') + + def test_jit_function_module(self): + + def add(x, y): + return x + y + + c_add = jit(add) + # Expected answer depends on how you run this test. + # Compare to python function instead. 
+ self.assertEqual(c_add.__module__, add.__module__) + + def test_jit_function_code_object(self): + def add(x, y): + return x + y + + c_add = jit(add) + self.assertEqual(c_add.__code__, add.__code__) + self.assertEqual(c_add.func_code, add.__code__) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_func_lifetime.py b/venv/lib/python3.10/site-packages/numba/tests/test_func_lifetime.py new file mode 100644 index 0000000000000000000000000000000000000000..5624b57047bda17d17f21db4335d7691f2f0c0ad --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_func_lifetime.py @@ -0,0 +1,164 @@ +import gc +import weakref + +from numba import jit +from numba.core import types +from numba.tests.support import TestCase +import unittest + + +class Dummy(object): + + def __add__(self, other): + return other + 5 + + +def global_usecase1(x): + return x + 1 + +def global_usecase2(): + return global_obj + 1 + + +class TestFuncLifetime(TestCase): + """ + Test the lifetime of compiled function objects and their dependencies. + """ + + def get_impl(self, dispatcher): + """ + Get the single implementation (a C function object) of a dispatcher. + """ + self.assertEqual(len(dispatcher.overloads), 1) + cres = list(dispatcher.overloads.values())[0] + return cres.entry_point + + def check_local_func_lifetime(self, **jitargs): + def f(x): + return x + 1 + + c_f = jit('int32(int32)', **jitargs)(f) + self.assertPreciseEqual(c_f(1), 2) + + cfunc = self.get_impl(c_f) + + # Since we can't take a weakref to a C function object + # (see http://bugs.python.org/issue22116), ensure it's + # collected by taking a weakref to its __self__ instead + # (a _dynfunc._Closure object). 
+ refs = [weakref.ref(obj) for obj in (f, c_f, cfunc.__self__)] + obj = f = c_f = cfunc = None + gc.collect() + self.assertEqual([wr() for wr in refs], [None] * len(refs)) + + def test_local_func_lifetime(self): + self.check_local_func_lifetime(forceobj=True) + + def test_local_func_lifetime_npm(self): + self.check_local_func_lifetime(nopython=True) + + def check_global_func_lifetime(self, **jitargs): + c_f = jit(**jitargs)(global_usecase1) + self.assertPreciseEqual(c_f(1), 2) + + cfunc = self.get_impl(c_f) + + wr = weakref.ref(c_f) + refs = [weakref.ref(obj) for obj in (c_f, cfunc.__self__)] + obj = c_f = cfunc = None + gc.collect() + self.assertEqual([wr() for wr in refs], [None] * len(refs)) + + def test_global_func_lifetime(self): + self.check_global_func_lifetime(forceobj=True) + + def test_global_func_lifetime_npm(self): + self.check_global_func_lifetime(nopython=True) + + def check_global_obj_lifetime(self, **jitargs): + # Since global objects can be recorded for typing purposes, + # check that they are not kept around after they are removed + # from the globals. + global global_obj + global_obj = Dummy() + + c_f = jit(**jitargs)(global_usecase2) + self.assertPreciseEqual(c_f(), 6) + + refs = [weakref.ref(obj) for obj in (c_f, global_obj)] + obj = c_f = global_obj = None + gc.collect() + self.assertEqual([wr() for wr in refs], [None] * len(refs)) + + def test_global_obj_lifetime(self): + self.check_global_obj_lifetime(forceobj=True) + + def check_inner_function_lifetime(self, **jitargs): + """ + When a jitted function calls into another jitted function, check + that everything is collected as desired. 
+ """ + def mult_10(a): + return a * 10 + + c_mult_10 = jit('intp(intp)', **jitargs)(mult_10) + c_mult_10.disable_compile() + + def do_math(x): + return c_mult_10(x + 4) + + c_do_math = jit('intp(intp)', **jitargs)(do_math) + c_do_math.disable_compile() + + self.assertEqual(c_do_math(1), 50) + + wrs = [weakref.ref(obj) for obj in + (mult_10, c_mult_10, do_math, c_do_math, + self.get_impl(c_mult_10).__self__, + self.get_impl(c_do_math).__self__, + )] + obj = mult_10 = c_mult_10 = do_math = c_do_math = None + gc.collect() + self.assertEqual([w() for w in wrs], [None] * len(wrs)) + + def test_inner_function_lifetime(self): + self.check_inner_function_lifetime(forceobj=True) + + def test_inner_function_lifetime_npm(self): + self.check_inner_function_lifetime(nopython=True) + + +class TestLifeTimeIssue(TestCase): + def test_double_free(self): + from numba import njit + import numpy as np + + # This is the function that causes the crash + + @njit + def is_point_in_polygons(point, polygons): + num_polygons = polygons.shape[0] + if num_polygons != 0: + # An extra decref is inserted in this block + intentionally_unused_variable = polygons[0] + return 0 + + # This function creates some NRT objects for the previous function + # to corrupt. 
+ + @njit + def dummy(): + return np.empty(10, dtype=np.int64) + + polygons = np.array([[[0, 1]]]) + points = np.array([[-1.5, 0.5]]) + a = dummy() + is_point_in_polygons(points[0], polygons) + b = dummy() + # Crash happens at second call + is_point_in_polygons(points[0], polygons) + c = dummy() + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_funcdesc.py b/venv/lib/python3.10/site-packages/numba/tests/test_funcdesc.py new file mode 100644 index 0000000000000000000000000000000000000000..e5e899bfe45fb22df60c2037332c67d1fa004199 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_funcdesc.py @@ -0,0 +1,61 @@ + +import unittest + +from numba import njit +from numba.core.funcdesc import PythonFunctionDescriptor, default_mangler +from numba.core.compiler import run_frontend +from numba.core.itanium_mangler import mangle_abi_tag + + +class TestModule(unittest.TestCase): + def test_module_not_in_namespace(self): + """ Test of trying to run a compiled function + where the module from which the function is being compiled + doesn't exist in the namespace. + """ + filename = 'test.py' + name = 'mypackage' + code = """ +def f(x): + return x +""" + + objs = dict(__file__=filename, __name__=name) + compiled = compile(code, filename, 'exec') + exec(compiled, objs) + + compiled_f = njit(objs['f']) + self.assertEqual(compiled_f(3), 3) + + +class TestFuncDescMangledName(unittest.TestCase): + def test_mangling_abi_tags(self): + """ + This is a minimal test for the abi-tags support in the mangler. 
+ """ + def udt(): + pass + + # run minimal frontend to create a function descriptor + func_ir = run_frontend(udt) + typemap = {} + restype = None + calltypes = () + mangler = default_mangler + inline = False + noalias = False + + abi_tags = ("Shrubbery", "Herring") + + fd = PythonFunctionDescriptor.from_specialized_function( + func_ir, typemap, restype, calltypes, mangler, inline, noalias, + abi_tags=abi_tags, + ) + + # mangled tag must exist in the mangled name + self.assertIn("".join([mangle_abi_tag(x) for x in abi_tags]), + fd.mangled_name) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_function_type.py b/venv/lib/python3.10/site-packages/numba/tests/test_function_type.py new file mode 100644 index 0000000000000000000000000000000000000000..dce78c2e701a643d273ab4370f16272f87b31cb2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_function_type.py @@ -0,0 +1,1408 @@ +import re +import sys +import unittest +import types as pytypes +from numba import jit, njit, cfunc, types, int64, float64, float32, errors +from numba import literal_unroll, typeof +from numba.core.config import IS_WIN32 +import ctypes +import warnings + +from .support import TestCase, MemoryLeakMixin +from .support import redirect_c_stderr, captured_stderr + +import numpy as np + + +def dump(foo): # FOR DEBUGGING, TO BE REMOVED + from numba.core import function + foo_type = function.fromobject(foo) + foo_sig = foo_type.signature() + foo.compile(foo_sig) + print('{" LLVM IR OF "+foo.__name__+" ":*^70}') + print(foo.inspect_llvm(foo_sig.args)) + print('{"":*^70}') + + +# Decorators for transforming a Python function to different kinds of +# functions: + +def mk_cfunc_func(sig): + def cfunc_func(func): + assert isinstance(func, pytypes.FunctionType), repr(func) + f = cfunc(sig)(func) + f.pyfunc = func + return f + return cfunc_func + + +def njit_func(func): + assert isinstance(func, pytypes.FunctionType), 
repr(func) + f = jit(nopython=True)(func) + f.pyfunc = func + return f + + +def mk_njit_with_sig_func(sig): + def njit_with_sig_func(func): + assert isinstance(func, pytypes.FunctionType), repr(func) + f = jit(sig, nopython=True)(func) + f.pyfunc = func + return f + return njit_with_sig_func + + +def mk_ctypes_func(sig): + def ctypes_func(func, sig=int64(int64)): + assert isinstance(func, pytypes.FunctionType), repr(func) + cfunc = mk_cfunc_func(sig)(func) + addr = cfunc._wrapper_address + if sig == int64(int64): + f = ctypes.CFUNCTYPE(ctypes.c_int64)(addr) + f.pyfunc = func + return f + raise NotImplementedError( + f'ctypes decorator for {func} with signature {sig}') + return ctypes_func + + +class WAP(types.WrapperAddressProtocol): + """An example implementation of wrapper address protocol. + + """ + def __init__(self, func, sig): + self.pyfunc = func + self.cfunc = cfunc(sig)(func) + self.sig = sig + + def __wrapper_address__(self): + return self.cfunc._wrapper_address + + def signature(self): + return self.sig + + def __call__(self, *args, **kwargs): + return self.pyfunc(*args, **kwargs) + + +def mk_wap_func(sig): + def wap_func(func): + return WAP(func, sig) + return wap_func + + +class TestFunctionType(TestCase): + """Test first-class functions in the context of a Numba jit compiled + function. + + """ + + def test_in__(self): + """Function is passed in as an argument. + """ + + def a(i): + return i + 1 + + def foo(f): + return 0 + + sig = int64(int64) + + for decor in [mk_cfunc_func(sig), + njit_func, + mk_njit_with_sig_func(sig), + mk_ctypes_func(sig), + mk_wap_func(sig)]: + for jit_opts in [dict(nopython=True), dict(forceobj=True)]: + jit_ = jit(**jit_opts) + with self.subTest(decor=decor.__name__, jit=jit_opts): + a_ = decor(a) + self.assertEqual(jit_(foo)(a_), foo(a)) + + def test_in_call__(self): + """Function is passed in as an argument and called. + Also test different return values. 
+ """ + + def a_i64(i): + return i + 1234567 + + def a_f64(i): + return i + 1.5 + + def a_str(i): + return "abc" + + def foo(f): + return f(123) + + for f, sig in [(a_i64, int64(int64)), (a_f64, float64(int64))]: + for decor in [mk_cfunc_func(sig), njit_func, + mk_njit_with_sig_func(sig), + mk_wap_func(sig)]: + for jit_opts in [dict(nopython=True), dict(forceobj=True)]: + jit_ = jit(**jit_opts) + with self.subTest( + sig=sig, decor=decor.__name__, jit=jit_opts): + f_ = decor(f) + self.assertEqual(jit_(foo)(f_), foo(f)) + + def test_in_call_out(self): + """Function is passed in as an argument, called, and returned. + """ + + def a(i): + return i + 1 + + def foo(f): + f(123) + return f + + sig = int64(int64) + + for decor in [mk_cfunc_func(sig), njit_func, + mk_njit_with_sig_func(sig), mk_wap_func(sig)]: + for jit_opts in [dict(nopython=True), dict(forceobj=True)]: + jit_ = jit(**jit_opts) + with self.subTest(decor=decor.__name__): + a_ = decor(a) + r1 = jit_(foo)(a_).pyfunc + r2 = foo(a) + self.assertEqual(r1, r2) + + def test_in_seq_call(self): + """Functions are passed in as arguments, used as tuple items, and + called. + + """ + def a(i): + return i + 1 + + def b(i): + return i + 2 + + def foo(f, g): + r = 0 + for f_ in (f, g): + r = r + f_(r) + return r + + sig = int64(int64) + + for decor in [mk_cfunc_func(sig), mk_wap_func(sig), + mk_njit_with_sig_func(sig)]: + for jit_opts in [dict(nopython=True), dict(forceobj=True)]: + jit_ = jit(**jit_opts) + with self.subTest(decor=decor.__name__): + a_ = decor(a) + b_ = decor(b) + self.assertEqual(jit_(foo)(a_, b_), foo(a, b)) + + def test_in_ns_seq_call(self): + """Functions are passed in as an argument and via namespace scoping + (mixed pathways), used as tuple items, and called. 
+ + """ + + def a(i): + return i + 1 + + def b(i): + return i + 2 + + def mkfoo(b_): + def foo(f): + r = 0 + for f_ in (f, b_): + r = r + f_(r) + return r + return foo + + sig = int64(int64) + + for decor in [mk_cfunc_func(sig), + mk_njit_with_sig_func(sig), mk_wap_func(sig), + mk_ctypes_func(sig)][:-1]: + for jit_opts in [dict(nopython=True), dict(forceobj=True)]: + jit_ = jit(**jit_opts) + with self.subTest(decor=decor.__name__): + a_ = decor(a) + b_ = decor(b) + self.assertEqual(jit_(mkfoo(b_))(a_), mkfoo(b)(a)) + + def test_ns_call(self): + """Function is passed in via namespace scoping and called. + + """ + + def a(i): + return i + 1 + + def mkfoo(a_): + def foo(): + return a_(123) + return foo + + sig = int64(int64) + + for decor in [mk_cfunc_func(sig), njit_func, + mk_njit_with_sig_func(sig), mk_wap_func(sig)]: + for jit_opts in [dict(nopython=True), dict(forceobj=True)]: + jit_ = jit(**jit_opts) + with self.subTest(decor=decor.__name__): + a_ = decor(a) + self.assertEqual(jit_(mkfoo(a_))(), mkfoo(a)()) + + def test_ns_out(self): + """Function is passed in via namespace scoping and returned. + + """ + def a(i): + return i + 1 + + def mkfoo(a_): + def foo(): + return a_ + return foo + + sig = int64(int64) + + for decor in [mk_cfunc_func(sig), njit_func, + mk_njit_with_sig_func(sig), mk_wap_func(sig), + mk_ctypes_func(sig)][:-1]: + for jit_opts in [dict(nopython=True), dict(forceobj=True)]: + jit_ = jit(**jit_opts) + with self.subTest(decor=decor.__name__): + a_ = decor(a) + self.assertEqual(jit_(mkfoo(a_))().pyfunc, mkfoo(a)()) + + def test_ns_call_out(self): + """Function is passed in via namespace scoping, called, and then + returned. 
+ + """ + def a(i): + return i + 1 + + def mkfoo(a_): + def foo(): + a_(123) + return a_ + return foo + + sig = int64(int64) + + for decor in [mk_cfunc_func(sig), njit_func, + mk_njit_with_sig_func(sig), mk_wap_func(sig), + mk_ctypes_func(sig)]: + for jit_opts in [dict(nopython=True), dict(forceobj=True)]: + jit_ = jit(**jit_opts) + with self.subTest(decor=decor.__name__): + a_ = decor(a) + self.assertEqual(jit_(mkfoo(a_))().pyfunc, mkfoo(a)()) + + def test_in_overload(self): + """Function is passed in as an argument and called with different + argument types. + + """ + def a(i): + return i + 1 + + def foo(f): + r1 = f(123) + r2 = f(123.45) + return (r1, r2) + + for decor in [njit_func]: + for jit_opts in [dict(nopython=True), dict(forceobj=True)]: + jit_ = jit(**jit_opts) + with self.subTest(decor=decor.__name__): + a_ = decor(a) + self.assertEqual(jit_(foo)(a_), foo(a)) + + def test_ns_overload(self): + """Function is passed in via namespace scoping and called with + different argument types. + + """ + def a(i): + return i + 1 + + def mkfoo(a_): + def foo(): + r1 = a_(123) + r2 = a_(123.45) + return (r1, r2) + return foo + + for decor in [njit_func]: + for jit_opts in [dict(nopython=True), dict(forceobj=True)]: + jit_ = jit(**jit_opts) + with self.subTest(decor=decor.__name__): + a_ = decor(a) + self.assertEqual(jit_(mkfoo(a_))(), mkfoo(a)()) + + def test_in_choose(self): + """Functions are passed in as arguments and called conditionally. 
+ + """ + def a(i): + return i + 1 + + def b(i): + return i + 2 + + def foo(a, b, choose_left): + if choose_left: + r = a(1) + else: + r = b(2) + return r + + sig = int64(int64) + + for decor in [mk_cfunc_func(sig), njit_func, + mk_njit_with_sig_func(sig), mk_wap_func(sig)]: + for jit_opts in [dict(nopython=True), dict(forceobj=True)]: + jit_ = jit(**jit_opts) + with self.subTest(decor=decor.__name__): + a_ = decor(a) + b_ = decor(b) + self.assertEqual(jit_(foo)(a_, b_, True), foo(a, b, True)) + self.assertEqual(jit_(foo)(a_, b_, False), + foo(a, b, False)) + self.assertNotEqual(jit_(foo)(a_, b_, True), + foo(a, b, False)) + + def test_ns_choose(self): + """Functions are passed in via namespace scoping and called + conditionally. + + """ + def a(i): + return i + 1 + + def b(i): + return i + 2 + + def mkfoo(a_, b_): + def foo(choose_left): + if choose_left: + r = a_(1) + else: + r = b_(2) + return r + return foo + + sig = int64(int64) + + for decor in [mk_cfunc_func(sig), njit_func, + mk_njit_with_sig_func(sig), mk_wap_func(sig)]: + for jit_opts in [dict(nopython=True), dict(forceobj=True)]: + jit_ = jit(**jit_opts) + with self.subTest(decor=decor.__name__): + a_ = decor(a) + b_ = decor(b) + self.assertEqual(jit_(mkfoo(a_, b_))(True), + mkfoo(a, b)(True)) + self.assertEqual(jit_(mkfoo(a_, b_))(False), + mkfoo(a, b)(False)) + self.assertNotEqual(jit_(mkfoo(a_, b_))(True), + mkfoo(a, b)(False)) + + def test_in_choose_out(self): + """Functions are passed in as arguments and returned conditionally. 
+ + """ + def a(i): + return i + 1 + + def b(i): + return i + 2 + + def foo(a, b, choose_left): + if choose_left: + return a + else: + return b + + sig = int64(int64) + + for decor in [mk_cfunc_func(sig), njit_func, + mk_njit_with_sig_func(sig), mk_wap_func(sig)]: + for jit_opts in [dict(nopython=True), dict(forceobj=True)]: + jit_ = jit(**jit_opts) + with self.subTest(decor=decor.__name__): + a_ = decor(a) + b_ = decor(b) + self.assertEqual(jit_(foo)(a_, b_, True).pyfunc, + foo(a, b, True)) + self.assertEqual(jit_(foo)(a_, b_, False).pyfunc, + foo(a, b, False)) + self.assertNotEqual(jit_(foo)(a_, b_, True).pyfunc, + foo(a, b, False)) + + def test_in_choose_func_value(self): + """Functions are passed in as arguments, selected conditionally and + called. + + """ + def a(i): + return i + 1 + + def b(i): + return i + 2 + + def foo(a, b, choose_left): + if choose_left: + f = a + else: + f = b + return f(1) + + sig = int64(int64) + + for decor in [mk_cfunc_func(sig), mk_wap_func(sig), njit_func, + mk_njit_with_sig_func(sig)]: + for jit_opts in [dict(nopython=True), dict(forceobj=True)]: + jit_ = jit(**jit_opts) + with self.subTest(decor=decor.__name__): + a_ = decor(a) + b_ = decor(b) + self.assertEqual(jit_(foo)(a_, b_, True), foo(a, b, True)) + self.assertEqual(jit_(foo)(a_, b_, False), + foo(a, b, False)) + self.assertNotEqual(jit_(foo)(a_, b_, True), + foo(a, b, False)) + + def test_in_pick_func_call(self): + """Functions are passed in as items of tuple argument, retrieved via + indexing, and called. 
+ + """ + def a(i): + return i + 1 + + def b(i): + return i + 2 + + def foo(funcs, i): + f = funcs[i] + r = f(123) + return r + + sig = int64(int64) + + for decor in [mk_cfunc_func(sig), mk_wap_func(sig), + mk_njit_with_sig_func(sig)]: + for jit_opts in [dict(nopython=True), dict(forceobj=True)]: + jit_ = jit(**jit_opts) + with self.subTest(decor=decor.__name__): + a_ = decor(a) + b_ = decor(b) + self.assertEqual(jit_(foo)((a_, b_), 0), foo((a, b), 0)) + self.assertEqual(jit_(foo)((a_, b_), 1), foo((a, b), 1)) + self.assertNotEqual(jit_(foo)((a_, b_), 0), foo((a, b), 1)) + + def test_in_iter_func_call(self): + """Functions are passed in as items of tuple argument, retrieved via + indexing, and called within a variable for-loop. + + """ + def a(i): + return i + 1 + + def b(i): + return i + 2 + + def foo(funcs, n): + r = 0 + for i in range(n): + f = funcs[i] + r = r + f(r) + return r + + sig = int64(int64) + + for decor in [mk_cfunc_func(sig), mk_wap_func(sig), + mk_njit_with_sig_func(sig)]: + for jit_opts in [dict(nopython=True), dict(forceobj=True)]: + jit_ = jit(**jit_opts) + with self.subTest(decor=decor.__name__): + a_ = decor(a) + b_ = decor(b) + self.assertEqual(jit_(foo)((a_, b_), 2), foo((a, b), 2)) + + def test_experimental_feature_warning(self): + @jit(nopython=True) + def more(x): + return x + 1 + + @jit(nopython=True) + def less(x): + return x - 1 + + @jit(nopython=True) + def foo(sel, x): + fn = more if sel else less + return fn(x) + + with warnings.catch_warnings(record=True) as ws: + warnings.simplefilter("always") + res = foo(True, 10) + + self.assertEqual(res, 11) + self.assertEqual(foo(False, 10), 9) + + self.assertGreaterEqual(len(ws), 1) + pat = "First-class function type feature is experimental" + for w in ws: + if pat in str(w.message): + break + else: + self.fail("missing warning") + + +class TestFunctionTypeExtensions(TestCase): + """Test calling external library functions within Numba jit compiled + functions. 
+ + """ + + def test_wrapper_address_protocol_libm(self): + """Call cos and sinf from standard math library. + + """ + import ctypes.util + + class LibM(types.WrapperAddressProtocol): + + def __init__(self, fname): + if IS_WIN32: + lib = ctypes.cdll.msvcrt + else: + libpath = ctypes.util.find_library('m') + lib = ctypes.cdll.LoadLibrary(libpath) + self.lib = lib + self._name = fname + if fname == 'cos': + # test for double-precision math function + addr = ctypes.cast(self.lib.cos, ctypes.c_voidp).value + signature = float64(float64) + elif fname == 'sinf': + # test for single-precision math function + # Other 32/64 bit platforms define sinf as the + # single-precision sin function + addr = ctypes.cast(self.lib.sinf, ctypes.c_voidp).value + signature = float32(float32) + else: + raise NotImplementedError( + f'wrapper address of `{fname}`' + f' with signature `{signature}`') + self._signature = signature + self._address = addr + + def __repr__(self): + return f'{type(self).__name__}({self._name!r})' + + def __wrapper_address__(self): + return self._address + + def signature(self): + return self._signature + + mycos = LibM('cos') + mysin = LibM('sinf') + + def myeval(f, x): + return f(x) + + # Not testing forceobj=True as it requires implementing + # LibM.__call__ using ctypes which would be out-of-scope here. + for jit_opts in [dict(nopython=True)]: + jit_ = jit(**jit_opts) + with self.subTest(jit=jit_opts): + if mycos.signature() is not None: + self.assertEqual(jit_(myeval)(mycos, 0.0), 1.0) + if mysin.signature() is not None: + self.assertEqual(jit_(myeval)(mysin, float32(0.0)), 0.0) + + def test_compilation_results(self): + """Turn the existing compilation results of a dispatcher instance to + first-class functions with precise types. 
+ """ + + @jit(nopython=True) + def add_template(x, y): + return x + y + + # Trigger compilations + self.assertEqual(add_template(1, 2), 3) + self.assertEqual(add_template(1.2, 3.4), 4.6) + + cres1, cres2 = add_template.overloads.values() + + # Turn compilation results into first-class functions + iadd = types.CompileResultWAP(cres1) + fadd = types.CompileResultWAP(cres2) + + @jit(nopython=True) + def foo(add, x, y): + return add(x, y) + + @jit(forceobj=True) + def foo_obj(add, x, y): + return add(x, y) + + self.assertEqual(foo(iadd, 3, 4), 7) + self.assertEqual(foo(fadd, 3.4, 4.5), 7.9) + + self.assertEqual(foo_obj(iadd, 3, 4), 7) + self.assertEqual(foo_obj(fadd, 3.4, 4.5), 7.9) + + +class TestMiscIssues(TestCase): + """Test issues of using first-class functions in the context of Numba + jit compiled functions. + + """ + + def test_issue_3405_using_cfunc(self): + + @cfunc('int64()') + def a(): + return 2 + + @cfunc('int64()') + def b(): + return 3 + + def g(arg): + if arg: + f = a + else: + f = b + return f() + + self.assertEqual(jit(nopython=True)(g)(True), 2) + self.assertEqual(jit(nopython=True)(g)(False), 3) + + def test_issue_3405_using_njit(self): + + @jit(nopython=True) + def a(): + return 2 + + @jit(nopython=True) + def b(): + return 3 + + def g(arg): + if not arg: + f = b + else: + f = a + return f() + + self.assertEqual(jit(nopython=True)(g)(True), 2) + self.assertEqual(jit(nopython=True)(g)(False), 3) + + def test_pr4967_example(self): + + @cfunc('int64(int64)') + def a(i): + return i + 1 + + @cfunc('int64(int64)') + def b(i): + return i + 2 + + @jit(nopython=True) + def foo(f, g): + i = f(2) + seq = (f, g) + for fun in seq: + i += fun(i) + return i + + a_ = a._pyfunc + b_ = b._pyfunc + self.assertEqual(foo(a, b), + a_(2) + a_(a_(2)) + b_(a_(2) + a_(a_(2)))) + + def test_pr4967_array(self): + import numpy as np + + @cfunc("intp(intp[:], float64[:])") + def foo1(x, y): + return x[0] + y[0] + + @cfunc("intp(intp[:], float64[:])") + def foo2(x, y): + 
return x[0] - y[0] + + def bar(fx, fy, i): + a = np.array([10], dtype=np.intp) + b = np.array([12], dtype=np.float64) + if i == 0: + f = fx + elif i == 1: + f = fy + else: + return + return f(a, b) + + r = jit(nopython=True, no_cfunc_wrapper=True)(bar)(foo1, foo2, 0) + self.assertEqual(r, bar(foo1, foo2, 0)) + self.assertNotEqual(r, bar(foo1, foo2, 1)) + + def test_reference_example(self): + import numba + + @numba.njit + def composition(funcs, x): + r = x + for f in funcs[::-1]: + r = f(r) + return r + + @numba.cfunc("double(double)") + def a(x): + return x + 1.0 + + @numba.njit() + def b(x): + return x * x + + r = composition((a, b, b, a), 0.5) + self.assertEqual(r, (0.5 + 1.0) ** 4 + 1.0) + + r = composition((b, a, b, b, a), 0.5) + self.assertEqual(r, ((0.5 + 1.0) ** 4 + 1.0) ** 2) + + def test_apply_function_in_function(self): + + def foo(f, f_inner): + return f(f_inner) + + @cfunc('int64(float64)') + def f_inner(i): + return int64(i * 3) + + @cfunc(int64(types.FunctionType(f_inner._sig))) + def f(f_inner): + return f_inner(123.4) + + self.assertEqual(jit(nopython=True)(foo)(f, f_inner), + foo(f._pyfunc, f_inner._pyfunc)) + + def test_function_with_none_argument(self): + + @cfunc(int64(types.none)) + def a(i): + return 1 + + @jit(nopython=True) + def foo(f): + return f(None) + + self.assertEqual(foo(a), 1) + + def test_constant_functions(self): + + @jit(nopython=True) + def a(): + return 123 + + @jit(nopython=True) + def b(): + return 456 + + @jit(nopython=True) + def foo(): + return a() + b() + + r = foo() + if r != 123 + 456: + print(foo.overloads[()].library.get_llvm_str()) + self.assertEqual(r, 123 + 456) + + def test_generators(self): + + @jit(forceobj=True) + def gen(xs): + for x in xs: + x += 1 + yield x + + @jit(forceobj=True) + def con(gen_fn, xs): + return [it for it in gen_fn(xs)] + + self.assertEqual(con(gen, (1, 2, 3)), [2, 3, 4]) + + @jit(nopython=True) + def gen_(xs): + for x in xs: + x += 1 + yield x + self.assertEqual(con(gen_, (1, 2, 3)), [2, 
3, 4]) + + def test_jit_support(self): + + @jit(nopython=True) + def foo(f, x): + return f(x) + + @jit() + def a(x): + return x + 1 + + @jit() + def a2(x): + return x - 1 + + @jit() + def b(x): + return x + 1.5 + + self.assertEqual(foo(a, 1), 2) + a2(5) # pre-compile + self.assertEqual(foo(a2, 2), 1) + self.assertEqual(foo(a2, 3), 2) + self.assertEqual(foo(a, 2), 3) + self.assertEqual(foo(a, 1.5), 2.5) + self.assertEqual(foo(a2, 1), 0) + self.assertEqual(foo(a, 2.5), 3.5) + self.assertEqual(foo(b, 1.5), 3.0) + self.assertEqual(foo(b, 1), 2.5) + + def test_signature_mismatch(self): + @jit(nopython=True) + def f1(x): + return x + + @jit(nopython=True) + def f2(x): + return x + + @jit(nopython=True) + def foo(disp1, disp2, sel): + if sel == 1: + fn = disp1 + else: + fn = disp2 + return fn([1]), fn(2) + + with self.assertRaises(errors.UnsupportedError) as cm: + foo(f1, f2, sel=1) + self.assertRegex( + str(cm.exception), 'mismatch of function types:') + + # this works because `sel == 1` condition is optimized away: + self.assertEqual(foo(f1, f1, sel=1), ([1], 2)) + + def test_unique_dispatcher(self): + # In general, the type of a dispatcher instance is imprecise + # and when used as an input to type-inference, the typing will + # likely fail. However, if a dispatcher instance contains + # exactly one overload and compilation is disabled for the dispatcher, + # then the type of dispatcher instance is interpreted as precise + # and is transformed to a FunctionType instance with the defined + # signature of the single overload. 
+ + def foo_template(funcs, x): + r = x + for f in funcs: + r = f(r) + return r + + a = jit(nopython=True)(lambda x: x + 1) + b = jit(nopython=True)(lambda x: x + 2) + foo = jit(nopython=True)(foo_template) + + # compiling and disabling compilation for `a` is sufficient, + # `b` will inherit its type from the container Tuple type + a(0) + a.disable_compile() + + r = foo((a, b), 0) + self.assertEqual(r, 3) + # the Tuple type of foo's first argument is a precise FunctionType: + self.assertEqual(foo.signatures[0][0].dtype.is_precise(), True) + + def test_zero_address(self): + + sig = int64() + + @cfunc(sig) + def test(): + return 123 + + class Good(types.WrapperAddressProtocol): + """A first-class function type with valid address. + """ + + def __wrapper_address__(self): + return test.address + + def signature(self): + return sig + + class Bad(types.WrapperAddressProtocol): + """A first-class function type with invalid 0 address. + """ + + def __wrapper_address__(self): + return 0 + + def signature(self): + return sig + + class BadToGood(types.WrapperAddressProtocol): + """A first-class function type with invalid address that is + recovered to a valid address. 
+ """ + + counter = -1 + + def __wrapper_address__(self): + self.counter += 1 + return test.address * min(1, self.counter) + + def signature(self): + return sig + + good = Good() + bad = Bad() + bad2good = BadToGood() + + @jit(int64(sig.as_type())) + def foo(func): + return func() + + @jit(int64()) + def foo_good(): + return good() + + @jit(int64()) + def foo_bad(): + return bad() + + @jit(int64()) + def foo_bad2good(): + return bad2good() + + self.assertEqual(foo(good), 123) + + self.assertEqual(foo_good(), 123) + + with self.assertRaises(ValueError) as cm: + foo(bad) + self.assertRegex( + str(cm.exception), + 'wrapper address of <.*> instance must be a positive') + + with self.assertRaises(RuntimeError) as cm: + foo_bad() + self.assertRegex( + str(cm.exception), r'.* function address is null') + + self.assertEqual(foo_bad2good(), 123) + + def test_issue_5470(self): + + @njit() + def foo1(): + return 10 + + @njit() + def foo2(): + return 20 + + formulae_foo = (foo1, foo1) + + @njit() + def bar_scalar(f1, f2): + return f1() + f2() + + @njit() + def bar(): + return bar_scalar(*formulae_foo) + + self.assertEqual(bar(), 20) + + formulae_foo = (foo1, foo2) + + @njit() + def bar(): + return bar_scalar(*formulae_foo) + + self.assertEqual(bar(), 30) + + def test_issue_5540(self): + + @njit(types.int64(types.int64)) + def foo(x): + return x + 1 + + @njit + def bar_bad(foos): + f = foos[0] + return f(x=1) + + @njit + def bar_good(foos): + f = foos[0] + return f(1) + + self.assertEqual(bar_good((foo, )), 2) + + # new/old style error handling + with self.assertRaises((errors.UnsupportedError, + errors.TypingError)) as cm: + bar_bad((foo, )) + + self.assertRegex( + str(cm.exception), + r'.*first-class function call cannot use keyword arguments') + + def test_issue_5615(self): + + @njit + def foo1(x): + return x + 1 + + @njit + def foo2(x): + return x + 2 + + @njit + def bar(fcs): + x = 0 + a = 10 + i, j = fcs[0] + x += i(j(a)) + for t in literal_unroll(fcs): + i, j = t + x += 
i(j(a)) + return x + + tup = ((foo1, foo2), (foo2, foo1)) + + self.assertEqual(bar(tup), 39) + + def test_issue_5685(self): + + @njit + def foo1(): + return 1 + + @njit + def foo2(x): + return x + 1 + + @njit + def foo3(x): + return x + 2 + + @njit + def bar(fcs): + r = 0 + for pair in literal_unroll(fcs): + f1, f2 = pair + r += f1() + f2(2) + return r + + self.assertEqual(bar(((foo1, foo2),)), 4) + self.assertEqual(bar(((foo1, foo2), (foo1, foo3))), 9) # reproducer + + +class TestBasicSubtyping(TestCase): + def test_basic(self): + """ + Test that a dispatcher object *with* a pre-compiled overload + can be used as input to another function with locked-down signature + """ + a = 1 + + @njit + def foo(x): + return x + 1 + + foo(a) + int_int_fc = types.FunctionType(types.int64(types.int64,)) + + @njit(types.int64(int_int_fc)) + def bar(fc): + return fc(a) + + self.assertEqual(bar(foo), foo(a)) + + def test_basic2(self): + """ + Test that a dispatcher object *without* a pre-compiled overload + can be used as input to another function with locked-down signature + """ + a = 1 + + @njit + def foo(x): + return x + 1 + + int_int_fc = types.FunctionType(types.int64(types.int64,)) + + @njit(types.int64(int_int_fc)) + def bar(fc): + return fc(a) + + self.assertEqual(bar(foo), foo(a)) + + def test_basic3(self): + """ + Test that a dispatcher object *without* a pre-compiled overload + can be used as input to another function with locked-down signature and + that it behaves as a truly generic function (foo1 does not get locked) + """ + a = 1 + + @njit + def foo1(x): + return x + 1 + + @njit + def foo2(x): + return x + 2 + + int_int_fc = types.FunctionType(types.int64(types.int64,)) + + @njit(types.int64(int_int_fc)) + def bar(fc): + return fc(a) + + self.assertEqual(bar(foo1) + 1, bar(foo2)) + + def test_basic4(self): + """ + Test that a dispatcher object can be used as input to another + function with signature as part of a tuple + """ + a = 1 + + @njit + def foo1(x): + return x 
+ 1 + + @njit + def foo2(x): + return x + 2 + + tup = (foo1, foo2) + int_int_fc = types.FunctionType(types.int64(types.int64,)) + + @njit(types.int64(types.UniTuple(int_int_fc, 2))) + def bar(fcs): + x = 0 + for i in range(2): + x += fcs[i](a) + return x + self.assertEqual(bar(tup), foo1(a) + foo2(a)) + + def test_basic5(self): + a = 1 + + @njit + def foo1(x): + return x + 1 + + @njit + def foo2(x): + return x + 2 + + @njit + def bar1(x): + return x / 10 + + @njit + def bar2(x): + return x / 1000 + + tup = (foo1, foo2) + tup_bar = (bar1, bar2) + int_int_fc = types.FunctionType(types.int64(types.int64,)) + + flt_flt_fc = types.FunctionType(types.float64(types.float64,)) + + @njit((types.UniTuple(int_int_fc, 2), types.UniTuple(flt_flt_fc, 2))) + def bar(fcs, ffs): + x = 0 + for i in range(2): + x += fcs[i](a) + for fn in ffs: + x += fn(a) + return x + + got = bar(tup, tup_bar) + expected = foo1(a) + foo2(a) + bar1(a) + bar2(a) + self.assertEqual(got, expected) + + +class TestMultiFunctionType(MemoryLeakMixin, TestCase): + + def test_base(self): + # The test is adapted from https://github.com/numba/numba/issues/9071 + nb_array = typeof(np.ones(2)) + callee_int_type = types.FunctionType(int64(int64)) + sig_int = int64(callee_int_type, int64) + callee_array_type = types.FunctionType(float64(nb_array)) + sig_array = float64(callee_array_type, nb_array) + + @njit([sig_int, sig_array]) + def caller(callee, a): + return callee(a) + + @njit + def callee_int(b): + return b + + @njit + def callee_array(c): + return c.sum() + + b = 1 + c = np.ones(2) + + self.assertEqual(caller(callee_int, b), b) + self.assertEqual(caller(callee_array, c), c.sum()) + + +class TestInliningFunctionType(MemoryLeakMixin, TestCase): + def count_num_bb_in_cfg(self, dispatcher): + dot = dispatcher.inspect_cfg(dispatcher.signatures[0]).dot + num_of_nodes = re.findall(r"Node0x[0-9a-z]+", dot) + return len(num_of_nodes) + + def test_inlining_global_dispatcher(self): + @njit + def add(x, y): + return x + 
y + + fnty = types.FunctionType(int64(int64, int64)) + + @njit(int64(fnty, int64, int64)) + def callme(fn, x, y): + c = 0 + for i in range(100): + c += fn(x, y) + return c + + @njit + def bar(x, y): + return callme(add, x, y) + + res = bar(123, 321) + self.assertEqual(100 * (123 + 321), res) + # There's only one BB because LLVM will be able to fully optimize + # the reduce-add loop if add() is properly inlined. + self.assertEqual(self.count_num_bb_in_cfg(bar), 1) + + def test_not_inlining_dispatcher_args(self): + @njit + def add(x, y): + return x + y + + fnty = types.FunctionType(int64(int64, int64)) + + @njit(int64(fnty, int64, int64)) + def callme(fn, x, y): + c = 0 + for i in range(100): + c += fn(x, y) + return c + + res = callme(add, 123, 321) + + self.assertEqual(100 * (123 + 321), res) + # Since add() is not inline-able. The number of BB will be greater + # than 1. See test_inlining_global_dispatcher(). + self.assertGreater(self.count_num_bb_in_cfg(callme), 1) + + +class TestExceptionInFunctionType(MemoryLeakMixin, TestCase): + def test_exception_raising(self): + class MyError(Exception): + pass + + @njit + def add(x, y): + res = x + y + if res > 100: + raise MyError(res) + return res + + fnty = types.FunctionType(int64(int64, int64)) + + @njit(int64(fnty)) + def callme(fn): + c = 0 + for i in range(100): + c = fn(c, i) + return c + + @njit + def bar(): + return callme(add) + + # Pass Dispatcher as a global reference + with self.assertRaises(MyError) as exc: + bar() + self.assertEqual(exc.exception.args, (105,)) + + # Pass Dispatcher by argument + with self.assertRaises(MyError) as exc: + callme(add) + self.assertEqual(exc.exception.args, (105,)) + + def test_exception_ignored_in_cfunc(self): + class MyError(Exception): + pass + + @njit + def add(x, y): + res = x + y + if res > 100: + raise MyError(res) + return res + + fnty = types.FunctionType(int64(int64, int64)) + + @njit(int64(fnty, int64, int64)) + def callme(fn, x, y): + return fn(x, y) + + # Cfunc as 
argument will ignore raised exception + @cfunc(int64(int64, int64)) + def c_add(x, y): + return add(x, y) + + self.assertEqual(callme(c_add, 12, 32), 44) + + # If unittest is buffering (-b), the message goes to Python level stderr + # otherwise, it goes to C stderr. + with redirect_c_stderr() as c_stderr, captured_stderr() as stderr: + # raise ignored and result is garbage + callme(c_add, 100, 1) + sys.stderr.flush() + + err = c_stderr.read() + if not err: + err = stderr.getvalue() + + self.assertIn("Exception ignored in:", err) + self.assertIn(str(MyError(101)), err) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_gdb_bindings.py b/venv/lib/python3.10/site-packages/numba/tests/test_gdb_bindings.py new file mode 100644 index 0000000000000000000000000000000000000000..16d6afc4a301d08c20b129069892f8ff71cafba3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_gdb_bindings.py @@ -0,0 +1,277 @@ +""" +Tests gdb bindings +""" +import os +import platform +import re +import subprocess +import sys +import threading +from itertools import permutations + +from numba import njit, gdb, gdb_init, gdb_breakpoint, prange +from numba.core import errors +from numba import jit + +from numba.tests.support import (TestCase, captured_stdout, tag, + skip_parfors_unsupported) +from numba.tests.gdb_support import needs_gdb +import unittest + + +_platform = sys.platform + +_unix_like = (_platform.startswith('linux') + or _platform.startswith('darwin') + or ('bsd' in _platform)) + +unix_only = unittest.skipUnless(_unix_like, "unix-like OS is required") +not_unix = unittest.skipIf(_unix_like, "non unix-like OS is required") + +_arch_name = platform.machine() +_is_arm = _arch_name in {'aarch64', 'armv7l'} +not_arm = unittest.skipIf(_is_arm, "testing disabled on ARM") + +_gdb_cond = os.environ.get('GDB_TEST', None) == '1' +needs_gdb_harness = unittest.skipUnless(_gdb_cond, "needs gdb harness") + +long_running = 
tag('long_running') + +_dbg_njit = njit(debug=True) +_dbg_jit = jit(forceobj=True, debug=True) + + +def impl_gdb_call(a): + gdb('-ex', 'set confirm off', '-ex', 'c', '-ex', 'q') + b = a + 1 + c = a * 2.34 + d = (a, b, c) + print(a, b, c, d) + + +def impl_gdb_call_w_bp(a): + gdb_init('-ex', 'set confirm off', '-ex', 'c', '-ex', 'q') + b = a + 1 + c = a * 2.34 + d = (a, b, c) + gdb_breakpoint() + print(a, b, c, d) + + +def impl_gdb_split_init_and_break_w_parallel(a): + gdb_init('-ex', 'set confirm off', '-ex', 'c', '-ex', 'q') + a += 3 + for i in prange(4): + b = a + 1 + c = a * 2.34 + d = (a, b, c) + gdb_breakpoint() + print(a, b, c, d) + + +@not_arm +@unix_only +class TestGdbBindImpls(TestCase): + """ + Contains unit test implementations for gdb binding testing. Test must be + decorated with `@needs_gdb_harness` to prevent their running under normal + test conditions, the test methods must also end with `_impl` to be + considered for execution. The tests themselves are invoked by the + `TestGdbBinding` test class through the parsing of this class for test + methods and then running the discovered tests in a separate process. 
Test + names not including the word `quick` will be tagged as @tag('long_running') + """ + + @needs_gdb_harness + def test_gdb_cmd_lang_cpython_quick_impl(self): + with captured_stdout(): + impl_gdb_call(10) + + @needs_gdb_harness + def test_gdb_cmd_lang_nopython_quick_impl(self): + with captured_stdout(): + _dbg_njit(impl_gdb_call)(10) + + @needs_gdb_harness + def test_gdb_cmd_lang_objmode_quick_impl(self): + with captured_stdout(): + _dbg_jit(impl_gdb_call)(10) + + @needs_gdb_harness + def test_gdb_split_init_and_break_cpython_impl(self): + with captured_stdout(): + impl_gdb_call_w_bp(10) + + @needs_gdb_harness + def test_gdb_split_init_and_break_nopython_impl(self): + with captured_stdout(): + _dbg_njit(impl_gdb_call_w_bp)(10) + + @needs_gdb_harness + def test_gdb_split_init_and_break_objmode_impl(self): + with captured_stdout(): + _dbg_jit(impl_gdb_call_w_bp)(10) + + @skip_parfors_unsupported + @needs_gdb_harness + def test_gdb_split_init_and_break_w_parallel_cpython_impl(self): + with captured_stdout(): + impl_gdb_split_init_and_break_w_parallel(10) + + @skip_parfors_unsupported + @needs_gdb_harness + def test_gdb_split_init_and_break_w_parallel_nopython_impl(self): + with captured_stdout(): + _dbg_njit(impl_gdb_split_init_and_break_w_parallel)(10) + + @skip_parfors_unsupported + @needs_gdb_harness + def test_gdb_split_init_and_break_w_parallel_objmode_impl(self): + with captured_stdout(): + _dbg_jit(impl_gdb_split_init_and_break_w_parallel)(10) + + +@not_arm +@unix_only +@needs_gdb +class TestGdbBinding(TestCase): + """ + This test class is used to generate tests which will run the test cases + defined in TestGdbBindImpls in isolated subprocesses, this is for safety + in case something goes awry. 
+ """ + + # test mutates env + _numba_parallel_test_ = False + + _DEBUG = True + + def run_cmd(self, cmdline, env, kill_is_ok=False): + popen = subprocess.Popen(cmdline, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env, + shell=True) + # finish in 20s or kill it, there's no work being done + + def kill(): + popen.stdout.flush() + popen.stderr.flush() + popen.kill() + timeout = threading.Timer(20., kill) + try: + timeout.start() + out, err = popen.communicate() + retcode = popen.returncode + if retcode != 0: + raise AssertionError( + "process failed with code %s: " + "stderr follows\n%s\n" + "stdout :%s" % (retcode, err.decode(), out.decode())) + return out.decode(), err.decode() + finally: + timeout.cancel() + return None, None + + def run_test_in_separate_process(self, test, **kwargs): + env_copy = os.environ.copy() + env_copy['NUMBA_OPT'] = '1' + # Set GDB_TEST to permit the execution of tests decorated with + # @needs_gdb_harness + env_copy['GDB_TEST'] = '1' + cmdline = [sys.executable, "-m", "numba.runtests", test] + return self.run_cmd(' '.join(cmdline), env_copy, **kwargs) + + @classmethod + def _inject(cls, name): + themod = TestGdbBindImpls.__module__ + thecls = TestGdbBindImpls.__name__ + # strip impl + assert name.endswith('_impl') + methname = name.replace('_impl', '') + injected_method = '%s.%s.%s' % (themod, thecls, name) + + def test_template(self): + o, e = self.run_test_in_separate_process(injected_method) + dbgmsg = f'\nSTDOUT={o}\nSTDERR={e}\n' + # If the test was skipped in the subprocess, then mark this as a + # skipped test. + m = re.search(r"\.\.\. 
skipped '(.*?)'", e) + if m is not None: + self.skipTest(m.group(1)) + self.assertIn('GNU gdb', o, msg=dbgmsg) + self.assertIn('OK', e, msg=dbgmsg) + self.assertNotIn('FAIL', e, msg=dbgmsg) + self.assertNotIn('ERROR', e, msg=dbgmsg) + if 'quick' in name: + setattr(cls, methname, test_template) + else: + setattr(cls, methname, long_running(test_template)) + + @classmethod + def generate(cls): + for name in dir(TestGdbBindImpls): + if name.startswith('test_gdb'): + cls._inject(name) + + +TestGdbBinding.generate() + + +@not_arm +@unix_only +@needs_gdb +class TestGdbMisc(TestCase): + + @long_running + def test_call_gdb_twice(self): + def gen(f1, f2): + @njit + def impl(): + a = 1 + f1() + b = 2 + f2() + return a + b + return impl + + msg_head = "Calling either numba.gdb() or numba.gdb_init() more than" + + def check(func): + with self.assertRaises(errors.UnsupportedError) as raises: + func() + self.assertIn(msg_head, str(raises.exception)) + + for g1, g2 in permutations([gdb, gdb_init]): + func = gen(g1, g2) + check(func) + + @njit + def use_globals(): + a = 1 + gdb() + b = 2 + gdb_init() + return a + b + + check(use_globals) + + +@not_unix +class TestGdbExceptions(TestCase): + + def test_call_gdb(self): + def nop_compiler(x): + return x + for compiler in [nop_compiler, jit(forceobj=True), njit]: + for meth in [gdb, gdb_init]: + def python_func(): + meth() + with self.assertRaises(errors.TypingError) as raises: + compiler(python_func)() + msg = "gdb support is only available on unix-like systems" + self.assertIn(msg, str(raises.exception)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_gdb_dwarf.py b/venv/lib/python3.10/site-packages/numba/tests/test_gdb_dwarf.py new file mode 100644 index 0000000000000000000000000000000000000000..07aecbf40e368db8b2df96128d19955337300cc2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_gdb_dwarf.py @@ -0,0 +1,261 @@ +"""Tests for gdb interacting 
with the DWARF numba generates""" +from numba.tests.support import TestCase, linux_only +from numba.tests.gdb_support import needs_gdb, skip_unless_pexpect, GdbMIDriver +from unittest.mock import patch, Mock +from numba.core import datamodel +import numpy as np +from numba import typeof +import ctypes as ct +import unittest + + +@linux_only +@needs_gdb +@skip_unless_pexpect +class TestGDBDwarf(TestCase): + # This runs the tests in numba.tests.gdb, each submodule must contain one + # test class called "Test" and it must contain one test called "test". + # Variation is provided by the module name. The reason this convention exits + # is because gdb tests tend to be line number sensitive (breakpoints etc + # care about this) and doing this prevents constant churn and permits the + # reuse of the existing subprocess_test_runner harness. + _NUMBA_OPT_0_ENV = {'NUMBA_OPT': '0'} + + def _gdb_has_numpy(self): + """Returns True if gdb has NumPy support, False otherwise""" + driver = GdbMIDriver(__file__, debug=False,) + has_numpy = driver.supports_numpy() + driver.quit() + return has_numpy + + def _subprocess_test_runner(self, test_mod): + themod = f'numba.tests.gdb.{test_mod}' + self.subprocess_test_runner(test_module=themod, + test_class='Test', + test_name='test', + envvars=self._NUMBA_OPT_0_ENV) + + def test_basic(self): + self._subprocess_test_runner('test_basic') + + def test_array_arg(self): + self._subprocess_test_runner('test_array_arg') + + def test_conditional_breakpoint(self): + self._subprocess_test_runner('test_conditional_breakpoint') + + def test_break_on_symbol(self): + self._subprocess_test_runner('test_break_on_symbol') + + def test_break_on_symbol_version(self): + self._subprocess_test_runner('test_break_on_symbol_version') + + def test_pretty_print(self): + if not self._gdb_has_numpy(): + _msg = "Cannot find gdb with NumPy support" + self.skipTest(_msg) + + self._subprocess_test_runner('test_pretty_print') + + +class TestGDBPrettyPrinterLogic(TestCase): 
+ # Tests the logic in numba.misc.gdb_print_extension.NumbaArrayPrinter + # it's quite involved and susceptible to changes to the string + # representation of Numba array and dtypes as it parses these + # representations and recreates NumPy array/dtypes based on them! + + def setUp(self): + # Patch sys.modules with mock gdb modules such that the + # numba.misc.gdb_print_extension can import ok, the rest of the gdb + # classes etc are implemented later + + mock_modules = {'gdb': Mock(), + 'gdb.printing': Mock()} + self.patched_sys = patch.dict('sys.modules', mock_modules) + self.patched_sys.start() + + # Now sys.modules has a gdb in it, patch the gdb.selected_inferior. + # This function should return a process wrapping object that has a + # read_memory method that can read a memory region from a given address + # in the process' address space. + + import gdb + + class SelectedInferior(): + + def read_memory(self, data, extent): + buf = (ct.c_char * extent).from_address(data) + return buf.raw # this is bytes + + si = SelectedInferior() + gdb.configure_mock(**{'selected_inferior': lambda :si}) + + def tearDown(self): + # drop the sys.modules patch + self.patched_sys.stop() + + def get_gdb_repr(self, array): + # Returns the gdb repr of an array as reconstructed via the + # gdb_print_extension (should be the same as NumPy!). + + # This is the module being tested, it uses gdb and gdb.printing, both + # of which are mocked in self.setUp() + from numba.misc import gdb_print_extension + + # The following classes are ducks for the gdb classes (which are not + # easily/guaranteed importable from the test suite). They implement the + # absolute bare minimum necessary to test the gdb_print_extension. 
+ + class DISubrange(): + def __init__(self, lo, hi): + self._lo = lo + self._hi = hi + + @property + def type(self): + return self + + def range(self): + return self._lo, self._hi + + class DW_TAG_array_type(): + def __init__(self, lo, hi): + self._lo, self._hi = lo, hi + + def fields(self): + return [DISubrange(self._lo, self._hi),] + + class DIDerivedType_tuple(): + def __init__(self, the_tuple): + self._type = DW_TAG_array_type(0, len(the_tuple) - 1) + self._tuple = the_tuple + + @property + def type(self): + return self._type + + def __getitem__(self, item): + return self._tuple[item] + + class DICompositeType_Array(): + def __init__(self, arr, type_str): + self._arr = arr + self._type_str = type_str + + def __getitem__(self, item): + return getattr(self, item) + + @property + def data(self): + return self._arr.ctypes.data + + @property + def itemsize(self): + return self._arr.itemsize + + @property + def shape(self): + return DIDerivedType_tuple(self._arr.shape) + + @property + def strides(self): + return DIDerivedType_tuple(self._arr.strides) + + @property + def type(self): + return self._type_str + + # The type string encoded into the DWARF is the string repr of the Numba + # type followed by the LLVM repr of the data model in brackets. 
+ dmm = datamodel.default_manager + array_model = datamodel.models.ArrayModel(dmm, typeof(array)) + data_type = array_model.get_data_type() + type_str = f"{typeof(array)} ({data_type.structure_repr()})" + fake_gdb_arr = DICompositeType_Array(array, type_str) + + printer = gdb_print_extension.NumbaArrayPrinter(fake_gdb_arr) + + return printer.to_string().strip() # strip, there's new lines + + def check(self, array): + gdb_printed = self.get_gdb_repr(array) + self.assertEqual(str(gdb_printed), str(array)) + + def test_np_array_printer_simple_numeric_types(self): + # Tests printer over a selection of basic types + n = 4 + m = 3 + + for dt in (np.int8, np.uint16, np.int64, np.float32, np.complex128): + arr = np.arange(m * n, dtype=dt).reshape(m, n) + self.check(arr) + + def test_np_array_printer_simple_numeric_types_strided(self): + # Tests printer over randomized strided arrays + n_tests = 30 + np.random.seed(0) + + for _ in range(n_tests): + + shape = np.random.randint(1, high=12, size=np.random.randint(1, 5)) + tmp = np.arange(np.prod(shape)).reshape(shape) + + slices = [] + for x in shape: + start = np.random.randint(0, x) + # x + 3 is to ensure that sometimes the stop is beyond the + # end of the size in a given dimension + stop = np.random.randint(start + 1, max(start + 1, x + 3)) + step = np.random.randint(1, 3) # step as 1, 2 + strd = slice(start, stop, step) + slices.append(strd) + + arr = tmp[tuple(slices)] + self.check(arr) + + def test_np_array_printer_simple_structured_dtype(self): + # Tests printer over a selection of basic types + n = 4 + m = 3 + + aligned = np.dtype([("x", np.int16), ("y", np.float64)], align=True) + unaligned = np.dtype([("x", np.int16), ("y", np.float64)], align=False) + + for dt in (aligned, unaligned): + arr = np.empty(m * n, dtype=dt).reshape(m, n) + arr['x'] = np.arange(m * n, dtype=dt['x']).reshape(m, n) + arr['y'] = 100 * np.arange(m * n, dtype=dt['y']).reshape(m, n) + self.check(arr) + + def 
test_np_array_printer_chr_array(self): + # Test unichr array + arr = np.array(['abcde']) + self.check(arr) + + def test_np_array_printer_unichr_structured_dtype(self): + # Not supported yet + n = 4 + m = 3 + + dt = np.dtype([("x", ' C order + noncontig, # 1d non-contig -> A order + (noncontig, contig), # 2d C order + (contig, noncontig), # 2d F order + (noncontig, noncontig), # 2d A order + (noncontig, noncontig, contig), # 3d C order + (contig, noncontig, noncontig), # 3d F order + (noncontig, noncontig, noncontig), # 3d A order + ) + + for ty, idx in product(tys, indexes): + compilable = njit(gen(ty, idx)) + # check the type of the typed list returned matches the type + # as constructed in the interpreter + expected = ty[idx] + # check execution + self.assertEqual(compilable()._dtype, expected) + got = compilable.nopython_signatures[0].return_type.dtype + # check sig + self.assertEqual(got, expected) + + def test_shorthand_syntax(self): + # tests a couple of shorthand syntax examples + # (test_static_getitem_on_type is a more extensive test of the + # functionality but it uses slices directly). 
+ + @njit + def foo1(): + ty = types.float32[::1, :] # 2d F order + return typed.List.empty_list(ty) + + self.assertEqual(foo1()._dtype, types.float32[::1, :]) + + @njit + def foo2(): + ty = types.complex64[:, :, :] # 3d A order + return typed.List.empty_list(ty) + + self.assertEqual(foo2()._dtype, types.complex64[:, :, :]) + + def test_static_getitem_on_invalid_type(self): + # check that an unsupported type cannot be instantiated in njit code + + # check this executes in the interpreter: + types.void[:] + + # check the same fails in compilation as it's not supported + # it'll fall back to a generic getitem + with self.assertRaises(errors.TypingError) as raises: + @njit + def foo(): + types.void[:] + + foo() + + msg = ("No implementation", + "getitem(typeref[none], slice)") + + excstr = str(raises.exception) + for m in msg: + self.assertIn(m, excstr) + + def test_standard_getitem_on_type(self): + # not supported at present, should be doable if the slice is a literal + # though. + + # check using a non-static arg to the getitem raises + with self.assertRaises(errors.TypingError) as raises: + @njit + def foo(not_static): + types.float64[not_static] + + foo(slice(None, None, 1)) + + msg = ("No implementation", + "getitem(class(float64), slice)") + + excstr = str(raises.exception) + for m in msg: + self.assertIn(m, excstr) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_gil.py b/venv/lib/python3.10/site-packages/numba/tests/test_gil.py new file mode 100644 index 0000000000000000000000000000000000000000..639ef06eaa4dbcfcce00de02a10a8cfc62807b61 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_gil.py @@ -0,0 +1,182 @@ +import ctypes +import ctypes.util +import os +import sys +import threading +import warnings + +import numpy as np + +import unittest +from numba import jit +from numba.core import errors +from numba.tests.support import TestCase, tag + + +# This CPython API function is 
a portable way to get the current thread id. +PyThread_get_thread_ident = ctypes.pythonapi.PyThread_get_thread_ident +PyThread_get_thread_ident.restype = ctypes.c_long +PyThread_get_thread_ident.argtypes = [] + +# A way of sleeping from nopython code +if os.name == 'nt': + sleep = ctypes.windll.kernel32.Sleep + sleep.argtypes = [ctypes.c_uint] + sleep.restype = None + sleep_factor = 1 # milliseconds +else: + sleep = ctypes.CDLL(ctypes.util.find_library("c")).usleep + sleep.argtypes = [ctypes.c_uint] + sleep.restype = ctypes.c_int + sleep_factor = 1000 # microseconds + + +def f(a, indices): + # If run from one thread at a time, the function will always fill the + # array with identical values. + # If run from several threads at a time, the function will probably + # fill the array with differing values. + for idx in indices: + # Let another thread run + sleep(10 * sleep_factor) + a[idx] = PyThread_get_thread_ident() + +f_sig = "void(int64[:], intp[:])" + +def lifted_f(a, indices): + """ + Same as f(), but inside a lifted loop + """ + object() # Force object mode + for idx in indices: + # Let another thread run + sleep(10 * sleep_factor) + a[idx] = PyThread_get_thread_ident() + +def object_f(a, indices): + """ + Same as f(), but in object mode + """ + for idx in indices: + # Let another thread run + sleep(10 * sleep_factor) + object() # Force object mode + a[idx] = PyThread_get_thread_ident() + + +class TestGILRelease(TestCase): + + def make_test_array(self, n_members): + return np.arange(n_members, dtype=np.int64) + + def run_in_threads(self, func, n_threads): + # Run the function in parallel over an array and collect results. + threads = [] + # Warm up compilation, since we don't want that to interfere with + # the test proper. + func(self.make_test_array(1), np.arange(1, dtype=np.intp)) + arr = self.make_test_array(50) + for i in range(n_threads): + # Ensure different threads write into the array in different + # orders. 
+ indices = np.arange(arr.size, dtype=np.intp) + np.random.shuffle(indices) + t = threading.Thread(target=func, args=(arr, indices)) + threads.append(t) + for t in threads: + t.start() + for t in threads: + t.join() + return arr + + def check_gil_held(self, func): + arr = self.run_in_threads(func, n_threads=4) + distinct = set(arr) + self.assertEqual(len(distinct), 1, distinct) + + def check_gil_released(self, func): + for n_threads in (4, 12, 32): + # Try harder each time. On an empty machine 4 threads seems + # sufficient, but in some contexts (e.g. Travis CI) we need more. + arr = self.run_in_threads(func, n_threads) + distinct = set(arr) + try: + self.assertGreater(len(distinct), 1, distinct) + except AssertionError as e: + failure = e + else: + return + raise failure + + def test_gil_held(self): + """ + Test the GIL is held by default, by checking serialized runs + produce deterministic results. + """ + cfunc = jit(f_sig, nopython=True)(f) + self.check_gil_held(cfunc) + + def test_gil_released(self): + """ + Test releasing the GIL, by checking parallel runs produce + unpredictable results. + """ + cfunc = jit(f_sig, nopython=True, nogil=True)(f) + self.check_gil_released(cfunc) + + def test_gil_released_inside_lifted_loop(self): + """ + Test the GIL can by released by a lifted loop even though the + surrounding code uses object mode. + """ + cfunc = jit(f_sig, forceobj=True, nogil=True)(lifted_f) + self.check_gil_released(cfunc) + + def test_gil_released_by_caller(self): + """ + Releasing the GIL in the caller is sufficient to have it + released in a callee. + """ + compiled_f = jit(f_sig, nopython=True)(f) + @jit(f_sig, nopython=True, nogil=True) + def caller(a, i): + compiled_f(a, i) + self.check_gil_released(caller) + + def test_gil_released_by_caller_and_callee(self): + """ + Same, but with both caller and callee asking to release the GIL. 
+ """ + compiled_f = jit(f_sig, nopython=True, nogil=True)(f) + @jit(f_sig, nopython=True, nogil=True) + def caller(a, i): + compiled_f(a, i) + self.check_gil_released(caller) + + def test_gil_ignored_by_callee(self): + """ + When only the callee asks to release the GIL, it gets ignored. + """ + compiled_f = jit(f_sig, nopython=True, nogil=True)(f) + @jit(f_sig, nopython=True) + def caller(a, i): + compiled_f(a, i) + self.check_gil_held(caller) + + def test_object_mode(self): + """ + When the function is compiled in object mode, a warning is + printed out. + """ + with warnings.catch_warnings(record=True) as wlist: + warnings.simplefilter('always', errors.NumbaWarning) + cfunc = jit(f_sig, forceobj=True, nogil=True)(object_f) + self.assertTrue(any(w.category is errors.NumbaWarning + and "Code running in object mode won't allow parallel execution" in str(w.message) + for w in wlist), wlist) + # Just check it doesn't crash. + self.run_in_threads(cfunc, 2) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_globals.py b/venv/lib/python3.10/site-packages/numba/tests/test_globals.py new file mode 100644 index 0000000000000000000000000000000000000000..6cdcb8605aef1c55cea37ea8c8dd0f25f493ff45 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_globals.py @@ -0,0 +1,266 @@ +import numpy as np +from numba import jit, njit, errors +from numba.extending import register_jitable +from numba.tests import usecases +import unittest + +X = np.arange(10) + + +def global_ndarray_func(x): + y = x + X.shape[0] + return y + + +# Create complex array with real and imaginary parts of distinct value +cplx_X = np.arange(10, dtype=np.complex128) +tmp = np.arange(10, dtype=np.complex128) +cplx_X += (tmp+10)*1j + + +def global_cplx_arr_copy(a): + for i in range(len(a)): + a[i] = cplx_X[i] + + +# Create a recarray with fields of distinct value +x_dt = np.dtype([('a', np.int32), ('b', np.float32)]) +rec_X = 
np.recarray(10, dtype=x_dt) +for i in range(len(rec_X)): + rec_X[i].a = i + rec_X[i].b = i + 0.5 + + +def global_rec_arr_copy(a): + for i in range(len(a)): + a[i] = rec_X[i] + + +def global_rec_arr_extract_fields(a, b): + for i in range(len(a)): + a[i] = rec_X[i].a + b[i] = rec_X[i].b + + +# Create additional global recarray +y_dt = np.dtype([('c', np.int16), ('d', np.float64)]) +rec_Y = np.recarray(10, dtype=y_dt) +for i in range(len(rec_Y)): + rec_Y[i].c = i + 10 + rec_Y[i].d = i + 10.5 + + +def global_two_rec_arrs(a, b, c, d): + for i in range(len(a)): + a[i] = rec_X[i].a + b[i] = rec_X[i].b + c[i] = rec_Y[i].c + d[i] = rec_Y[i].d + + +# Test a global record +record_only_X = np.recarray(1, dtype=x_dt)[0] +record_only_X.a = 1 +record_only_X.b = 1.5 + +@jit(nopython=True) +def global_record_func(x): + return x.a == record_only_X.a + + +@jit(nopython=True) +def global_module_func(x, y): + return usecases.andornopython(x, y) + +# Test a global tuple +tup_int = (1, 2) +tup_str = ('a', 'b') +tup_mixed = (1, 'a') +tup_float = (1.2, 3.5) +tup_npy_ints = (np.uint64(12), np.int8(3)) +tup_tup_array = ((np.ones(5),),) +mixed_tup_tup_array = (('Z', np.ones(5),), 2j, 'A') + +def global_int_tuple(): + return tup_int[0] + tup_int[1] + + +def global_str_tuple(): + return tup_str[0] + tup_str[1] + + +def global_mixed_tuple(): + idx = tup_mixed[0] + field = tup_mixed[1] + return rec_X[idx][field] + + +def global_float_tuple(): + return tup_float[0] + tup_float[1] + + +def global_npy_int_tuple(): + return tup_npy_ints[0] + tup_npy_ints[1] + + +def global_write_to_arr_in_tuple(): + tup_tup_array[0][0][0] = 10. + + +def global_write_to_arr_in_mixed_tuple(): + mixed_tup_tup_array[0][1][0] = 10. 
+ + +_glbl_np_bool_T = np.bool_(True) +_glbl_np_bool_F = np.bool_(False) + + +@register_jitable # consumer function +def _sink(*args): + pass + +def global_npy_bool(): + _sink(_glbl_np_bool_T, _glbl_np_bool_F) + return _glbl_np_bool_T, _glbl_np_bool_F + + +class TestGlobals(unittest.TestCase): + + def check_global_ndarray(self, **jitargs): + # (see github issue #448) + ctestfunc = jit(**jitargs)(global_ndarray_func) + self.assertEqual(ctestfunc(1), 11) + + def test_global_ndarray(self): + # This also checks we can access an unhashable global value + # (see issue #697) + self.check_global_ndarray(forceobj=True) + + def test_global_ndarray_npm(self): + self.check_global_ndarray(nopython=True) + + + def check_global_complex_arr(self, **jitargs): + # (see github issue #897) + ctestfunc = jit(**jitargs)(global_cplx_arr_copy) + arr = np.zeros(len(cplx_X), dtype=np.complex128) + ctestfunc(arr) + np.testing.assert_equal(arr, cplx_X) + + def test_global_complex_arr(self): + self.check_global_complex_arr(forceobj=True) + + def test_global_complex_arr_npm(self): + self.check_global_complex_arr(nopython=True) + + + def check_global_rec_arr(self, **jitargs): + # (see github issue #897) + ctestfunc = jit(**jitargs)(global_rec_arr_copy) + arr = np.zeros(rec_X.shape, dtype=x_dt) + ctestfunc(arr) + np.testing.assert_equal(arr, rec_X) + + def test_global_rec_arr(self): + self.check_global_rec_arr(forceobj=True) + + def test_global_rec_arr_npm(self): + self.check_global_rec_arr(nopython=True) + + + def check_global_rec_arr_extract(self, **jitargs): + # (see github issue #897) + ctestfunc = jit(**jitargs)(global_rec_arr_extract_fields) + arr1 = np.zeros(rec_X.shape, dtype=np.int32) + arr2 = np.zeros(rec_X.shape, dtype=np.float32) + ctestfunc(arr1, arr2) + np.testing.assert_equal(arr1, rec_X.a) + np.testing.assert_equal(arr2, rec_X.b) + + def test_global_rec_arr_extract(self): + self.check_global_rec_arr_extract(forceobj=True) + + def test_global_rec_arr_extract_npm(self): + 
self.check_global_rec_arr_extract(nopython=True) + + + def check_two_global_rec_arrs(self, **jitargs): + # (see github issue #897) + ctestfunc = jit(**jitargs)(global_two_rec_arrs) + arr1 = np.zeros(rec_X.shape, dtype=np.int32) + arr2 = np.zeros(rec_X.shape, dtype=np.float32) + arr3 = np.zeros(rec_Y.shape, dtype=np.int16) + arr4 = np.zeros(rec_Y.shape, dtype=np.float64) + ctestfunc(arr1, arr2, arr3, arr4) + np.testing.assert_equal(arr1, rec_X.a) + np.testing.assert_equal(arr2, rec_X.b) + np.testing.assert_equal(arr3, rec_Y.c) + np.testing.assert_equal(arr4, rec_Y.d) + + def test_two_global_rec_arrs(self): + self.check_two_global_rec_arrs(forceobj=True) + + def test_two_global_rec_arrs_npm(self): + self.check_two_global_rec_arrs(nopython=True) + + def test_global_module(self): + # (see github issue #1059) + res = global_module_func(5, 6) + self.assertEqual(True, res) + + def test_global_record(self): + # (see github issue #1081) + x = np.recarray(1, dtype=x_dt)[0] + x.a = 1 + res = global_record_func(x) + self.assertEqual(True, res) + x.a = 2 + res = global_record_func(x) + self.assertEqual(False, res) + + def test_global_int_tuple(self): + pyfunc = global_int_tuple + jitfunc = njit(pyfunc) + self.assertEqual(pyfunc(), jitfunc()) + + def test_global_str_tuple(self): + pyfunc = global_str_tuple + jitfunc = njit(pyfunc) + self.assertEqual(pyfunc(), jitfunc()) + + def test_global_mixed_tuple(self): + pyfunc = global_mixed_tuple + jitfunc = njit(pyfunc) + self.assertEqual(pyfunc(), jitfunc()) + + def test_global_float_tuple(self): + pyfunc = global_float_tuple + jitfunc = njit(pyfunc) + self.assertEqual(pyfunc(), jitfunc()) + + def test_global_npy_int_tuple(self): + pyfunc = global_npy_int_tuple + jitfunc = njit(pyfunc) + self.assertEqual(pyfunc(), jitfunc()) + + def test_global_write_to_arr_in_tuple(self): + # Test writing to an array in a global tuple + # See issue https://github.com/numba/numba/issues/7120 + for func in (global_write_to_arr_in_tuple, + 
global_write_to_arr_in_mixed_tuple): + jitfunc = njit(func) + with self.assertRaises(errors.TypingError) as e: + jitfunc() + msg = "Cannot modify readonly array of type:" + self.assertIn(msg, str(e.exception)) + + def test_global_npy_bool(self): + # Test global NumPy bool + # See issue https://github.com/numba/numba/issues/6979 + pyfunc = global_npy_bool + jitfunc = njit(pyfunc) + self.assertEqual(pyfunc(), jitfunc()) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_hashing.py b/venv/lib/python3.10/site-packages/numba/tests/test_hashing.py new file mode 100644 index 0000000000000000000000000000000000000000..ca2cd1a945b5f16a20f7911d7d1385644afd8d8f --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_hashing.py @@ -0,0 +1,532 @@ +# -*- coding: utf-8 -*- +""" +Test hashing of various supported types. +""" + +import unittest + +import os +import sys +import subprocess +from collections import defaultdict +from textwrap import dedent + +import numpy as np + +from numba import jit, config, typed, typeof +from numba.core import types, utils +import unittest +from numba.tests.support import (TestCase, skip_unless_py10_or_later, + run_in_subprocess) + +from numba.cpython.unicode import compile_time_get_string_data +from numba.cpython import hashing +from numba.np.numpy_support import numpy_version + + +def hash_usecase(x): + return hash(x) + + +class TestHashingSetup(TestCase): + + def test_warn_on_fnv(self): + # FNV hash alg variant is not supported, check Numba warns + work = """ + import sys + import warnings + from collections import namedtuple + + # hash_info is a StructSequence, mock as a named tuple + fields = ["width", "modulus", "inf", "nan", "imag", "algorithm", + "hash_bits", "seed_bits", "cutoff"] + + hinfo = sys.hash_info + FAKE_HASHINFO = namedtuple('FAKE_HASHINFO', fields) + + fd = dict() + for f in fields: + fd[f] = getattr(hinfo, f) + + fd['algorithm'] = 'fnv' + + 
fake_hashinfo = FAKE_HASHINFO(**fd) + + # replace the hashinfo with the fnv version + sys.hash_info = fake_hashinfo + with warnings.catch_warnings(record=True) as warns: + # Cause all warnings to always be triggered. + warnings.simplefilter("always") + from numba import njit + @njit + def foo(): + hash(1) + foo() + assert len(warns) > 0 + expect = "FNV hashing is not implemented in Numba. See PEP 456" + for w in warns: + if expect in str(w.message): + break + else: + raise RuntimeError("Expected warning not found") + """ + subprocess.check_call([sys.executable, '-c', dedent(work)]) + + +class TestHashAlgs(TestCase): + # This tests Numba hashing replication against cPython "gold", i.e. the + # actual hash values for given inputs, algs and PYTHONHASHSEEDs + # Test adapted from: + # https://github.com/python/cpython/blob/9dda9020abcf0d51d59b283a89c58c8e1fb0f574/Lib/test/test_hash.py#L197-L264 + # and + # https://github.com/python/cpython/blob/9dda9020abcf0d51d59b283a89c58c8e1fb0f574/Lib/test/test_hash.py#L174-L189 + + # 32bit little, 64bit little, 32bit big, 64bit big + known_hashes = { + 'djba33x': [ # only used for small strings + # seed 0, 'abc' + [193485960, 193485960, 193485960, 193485960], + # seed 42, 'abc' + [-678966196, 573763426263223372, -820489388, -4282905804826039665], + ], + 'siphash13': [ + # NOTE: PyUCS2 layout depends on endianness + # seed 0, 'abc' + [69611762, -4594863902769663758, 69611762, -4594863902769663758], + # seed 42, 'abc' + [-975800855, 3869580338025362921, -975800855, 3869580338025362921], + # seed 42, 'abcdefghijk' + [-595844228, 7764564197781545852, -595844228, 7764564197781545852], + # seed 0, 'äú∑ℇ' + [-1093288643, -2810468059467891395, -1041341092, 4925090034378237276], + # seed 42, 'äú∑ℇ' + [-585999602, -2845126246016066802, -817336969, -2219421378907968137], + ], + 'siphash24': [ + # NOTE: PyUCS2 layout depends on endianness + # seed 0, 'abc' + [1198583518, 4596069200710135518, 1198583518, 4596069200710135518], + # seed 42, 'abc' 
+ [273876886, -4501618152524544106, 273876886, -4501618152524544106], + # seed 42, 'abcdefghijk' + [-1745215313, 4436719588892876975, -1745215313, 4436719588892876975], + # seed 0, 'äú∑ℇ' + [493570806, 5749986484189612790, -1006381564, -5915111450199468540], + # seed 42, 'äú∑ℇ' + [-1677110816, -2947981342227738144, -1860207793, -4296699217652516017], + ], + } + + def get_expected_hash(self, position, length): + if length < sys.hash_info.cutoff: + algorithm = "djba33x" + else: + algorithm = sys.hash_info.algorithm + IS_64BIT = not config.IS_32BITS + if sys.byteorder == 'little': + platform = 1 if IS_64BIT else 0 + else: + assert(sys.byteorder == 'big') + platform = 3 if IS_64BIT else 2 + return self.known_hashes[algorithm][position][platform] + + def get_hash_command(self, repr_): + return 'print(hash(eval(%a)))' % repr_ + + def get_hash(self, repr_, seed=None): + env = os.environ.copy() + if seed is not None: + env['PYTHONHASHSEED'] = str(seed) + else: + env.pop('PYTHONHASHSEED', None) + out, _ = run_in_subprocess(code=self.get_hash_command(repr_), + env=env) + stdout = out.decode().strip() + return int(stdout) + + def test_against_cpython_gold(self): + + args = (('abc', 0, 0), ('abc', 42, 1), ('abcdefghijk', 42, 2), + ('äú∑ℇ', 0, 3), ('äú∑ℇ', 42, 4),) + + for input_str, seed, position in args: + with self.subTest(input_str=input_str, seed=seed): + got = self.get_hash(repr(input_str), seed=seed) + expected = self.get_expected_hash(position, len(input_str)) + self.assertEqual(got, expected) + + +class BaseTest(TestCase): + + def setUp(self): + self.cfunc = jit(nopython=True)(hash_usecase) + + def check_hash_values(self, values): + cfunc = self.cfunc + for val in list(values): + nb_hash = cfunc(val) + self.assertIsInstance(nb_hash, int) + try: + self.assertEqual(nb_hash, hash(val)) + except AssertionError as e: + print("val, nb_hash, hash(val)") + print(val, nb_hash, hash(val)) + print("abs(val), hashing._PyHASH_MODULUS - 1") + print(abs(val), hashing._PyHASH_MODULUS 
- 1) + raise e + + def int_samples(self, typ=np.int64): + for start in (0, -50, 60000, 1 << 32): + info = np.iinfo(typ) + if not info.min <= start <= info.max: + continue + n = 100 + yield range(start, start + n) + yield range(start, start + 100 * n, 100) + yield range(start, start + 128 * n, 128) + yield [-1] + + def safe_construct(self, typ, value): + return getattr(np, 'int' + str(np.iinfo(typ).bits))(value).view(typ) + + def float_samples(self, typ): + info = np.finfo(typ) + + for start in (0, 10, info.max ** 0.5, info.max / 1000.0): + n = 100 + min_step = max(info.tiny, start * info.resolution) + for step in (1.2, min_step ** 0.5, min_step): + if step < min_step: + continue + a = np.linspace(start, start + n * step, n) + a = a.astype(typ) + yield a + yield -a + yield a + a.mean() + + # Infs, nans, zeros, magic -1 + a = [0.0, 0.5, -0.0, -1.0, float('inf'), -float('inf'),] + + # Python 3.10 has a hash for nan based on the pointer to the PyObject + # containing the nan, skip this input and use explicit test instead. + + yield typ(a) + + def complex_samples(self, typ, float_ty): + for real in self.float_samples(float_ty): + for imag in self.float_samples(float_ty): + # Ensure equal sizes + real = real[:len(imag)] + imag = imag[:len(real)] + a = real + typ(1j) * imag + # Python 3.10 has a hash for nan based on the pointer to the + # PyObject containing the nan, skip input that ends up as nan + if not np.any(np.isnan(a)): + yield a + + +class TestNumberHashing(BaseTest): + """ + Test hashing of number types. 
+ """ + + def setUp(self): + if numpy_version >= (2, 0) and numpy_version <= (2, 1): + # Temporarily set promotions state to legacy, + # to ensure overflow logic works + self.initial_state = np._get_promotion_state() + np._set_promotion_state("legacy") + + return super().setUp() + + def tearDown(self) -> None: + if numpy_version >= (2, 0) and numpy_version <= (2, 1): + # Reset numpy promotion state to initial state + # since the setting is global + np._set_promotion_state(self.initial_state) + + return super().tearDown() + + def check_floats(self, typ): + for a in self.float_samples(typ): + self.assertEqual(a.dtype, np.dtype(typ)) + self.check_hash_values(a) + + def check_complex(self, typ, float_ty): + for a in self.complex_samples(typ, float_ty): + self.assertEqual(a.dtype, np.dtype(typ)) + self.check_hash_values(a) + + def test_floats(self): + self.check_floats(np.float32) + self.check_floats(np.float64) + + def test_complex(self): + self.check_complex(np.complex64, np.float32) + self.check_complex(np.complex128, np.float64) + + def test_bool(self): + self.check_hash_values([False, True]) + + def test_ints(self): + minmax = [] + + for ty in [np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64]: + for a in self.int_samples(ty): + self.check_hash_values(a) + info = np.iinfo(ty) + # check hash(-1) = -2 + # check hash(0) = 0 + self.check_hash_values([self.safe_construct(ty, -1)]) + self.check_hash_values([ty(0)]) + signed = 'uint' not in str(ty) + # check bit shifting patterns from min through to max + sz = ty().itemsize + for x in [info.min, info.max]: + shifts = 8 * sz + # x is a python int, do shifts etc as a python int and init + # numpy type from that to avoid numpy type rules + y = x + for i in range(shifts): + twiddle1 = 0xaaaaaaaaaaaaaaaa + twiddle2 = 0x5555555555555555 + vals = [y] + for tw in [twiddle1, twiddle2]: + val = y & twiddle1 + if val < sys.maxsize: + vals.append(val) + for v in vals: + 
self.check_hash_values([ty(v)]) + if signed: # try the same with flipped signs + # negated signed INT_MIN will overflow + for v in vals: + if v != info.min: + self.check_hash_values([ty(-v)]) + if x == 0: # unsigned min is 0, shift up + y = (y | 1) << 1 + else: # everything else shift down + y = y >> 1 + + # these straddle the branch between returning the int as the hash and + # doing the PyLong hash alg + self.check_hash_values([np.int64(0x1ffffffffffffffe)]) + self.check_hash_values([np.int64(0x1fffffffffffffff)]) + self.check_hash_values([np.uint64(0x1ffffffffffffffe)]) + self.check_hash_values([np.uint64(0x1fffffffffffffff)]) + + # check some values near sys int mins + self.check_hash_values([np.int64(-0x7fffffffffffffff)]) + self.check_hash_values([np.int64(-0x7ffffffffffffff6)]) + self.check_hash_values([np.int64(-0x7fffffffffffff9c)]) + self.check_hash_values([np.int32(-0x7fffffff)]) + self.check_hash_values([np.int32(-0x7ffffff6)]) + self.check_hash_values([np.int32(-0x7fffff9c)]) + + @skip_unless_py10_or_later + def test_py310_nan_hash(self): + # On Python 3.10+ nan's hash to a value which is based on the pointer to + # the PyObject containing the nan. Numba cannot replicate as there's no + # object, it instead produces equivalent behaviour, i.e. hashes to + # something "unique". + + # Run 10 hashes, make sure that the "uniqueness" is sufficient that + # there's more than one hash value. Not much more can be done! + x = [float('nan') for i in range(10)] + out = set([self.cfunc(z) for z in x]) + self.assertGreater(len(out), 1) + + +class TestTupleHashing(BaseTest): + """ + Test hashing of tuples. 
+ """ + + def setUp(self): + if numpy_version >= (2, 0) and numpy_version <= (2, 1): + # Temporarily set promotions state to legacy, + # to ensure overflow logic works + self.initial_state = np._get_promotion_state() + np._set_promotion_state("legacy") + + return super().setUp() + + def tearDown(self) -> None: + if numpy_version >= (2, 0) and numpy_version <= (2, 1): + # Reset numpy promotion state to initial state + # since the setting is global + np._set_promotion_state(self.initial_state) + + return super().tearDown() + + def check_tuples(self, value_generator, split): + for values in value_generator: + tuples = [split(a) for a in values] + self.check_hash_values(tuples) + + def test_homogeneous_tuples(self): + typ = np.uint64 + + def split2(i): + """ + Split i's bits into 2 integers. + """ + i = self.safe_construct(typ, i) + return (i & typ(0x5555555555555555), + i & typ(0xaaaaaaaaaaaaaaaa), + ) + + def split3(i): + """ + Split i's bits into 3 integers. + """ + i = self.safe_construct(typ, i) + return (i & typ(0x2492492492492492), + i & typ(0x4924924924924924), + i & typ(0x9249249249249249), + ) + + self.check_tuples(self.int_samples(), split2) + self.check_tuples(self.int_samples(), split3) + + # Check exact. Sample values from: + # https://github.com/python/cpython/blob/b738237d6792acba85b1f6e6c8993a812c7fd815/Lib/test/test_tuple.py#L80-L93 + # Untypable empty tuples are replaced with (7,). 
+ self.check_hash_values([(7,), (0,), (0, 0), (0.5,), + (0.5, (7,), (-2, 3, (4, 6)))]) + + def test_heterogeneous_tuples(self): + modulo = 2**63 + + def split(i): + a = i & 0x5555555555555555 + b = (i & 0xaaaaaaaa) ^ ((i >> 32) & 0xaaaaaaaa) + return np.int64(a), np.float64(b * 0.0001) + + self.check_tuples(self.int_samples(), split) + + +class TestUnicodeHashing(BaseTest): + + def test_basic_unicode(self): + kind1_string = "abcdefghijklmnopqrstuvwxyz" + for i in range(len(kind1_string)): + self.check_hash_values([kind1_string[:i]]) + + sep = "眼" + kind2_string = sep.join(list(kind1_string)) + for i in range(len(kind2_string)): + self.check_hash_values([kind2_string[:i]]) + + sep = "🐍⚡" + kind4_string = sep.join(list(kind1_string)) + for i in range(len(kind4_string)): + self.check_hash_values([kind4_string[:i]]) + + empty_string = "" + self.check_hash_values(empty_string) + + def test_hash_passthrough(self): + # no `hash` call made, this just checks that `._hash` is correctly + # passed through from an already existing string + kind1_string = "abcdefghijklmnopqrstuvwxyz" + + @jit(nopython=True) + def fn(x): + return x._hash + + hash_value = compile_time_get_string_data(kind1_string)[-1] + self.assertTrue(hash_value != -1) + self.assertEqual(fn(kind1_string), hash_value) + + def test_hash_passthrough_call(self): + # check `x._hash` and hash(x) are the same + kind1_string = "abcdefghijklmnopqrstuvwxyz" + + @jit(nopython=True) + def fn(x): + return x._hash, hash(x) + + hash_value = compile_time_get_string_data(kind1_string)[-1] + self.assertTrue(hash_value != -1) + self.assertEqual(fn(kind1_string), (hash_value, hash_value)) + + @unittest.skip("Needs hash computation at const unpickling time") + def test_hash_literal(self): + # a strconst always seem to have an associated hash value so the hash + # member of the returned value should contain the correct hash + @jit(nopython=True) + def fn(): + x = "abcdefghijklmnopqrstuvwxyz" + return x + val = fn() + tmp = 
hash("abcdefghijklmnopqrstuvwxyz") + self.assertEqual(tmp, (compile_time_get_string_data(val)[-1])) + + def test_hash_on_str_creation(self): + # In cPython some? new strings do not have a cached hash until hash() is + # called + def impl(do_hash): + const1 = "aaaa" + const2 = "眼眼眼眼" + new = const1 + const2 + if do_hash: + hash(new) + return new + + jitted = jit(nopython=True)(impl) + + # do not compute the hash, cPython will have no cached hash, but Numba + # will + compute_hash = False + expected = impl(compute_hash) + got = jitted(compute_hash) + a = (compile_time_get_string_data(expected)) + b = (compile_time_get_string_data(got)) + self.assertEqual(a[:-1], b[:-1]) + self.assertTrue(a[-1] != b[-1]) + + # now with compute hash enabled, cPython will have a cached hash as will + # Numba + compute_hash = True + expected = impl(compute_hash) + got = jitted(compute_hash) + a = (compile_time_get_string_data(expected)) + b = (compile_time_get_string_data(got)) + self.assertEqual(a, b) + + +class TestUnhashable(TestCase): + # Tests that unhashable types behave correctly and raise a TypeError at + # runtime. + + def test_hash_unhashable(self): + unhashables = (typed.Dict().empty(types.int64, types.int64), + typed.List().empty_list(types.int64), + np.ones(4)) + cfunc = jit(nopython=True)(hash_usecase) + for ty in unhashables: + with self.assertRaises(TypeError) as raises: + cfunc(ty) + expected = f"unhashable type: '{str(typeof(ty))}'" + self.assertIn(expected, str(raises.exception)) + + def test_no_generic_hash(self): + # In CPython, if there's no attr `__hash__` on an object, a hash of the + # object's pointer is returned (see: _Py_HashPointer in the CPython + # source). Numba has no access to such objects and can't create them + # either, so it catches this case and raises an exception. 
+ + @jit(nopython=True) + def foo(): + hash(np.cos) + + with self.assertRaises(TypeError) as raises: + foo() + + expected = ("No __hash__ is defined for object ") + self.assertIn(expected, str(raises.exception)) + + +if __name__ == "__main__": + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_heapq.py b/venv/lib/python3.10/site-packages/numba/tests/test_heapq.py new file mode 100644 index 0000000000000000000000000000000000000000..232fd60121ecd8bd3f7e05b58beeb5ee30d0cf14 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_heapq.py @@ -0,0 +1,484 @@ +import heapq as hq +import itertools + +import numpy as np + +from numba import jit, typed +from numba.tests.support import TestCase, MemoryLeakMixin + + +def heapify(x): + return hq.heapify(x) + + +def heappop(heap): + return hq.heappop(heap) + + +def heappush(heap, item): + return hq.heappush(heap, item) + + +def heappushpop(heap, item): + return hq.heappushpop(heap, item) + + +def heapreplace(heap, item): + return hq.heapreplace(heap, item) + + +def nsmallest(n, iterable): + return hq.nsmallest(n, iterable) + + +def nlargest(n, iterable): + return hq.nlargest(n, iterable) + + +class _TestHeapq(MemoryLeakMixin): + + def setUp(self): + super(_TestHeapq, self).setUp() + self.rnd = np.random.RandomState(42) + + def test_heapify_basic_sanity(self): + pyfunc = heapify + cfunc = jit(nopython=True)(pyfunc) + + a = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0] + b = self.listimpl(a) + + pyfunc(a) + cfunc(b) + self.assertPreciseEqual(a, list(b)) + + # includes non-finite elements + element_pool = [3.142, -10.0, 5.5, np.nan, -np.inf, np.inf] + + # list which may contain duplicate elements + for x in itertools.combinations_with_replacement(element_pool, 6): + a = list(x) + b = self.listimpl(a) + + pyfunc(a) + cfunc(b) + self.assertPreciseEqual(a, list(b)) + + # single element list + for i in range(len(element_pool)): + a = [element_pool[i]] + b = self.listimpl(a) + + pyfunc(a) + cfunc(b) + 
self.assertPreciseEqual(a, list(b)) + + # elements are tuples + a = [(3, 33), (1, 11), (2, 22)] + b = self.listimpl(a) + pyfunc(a) + cfunc(b) + self.assertPreciseEqual(a, list(b)) + + def check_invariant(self, heap): + for pos, item in enumerate(heap): + if pos: + parentpos = (pos - 1) >> 1 + self.assertTrue(heap[parentpos] <= item) + + def test_push_pop(self): + # inspired by + # https://github.com/python/cpython/blob/e42b7051/Lib/test/test_heapq.py + pyfunc_heappush = heappush + cfunc_heappush = jit(nopython=True)(pyfunc_heappush) + + pyfunc_heappop = heappop + cfunc_heappop = jit(nopython=True)(pyfunc_heappop) + + heap = self.listimpl([-1.0]) + data = self.listimpl([-1.0]) + self.check_invariant(heap) + for i in range(256): + item = self.rnd.randn(1).item(0) + data.append(item) + cfunc_heappush(heap, item) + self.check_invariant(heap) + results = [] + while heap: + item = cfunc_heappop(heap) + self.check_invariant(heap) + results.append(item) + data_sorted = data[:] + data_sorted.sort() + self.assertPreciseEqual(list(data_sorted), results) + self.check_invariant(results) + + def test_heapify(self): + # inspired by + # https://github.com/python/cpython/blob/e42b7051/Lib/test/test_heapq.py + pyfunc = heapify + cfunc = jit(nopython=True)(pyfunc) + + for size in list(range(1, 30)) + [20000]: + heap = self.listimpl(self.rnd.random_sample(size)) + cfunc(heap) + self.check_invariant(heap) + + def test_heapify_exceptions(self): + pyfunc = heapify + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + with self.assertTypingError() as e: + cfunc((1, 5, 4)) + + msg = 'heap argument must be a list' + self.assertIn(msg, str(e.exception)) + + with self.assertTypingError() as e: + cfunc(self.listimpl([1 + 1j, 2 - 3j])) + + msg = ("'<' not supported between instances " + "of 'complex' and 'complex'") + self.assertIn(msg, str(e.exception)) + + def test_heappop_basic_sanity(self): + pyfunc = heappop + cfunc = 
jit(nopython=True)(pyfunc) + + def a_variations(): + yield [1, 3, 5, 7, 9, 2, 4, 6, 8, 0] + yield [(3, 33), (1, 111), (2, 2222)] + yield np.full(5, fill_value=np.nan).tolist() + yield np.linspace(-10, -5, 100).tolist() + + for a in a_variations(): + heapify(a) + b = self.listimpl(a) + + for i in range(len(a)): + val_py = pyfunc(a) + val_c = cfunc(b) + self.assertPreciseEqual(a, list(b)) + self.assertPreciseEqual(val_py, val_c) + + def test_heappop_exceptions(self): + pyfunc = heappop + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + with self.assertTypingError() as e: + cfunc((1, 5, 4)) + + msg = 'heap argument must be a list' + self.assertIn(msg, str(e.exception)) + + def iterables(self): + yield self.listimpl([1, 3, 5, 7, 9, 2, 4, 6, 8, 0]) + a = np.linspace(-10, 2, 23) + yield self.listimpl(a) + yield self.listimpl(a[::-1]) + self.rnd.shuffle(a) + yield self.listimpl(a) + + def test_heappush_basic(self): + pyfunc_push = heappush + cfunc_push = jit(nopython=True)(pyfunc_push) + + pyfunc_pop = heappop + cfunc_pop = jit(nopython=True)(pyfunc_pop) + + for iterable in self.iterables(): + expected = sorted(iterable) + heap = self.listimpl([iterable.pop(0)]) # must initialise heap + + for value in iterable: + cfunc_push(heap, value) + + got = [cfunc_pop(heap) for _ in range(len(heap))] + self.assertPreciseEqual(expected, got) + + def test_heappush_exceptions(self): + pyfunc = heappush + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + with self.assertTypingError() as e: + cfunc((1, 5, 4), 6) + + msg = 'heap argument must be a list' + self.assertIn(msg, str(e.exception)) + + with self.assertTypingError() as e: + cfunc(self.listimpl([1, 5, 4]), 6.0) + + msg = 'heap type must be the same as item type' + self.assertIn(msg, str(e.exception)) + + def test_nsmallest_basic(self): + pyfunc = nsmallest + cfunc = jit(nopython=True)(pyfunc) + + for iterable in self.iterables(): + 
for n in range(-5, len(iterable) + 3): + expected = pyfunc(1, iterable) + got = cfunc(1, iterable) + self.assertPreciseEqual(expected, got) + + # n is boolean + out = cfunc(False, self.listimpl([3, 2, 1])) + self.assertPreciseEqual(out, []) + + out = cfunc(True, self.listimpl([3, 2, 1])) + self.assertPreciseEqual(out, [1]) + + # iterable is not a list + out = cfunc(2, (6, 5, 4, 3, 2, 1)) + self.assertPreciseEqual(out, [1, 2]) + + out = cfunc(3, np.arange(6)) + self.assertPreciseEqual(out, [0, 1, 2]) + + def test_nlargest_basic(self): + pyfunc = nlargest + cfunc = jit(nopython=True)(pyfunc) + + for iterable in self.iterables(): + for n in range(-5, len(iterable) + 3): + expected = pyfunc(1, iterable) + got = cfunc(1, iterable) + self.assertPreciseEqual(expected, got) + + # n is boolean + out = cfunc(False, self.listimpl([3, 2, 1])) + self.assertPreciseEqual(out, []) + + out = cfunc(True, self.listimpl([3, 2, 1])) + self.assertPreciseEqual(out, [3]) + + # iterable is not a list + out = cfunc(2, (6, 5, 4, 3, 2, 1)) + self.assertPreciseEqual(out, [6, 5]) + + out = cfunc(3, np.arange(6)) + self.assertPreciseEqual(out, [5, 4, 3]) + + def _assert_typing_error(self, cfunc): + + # Exceptions leak references + self.disable_leak_check() + + with self.assertTypingError() as e: + cfunc(2.2, self.listimpl([3, 2, 1])) + + msg = "First argument 'n' must be an integer" + self.assertIn(msg, str(e.exception)) + + with self.assertTypingError() as e: + cfunc(2, 100) + + msg = "Second argument 'iterable' must be iterable" + self.assertIn(msg, str(e.exception)) + + def test_nsmallest_exceptions(self): + pyfunc = nsmallest + cfunc = jit(nopython=True)(pyfunc) + self._assert_typing_error(cfunc) + + def test_nlargest_exceptions(self): + pyfunc = nlargest + cfunc = jit(nopython=True)(pyfunc) + self._assert_typing_error(cfunc) + + def test_heapreplace_basic(self): + pyfunc = heapreplace + cfunc = jit(nopython=True)(pyfunc) + + a = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0] + + heapify(a) + b = 
self.listimpl(a) + + for item in [-4, 4, 14]: + pyfunc(a, item) + cfunc(b, item) + self.assertPreciseEqual(a, list(b)) + + a = np.linspace(-3, 13, 20) + a[4] = np.nan + a[-1] = np.inf + a = a.tolist() + + heapify(a) + b = self.listimpl(a) + + for item in [-4.0, 3.142, -np.inf, np.inf]: + pyfunc(a, item) + cfunc(b, item) + self.assertPreciseEqual(a, list(b)) + + def test_heapreplace_exceptions(self): + pyfunc = heapreplace + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + with self.assertTypingError() as e: + cfunc((1, 5, 4), -1) + + msg = 'heap argument must be a list' + self.assertIn(msg, str(e.exception)) + + with self.assertTypingError() as e: + cfunc(self.listimpl([1, 5, 4]), -1.0) + + msg = 'heap type must be the same as item type' + self.assertIn(msg, str(e.exception)) + + def heapiter(self, heap): + try: + while 1: + yield heappop(heap) + except IndexError: + pass + + def test_nbest(self): + # inspired by + # https://github.com/python/cpython/blob/e42b7051/Lib/test/test_heapq.py + cfunc_heapify = jit(nopython=True)(heapify) + cfunc_heapreplace = jit(nopython=True)(heapreplace) + + data = self.rnd.choice(range(2000), 1000).tolist() + heap = self.listimpl(data[:10]) + cfunc_heapify(heap) + + for item in data[10:]: + if item > heap[0]: + cfunc_heapreplace(heap, item) + + self.assertPreciseEqual(list(self.heapiter(list(heap))), + sorted(data)[-10:]) + + def test_heapsort(self): + # inspired by + # https://github.com/python/cpython/blob/e42b7051/Lib/test/test_heapq.py + cfunc_heapify = jit(nopython=True)(heapify) + cfunc_heappush = jit(nopython=True)(heappush) + cfunc_heappop = jit(nopython=True)(heappop) + + for trial in range(100): + # Ensure consistency of typing, use float64 as it's double + # everywhere + values = np.arange(5, dtype=np.float64) + data = self.listimpl(self.rnd.choice(values, 10)) + if trial & 1: + heap = data[:] + cfunc_heapify(heap) + else: + heap = self.listimpl([data[0]]) + for item in 
data[1:]: + cfunc_heappush(heap, item) + heap_sorted = [cfunc_heappop(heap) for _ in range(10)] + self.assertPreciseEqual(heap_sorted, sorted(data)) + + def test_nsmallest(self): + # inspired by + # https://github.com/python/cpython/blob/e42b7051/Lib/test/test_heapq.py + pyfunc = nsmallest + cfunc = jit(nopython=True)(pyfunc) + + data = self.listimpl(self.rnd.choice(range(2000), 1000)) + + for n in (0, 1, 2, 10, 100, 400, 999, 1000, 1100): + self.assertPreciseEqual(list(cfunc(n, data)), sorted(data)[:n]) + + def test_nlargest(self): + # inspired by + # https://github.com/python/cpython/blob/e42b7051/Lib/test/test_heapq.py + pyfunc = nlargest + cfunc = jit(nopython=True)(pyfunc) + + data = self.listimpl(self.rnd.choice(range(2000), 1000)) + + for n in (0, 1, 2, 10, 100, 400, 999, 1000, 1100): + self.assertPreciseEqual(list(cfunc(n, data)), + sorted(data, reverse=True)[:n]) + + def test_nbest_with_pushpop(self): + # inspired by + # https://github.com/python/cpython/blob/e42b7051/Lib/test/test_heapq.py + pyfunc_heappushpop = heappushpop + cfunc_heappushpop = jit(nopython=True)(pyfunc_heappushpop) + + pyfunc_heapify = heapify + cfunc_heapify = jit(nopython=True)(pyfunc_heapify) + + # Ensure consistency of typing, use float64 as it's double everywhere + values = np.arange(2000, dtype=np.float64) + data = self.listimpl(self.rnd.choice(values, 1000)) + heap = data[:10] + cfunc_heapify(heap) + + for item in data[10:]: + cfunc_heappushpop(heap, item) + + self.assertPreciseEqual(list(self.heapiter(list(heap))), + sorted(data)[-10:]) + + def test_heappushpop(self): + # inspired by + # https://github.com/python/cpython/blob/e42b7051/Lib/test/test_heapq.py + pyfunc = heappushpop + cfunc = jit(nopython=True)(pyfunc) + + h = self.listimpl([1.0]) + x = cfunc(h, 10.0) + self.assertPreciseEqual((list(h), x), ([10.0], 1.0)) + self.assertPreciseEqual(type(h[0]), float) + self.assertPreciseEqual(type(x), float) + + h = self.listimpl([10]) + x = cfunc(h, 9) + 
self.assertPreciseEqual((list(h), x), ([10], 9)) + + h = self.listimpl([10]) + x = cfunc(h, 11) + self.assertPreciseEqual((list(h), x), ([11], 10)) + + def test_heappushpop_exceptions(self): + pyfunc = heappushpop + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + with self.assertTypingError() as e: + cfunc((1, 5, 4), -1) + + msg = 'heap argument must be a list' + self.assertIn(msg, str(e.exception)) + + with self.assertTypingError() as e: + cfunc(self.listimpl([1, 5, 4]), False) + + msg = 'heap type must be the same as item type' + self.assertIn(msg, str(e.exception)) + + +class TestHeapqReflectedList(_TestHeapq, TestCase): + """Test heapq with reflected lists""" + + listimpl = list + + +class TestHeapqTypedList(_TestHeapq, TestCase): + """Test heapq with typed lists""" + + listimpl = typed.List diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_help.py b/venv/lib/python3.10/site-packages/numba/tests/test_help.py new file mode 100644 index 0000000000000000000000000000000000000000..8199d4c93c4b6969f9437e8b28cc05a4948d9c5e --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_help.py @@ -0,0 +1,92 @@ +import sys +import subprocess +import types as pytypes +import os.path + +import numpy as np + +import builtins +from numba.core import types +from numba.tests.support import TestCase, temp_directory +from numba.misc.help.inspector import inspect_function, inspect_module + + +class TestInspector(TestCase): + def check_function_descriptor(self, info, must_be_defined=False): + self.assertIsInstance(info, dict) + self.assertIn('numba_type', info) + numba_type = info['numba_type'] + if numba_type is None: + self.assertFalse(must_be_defined) + else: + self.assertIsInstance(numba_type, types.Type) + self.assertIn('explained', info) + self.assertIsInstance(info['explained'], str) + self.assertIn('source_infos', info) + self.assertIsInstance(info['source_infos'], dict) + + def 
test_inspect_function_on_range(self): + info = inspect_function(range) + self.check_function_descriptor(info, must_be_defined=True) + + def test_inspect_function_on_np_all(self): + info = inspect_function(np.all) + self.check_function_descriptor(info, must_be_defined=True) + source_infos = info['source_infos'] + self.assertGreater(len(source_infos), 0) + c = 0 + for srcinfo in source_infos.values(): + self.assertIsInstance(srcinfo['kind'], str) + self.assertIsInstance(srcinfo['name'], str) + self.assertIsInstance(srcinfo['sig'], str) + self.assertIsInstance(srcinfo['filename'], str) + self.assertIsInstance(srcinfo['lines'], tuple) + self.assertIn('docstring', srcinfo) + c += 1 + self.assertEqual(c, len(source_infos)) + + def test_inspect_module(self): + c = 0 + for it in inspect_module(builtins): + self.assertIsInstance(it['module'], pytypes.ModuleType) + self.assertIsInstance(it['name'], str) + self.assertTrue(callable(it['obj'])) + self.check_function_descriptor(it) + c += 1 + self.assertGreater(c, 0) + + def test_inspect_cli(self): + # Try CLI on math module + cmdbase = [sys.executable, '-m', 'numba.misc.help.inspector'] + + dirpath = temp_directory('{}.{}'.format(__name__, + self.__class__.__name__)) + filename = os.path.join(dirpath, 'out') + + # Try default format "html" + expected_file = filename + '.html' + cmds = cmdbase + ['--file', filename, 'math'] + # File shouldn't exist yet + self.assertFalse(os.path.isfile(expected_file)) + # Run CLI + subprocess.check_output(cmds) + # File should exist now + self.assertTrue(os.path.isfile(expected_file)) + + # Try changing the format to "rst" + cmds = cmdbase + ['--file', filename, '--format', 'rst', 'math'] + expected_file = filename + '.rst' + # File shouldn't exist yet + self.assertFalse(os.path.isfile(expected_file)) + # Run CLI + subprocess.check_output(cmds) + # File should exist now + self.assertTrue(os.path.isfile(expected_file)) + + # Try unsupported format + cmds = cmdbase + ['--file', filename, 
'--format', 'foo', 'math'] + # Run CLI + with self.assertRaises(subprocess.CalledProcessError) as raises: + subprocess.check_output(cmds, stderr=subprocess.STDOUT) + self.assertIn("\'foo\' is not supported", + raises.exception.stdout.decode()) diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_import.py b/venv/lib/python3.10/site-packages/numba/tests/test_import.py new file mode 100644 index 0000000000000000000000000000000000000000..6f1c931dfae65e1e5e2feda1f2d8fae182d5188c --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_import.py @@ -0,0 +1,112 @@ +import unittest +from numba.tests.support import TestCase, run_in_subprocess + + +class TestNumbaImport(TestCase): + """ + Test behaviour of importing Numba. + """ + + def test_laziness(self): + """ + Importing top-level numba features should not import too many modules. + """ + # A heuristic set of modules that shouldn't be imported immediately + banlist = ['cffi', + 'distutils', + 'numba.cuda', + 'numba.cpython.mathimpl', + 'numba.cpython.randomimpl', + 'numba.tests', + 'numba.core.typing.collections', + 'numba.core.typing.listdecl', + 'numba.core.typing.npdatetime', + ] + # Sanity check the modules still exist... 
+ for mod in banlist: + if mod not in ('cffi',): + __import__(mod) + + code = """if 1: + from numba import jit, vectorize + from numba.core import types + import sys + print(list(sys.modules)) + """ + + out, _ = run_in_subprocess(code) + modlist = set(eval(out.strip())) + unexpected = set(banlist) & set(modlist) + self.assertFalse(unexpected, "some modules unexpectedly imported") + + def test_no_impl_import(self): + """ + Tests that importing jit does not trigger import of modules containing + lowering implementations that would likely install things in the + builtins registry and have side effects impacting other targets + """ + # None of these modules should be imported through the process of + # doing 'import numba' or 'from numba import njit' + banlist = ['numba.cpython.slicing', + 'numba.cpython.tupleobj', + 'numba.cpython.enumimpl', + 'numba.cpython.hashing', + 'numba.cpython.heapq', + 'numba.cpython.iterators', + 'numba.cpython.numbers', + 'numba.cpython.rangeobj', + 'numba.cpython.cmathimpl', + 'numba.cpython.mathimpl', + 'numba.cpython.printimpl', + 'numba.cpython.randomimpl', + 'numba.core.optional', + 'numba.misc.gdb_hook', + 'numba.misc.literal', + 'numba.misc.cffiimpl', + 'numba.np.linalg', + 'numba.np.polynomial', + 'numba.np.arraymath', + 'numba.np.npdatetime', + 'numba.np.npyimpl', + 'numba.typed.typeddict', + 'numba.typed.typedlist', + 'numba.experimental.jitclass.base',] + + code1 = """if 1: + import sys + import numba + print(list(sys.modules)) + """ + + code2 = """if 1: + import sys + from numba import njit + @njit + def foo(): + pass + print(list(sys.modules)) + """ + + for code in (code1, code2): + out, _ = run_in_subprocess(code) + modlist = set(eval(out.strip())) + unexpected = set(banlist) & set(modlist) + self.assertFalse(unexpected, "some modules unexpectedly imported") + + def test_no_accidental_warnings(self): + # checks that importing Numba isn't accidentally triggering warnings due + # to e.g. 
deprecated use of import locations from Python's stdlib + code = "import numba" + # See: https://github.com/numba/numba/issues/6831 + # bug in setuptools/packaging causing a deprecation warning + flags = ["-Werror", "-Wignore::DeprecationWarning:packaging.version:"] + run_in_subprocess(code, flags) + + def test_import_star(self): + # checks that "from numba import *" works. + code = "from numba import *" + run_in_subprocess(code) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_indexing.py b/venv/lib/python3.10/site-packages/numba/tests/test_indexing.py new file mode 100644 index 0000000000000000000000000000000000000000..500ea46f761ec6d43e88431ff964ede0347000c8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_indexing.py @@ -0,0 +1,1119 @@ +import decimal +import itertools + +import numpy as np + +import unittest +from numba import jit, njit, typeof +from numba.core import utils, types, errors +from numba.tests.support import TestCase, tag +from numba.core.typing import arraydecl +from numba.core.types import intp, ellipsis, slice2_type, slice3_type + + +enable_pyobj_flags = {'forceobj': True} + +Noflags = {'nopython': True} + + +def slicing_1d_usecase(a, start, stop, step): + return a[start:stop:step] + +def slicing_1d_usecase2(a, start, stop, step): + b = a[start:stop:step] + total = 0 + for i in range(b.shape[0]): + total += b[i] * (i + 1) + return total + +def slicing_1d_usecase3(a, start, stop): + b = a[start:stop] + total = 0 + for i in range(b.shape[0]): + total += b[i] * (i + 1) + return total + +def slicing_1d_usecase4(a): + b = a[:] + total = 0 + for i in range(b.shape[0]): + total += b[i] * (i + 1) + return total + +def slicing_1d_usecase5(a, start): + b = a[start:] + total = 0 + for i in range(b.shape[0]): + total += b[i] * (i + 1) + return total + +def slicing_1d_usecase6(a, stop): + b = a[:stop] + total = 0 + for i in range(b.shape[0]): + total += b[i] * (i + 1) + 
# Slicing / indexing usecase helpers.  Each helper is compiled by the tests
# below with numba's ``jit`` and compared against its pure-Python result.

def slicing_1d_usecase7(a, start):
    # Omitted stop with negative step (issue #1690)
    b = a[start::-2]
    total = 0
    for i in range(b.shape[0]):
        total += b[i] * (i + 1)
    return total

def slicing_1d_usecase8(a, start):
    # Omitted start with negative step
    b = a[::-2]
    total = 0
    for i in range(b.shape[0]):
        total += b[i] * (i + 1)
    return total


def slicing_2d_usecase(a, start1, stop1, step1, start2, stop2, step2):
    # The index is a homogeneous tuple of slices
    return a[start1:stop1:step1, start2:stop2:step2]

def slicing_2d_usecase3(a, start1, stop1, step1, index):
    # The index is a heterogeneous tuple
    return a[start1:stop1:step1, index]

def slicing_3d_usecase(a, index0, start1, index2):
    b = a[index0, start1:, index2]
    total = 0
    for i in range(b.shape[0]):
        total += b[i] * (i + 1)
    return total

def slicing_3d_usecase2(a, index0, stop1, index2):
    b = a[index0, :stop1, index2]
    total = 0
    for i in range(b.shape[0]):
        total += b[i] * (i + 1)
    return total

def partial_1d_usecase(a, index):
    # Partial indexing: a single integer index into a multi-dim array
    b = a[index]
    total = 0
    for i in range(b.shape[0]):
        total += b[i] * (i + 1)
    return total

def integer_indexing_1d_usecase(a, i):
    return a[i]

def integer_indexing_2d_usecase(a, i1, i2):
    return a[i1,i2]

def integer_indexing_2d_usecase2(a, i1, i2):
    return a[i1][i2]

def ellipsis_usecase1(a, i, j):
    return a[i:j, ...]

def ellipsis_usecase2(a, i, j):
    return a[..., i:j]

def ellipsis_usecase3(a, i, j):
    return a[i, ..., j]

def none_index_usecase(a):
    return a[None]

def empty_tuple_usecase(a):
    return a[()]


@njit
def setitem_usecase(a, index, value):
    a[index] = value


@njit
def setitem_broadcast_usecase(a, value):
    a[:] = value


def slicing_1d_usecase_set(a, b, start, stop, step):
    a[start:stop:step] = b
    return a

def slicing_1d_usecase_add(a, b, start, stop):
    # NOTE: uses the ROT_FOUR opcode on Python 2, only on the [start:stop]
    # with inplace operator form.
    a[start:stop] += b
    return a

def slicing_2d_usecase_set(a, b, start, stop, step, start2, stop2, step2):
    a[start:stop:step,start2:stop2:step2] = b
    return a


class TestGetItem(TestCase):
    """
    Test basic indexed load from an array (returning a view or a scalar).
    Note fancy indexing is tested in test_fancy_indexing.
    """

    def test_1d_slicing(self, flags=enable_pyobj_flags):
        pyfunc = slicing_1d_usecase
        arraytype = types.Array(types.int32, 1, 'C')
        argtys = (arraytype, types.int32, types.int32, types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(10, dtype='i4')
        for indices in [(0, 10, 1),
                        (2, 3, 1),
                        (10, 0, 1),
                        (0, 10, -1),
                        (0, 10, 2),
                        (9, 0, -1),
                        (-5, -2, 1),
                        (0, -1, 1),
                        ]:
            expected = pyfunc(a, *indices)
            self.assertPreciseEqual(cfunc(a, *indices), expected)

    def test_1d_slicing_npm(self):
        self.test_1d_slicing(flags=Noflags)

    def test_1d_slicing2(self, flags=enable_pyobj_flags):
        pyfunc = slicing_1d_usecase2
        arraytype = types.Array(types.int32, 1, 'C')
        argtys = (arraytype, types.int32, types.int32, types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(10, dtype='i4')

        args = [(0, 10, 1),
                (2, 3, 1),
                (10, 0, 1),
                (0, 10, -1),
                (0, 10, 2)]

        for arg in args:
            self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))


        # Any
        arraytype = types.Array(types.int32, 1, 'A')
        argtys = (arraytype, types.int32, types.int32, types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        # Non-contiguous view exercises the 'A' (any) layout path
        a = np.arange(20, dtype='i4')[::2]
        self.assertFalse(a.flags['C_CONTIGUOUS'])
        self.assertFalse(a.flags['F_CONTIGUOUS'])

        args = [(0, 10, 1),
                (2, 3, 1),
                (10, 0, 1),
                (0, 10, -1),
                (0, 10, 2)]

        for arg in args:
            self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))

    def test_1d_slicing2_npm(self):
        self.test_1d_slicing2(flags=Noflags)

    def test_1d_slicing3(self, flags=enable_pyobj_flags):
        pyfunc = slicing_1d_usecase3
        arraytype = types.Array(types.int32, 1, 'C')
        argtys = (arraytype, types.int32, types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(10, dtype='i4')

        args = [(3, 10),
                (2, 3),
                (10, 0),
                (0, 10),
                (5, 10)]

        for arg in args:
            self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))


        # Any
        arraytype = types.Array(types.int32, 1, 'A')
        argtys = (arraytype, types.int32, types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(20, dtype='i4')[::2]
        self.assertFalse(a.flags['C_CONTIGUOUS'])
        self.assertFalse(a.flags['F_CONTIGUOUS'])

        for arg in args:
            self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))

    def test_1d_slicing3_npm(self):
        self.test_1d_slicing3(flags=Noflags)

    def test_1d_slicing4(self, flags=enable_pyobj_flags):
        pyfunc = slicing_1d_usecase4
        arraytype = types.Array(types.int32, 1, 'C')
        argtys = (arraytype,)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(10, dtype='i4')
        self.assertEqual(pyfunc(a), cfunc(a))

        # Any
        arraytype = types.Array(types.int32, 1, 'A')
        argtys = (arraytype,)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(20, dtype='i4')[::2]
        self.assertFalse(a.flags['C_CONTIGUOUS'])
        self.assertFalse(a.flags['F_CONTIGUOUS'])
        self.assertEqual(pyfunc(a), cfunc(a))

    def test_1d_slicing4_npm(self):
        self.test_1d_slicing4(flags=Noflags)

    def check_1d_slicing_with_arg(self, pyfunc, flags):
        # Shared checker for the single-int-argument slicing usecases,
        # run against both 'C' and 'A' layout arrays.
        args = list(range(-9, 10))

        arraytype = types.Array(types.int32, 1, 'C')
        argtys = (arraytype, types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(10, dtype='i4')
        for arg in args:
            self.assertEqual(pyfunc(a, arg), cfunc(a, arg))

        # Any
        arraytype = types.Array(types.int32, 1, 'A')
        argtys = (arraytype, types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(20, dtype='i4')[::2]
        self.assertFalse(a.flags['C_CONTIGUOUS'])
        self.assertFalse(a.flags['F_CONTIGUOUS'])
        for arg in args:
            self.assertEqual(pyfunc(a, arg), cfunc(a, arg))

    def test_1d_slicing5(self, flags=enable_pyobj_flags):
        pyfunc = slicing_1d_usecase5
        self.check_1d_slicing_with_arg(pyfunc, flags)

    def test_1d_slicing5_npm(self):
        self.test_1d_slicing5(flags=Noflags)

    def test_1d_slicing6(self, flags=enable_pyobj_flags):
        pyfunc = slicing_1d_usecase6
        self.check_1d_slicing_with_arg(pyfunc, flags)

    def test_1d_slicing6_npm(self):
        self.test_1d_slicing6(flags=Noflags)

    def test_1d_slicing7(self, flags=enable_pyobj_flags):
        pyfunc = slicing_1d_usecase7
        self.check_1d_slicing_with_arg(pyfunc, flags)

    def test_1d_slicing7_npm(self):
        self.test_1d_slicing7(flags=Noflags)

    def test_1d_slicing8(self, flags=enable_pyobj_flags):
        pyfunc = slicing_1d_usecase8
        self.check_1d_slicing_with_arg(pyfunc, flags)

    def test_1d_slicing8_npm(self):
        self.test_1d_slicing8(flags=Noflags)

    def test_2d_slicing(self, flags=enable_pyobj_flags):
        """
        arr_2d[a:b:c]
        """
        pyfunc = slicing_1d_usecase
        arraytype = types.Array(types.int32, 2, 'C')
        argtys = (arraytype, types.int32, types.int32, types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(100, dtype='i4').reshape(10, 10)
        for args in [(0, 10, 1), (2, 3, 1), (10, 0, 1),
                     (0, 10, -1), (0, 10, 2)]:
            self.assertPreciseEqual(pyfunc(a, *args), cfunc(a, *args),
                                    msg="for args %s" % (args,))

    def test_2d_slicing_npm(self):
        self.test_2d_slicing(flags=Noflags)

    def test_2d_slicing2(self, flags=enable_pyobj_flags):
        """
        arr_2d[a:b:c, d:e:f]
        """
        # C layout
        pyfunc = slicing_2d_usecase
        arraytype = types.Array(types.int32, 2, 'C')
        argtys = (arraytype, types.int32, types.int32, types.int32,
                  types.int32, types.int32, types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(100, dtype='i4').reshape(10, 10)

        indices = [(0, 10, 1),
                   (2, 3, 1),
                   (10, 0, 1),
                   (0, 10, -1),
                   (0, 10, 2),
                   (10, 0, -1),
                   (9, 0, -2),
                   (-5, -2, 1),
                   (0, -1, 1),
                   ]
        # Cross-product of the index triples for both dimensions
        args = [tup1 + tup2
                for (tup1, tup2) in itertools.product(indices, indices)]
        for arg in args:
            expected = pyfunc(a, *arg)
            self.assertPreciseEqual(cfunc(a, *arg), expected)

        # Any layout
        arraytype = types.Array(types.int32, 2, 'A')
        argtys = (arraytype, types.int32, types.int32, types.int32,
                  types.int32, types.int32, types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(400, dtype='i4').reshape(20, 20)[::2, ::2]

        for arg in args:
            expected = pyfunc(a, *arg)
            self.assertPreciseEqual(cfunc(a, *arg), expected)

    def test_2d_slicing2_npm(self):
        self.test_2d_slicing2(flags=Noflags)

    def test_2d_slicing3(self, flags=enable_pyobj_flags):
        """
        arr_2d[a:b:c, d]
        """
        # C layout
        pyfunc = slicing_2d_usecase3
        arraytype = types.Array(types.int32, 2, 'C')
        argtys = (arraytype, types.int32, types.int32, types.int32,
                  types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(100, dtype='i4').reshape(10, 10)

        args = [
            (0, 10, 1, 0),
            (2, 3, 1, 1),
            (10, 0, -1, 8),
            (9, 0, -2, 4),
            (0, 10, 2, 3),
            (0, -1, 3, 1),
            ]
        for arg in args:
            expected = pyfunc(a, *arg)
            self.assertPreciseEqual(cfunc(a, *arg), expected)

        # Any layout
        arraytype = types.Array(types.int32, 2, 'A')
        argtys = (arraytype, types.int32, types.int32, types.int32,
                  types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(400, dtype='i4').reshape(20, 20)[::2, ::2]

        for arg in args:
            expected = pyfunc(a, *arg)
            self.assertPreciseEqual(cfunc(a, *arg), expected)

    def test_2d_slicing3_npm(self):
        self.test_2d_slicing3(flags=Noflags)

    def test_3d_slicing(self, flags=enable_pyobj_flags):
        # C layout
        pyfunc = slicing_3d_usecase
        arraytype = types.Array(types.int32, 3, 'C')
        argtys = (arraytype, types.int32, types.int32, types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(1000, dtype='i4').reshape(10, 10, 10)

        args = [
            (0, 9, 1),
            (2, 3, 1),
            (9, 0, 1),
            (0, 9, -1),
            (0, 9, 2),
            ]
        for arg in args:
            self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))

        # Any layout
        arraytype = types.Array(types.int32, 3, 'A')
        argtys = (arraytype, types.int32, types.int32, types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(2000, dtype='i4')[::2].reshape(10, 10, 10)

        for arg in args:
            self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))

    def test_3d_slicing_npm(self):
        self.test_3d_slicing(flags=Noflags)

    def test_3d_slicing2(self, flags=enable_pyobj_flags):
        # C layout
        pyfunc = slicing_3d_usecase2
        arraytype = types.Array(types.int32, 3, 'C')
        argtys = (arraytype, types.int32, types.int32, types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(1000, dtype='i4').reshape(10, 10, 10)

        args = [
            (0, 9, 1),
            (2, 3, 1),
            (9, 0, 1),
            (0, 9, -1),
            (0, 9, 2),
            ]
        for arg in args:
            self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))

        # Any layout
        arraytype = types.Array(types.int32, 3, 'A')
        argtys = (arraytype, types.int32, types.int32, types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(2000, dtype='i4')[::2].reshape(10, 10, 10)

        for arg in args:
            self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))

    def test_3d_slicing2_npm(self):
        self.test_3d_slicing2(flags=Noflags)

    def test_1d_integer_indexing(self, flags=enable_pyobj_flags):
        # C layout
        pyfunc = integer_indexing_1d_usecase
        arraytype = types.Array(types.int32, 1, 'C')
        argtys = (arraytype, types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(10, dtype='i4')
        self.assertEqual(pyfunc(a, 0), cfunc(a, 0))
        self.assertEqual(pyfunc(a, 9), cfunc(a, 9))
        self.assertEqual(pyfunc(a, -1), cfunc(a, -1))

        # Any layout
        arraytype = types.Array(types.int32, 1, 'A')
        argtys = (arraytype, types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(10, dtype='i4')[::2]
        self.assertFalse(a.flags['C_CONTIGUOUS'])
        self.assertFalse(a.flags['F_CONTIGUOUS'])
        self.assertEqual(pyfunc(a, 0), cfunc(a, 0))
        self.assertEqual(pyfunc(a, 2), cfunc(a, 2))
        self.assertEqual(pyfunc(a, -1), cfunc(a, -1))

        # Using a 0-d array as integer index
        arraytype = types.Array(types.int32, 1, 'C')
        indextype = types.Array(types.int16, 0, 'C')
        argtys = (arraytype, indextype)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(3, 13, dtype=np.int32)
        for i in (0, 9, -2):
            idx = np.array(i).astype(np.int16)
            assert idx.ndim == 0
            self.assertEqual(pyfunc(a, idx), cfunc(a, idx))

    def test_1d_integer_indexing_npm(self):
        self.test_1d_integer_indexing(flags=Noflags)

    def test_integer_indexing_1d_for_2d(self, flags=enable_pyobj_flags):
        # Test partial (1d) indexing of a 2d array
        pyfunc = integer_indexing_1d_usecase
        arraytype = types.Array(types.int32, 2, 'C')
        argtys = (arraytype, types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(100, dtype='i4').reshape(10, 10)
        self.assertPreciseEqual(pyfunc(a, 0), cfunc(a, 0))
        self.assertPreciseEqual(pyfunc(a, 9), cfunc(a, 9))
        self.assertPreciseEqual(pyfunc(a, -1), cfunc(a, -1))

        arraytype = types.Array(types.int32, 2, 'A')
        argtys = (arraytype, types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(20, dtype='i4').reshape(5, 4)[::2]
        self.assertPreciseEqual(pyfunc(a, 0), cfunc(a, 0))

    def test_integer_indexing_1d_for_2d_npm(self):
        self.test_integer_indexing_1d_for_2d(flags=Noflags)

    def test_2d_integer_indexing(self, flags=enable_pyobj_flags,
                                 pyfunc=integer_indexing_2d_usecase):
        # C layout
        a = np.arange(100, dtype='i4').reshape(10, 10)
        arraytype = types.Array(types.int32, 2, 'C')
        argtys = (arraytype, types.int32, types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        self.assertEqual(pyfunc(a, 0, 3), cfunc(a, 0, 3))
        self.assertEqual(pyfunc(a, 9, 9), cfunc(a, 9, 9))
        self.assertEqual(pyfunc(a, -2, -1), cfunc(a, -2, -1))

        # Any layout
        a = np.arange(100, dtype='i4').reshape(10, 10)[::2, ::2]
        self.assertFalse(a.flags['C_CONTIGUOUS'])
        self.assertFalse(a.flags['F_CONTIGUOUS'])

        arraytype = types.Array(types.int32, 2, 'A')
        argtys = (arraytype, types.int32, types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        self.assertEqual(pyfunc(a, 0, 1), cfunc(a, 0, 1))
        self.assertEqual(pyfunc(a, 2, 2), cfunc(a, 2, 2))
        self.assertEqual(pyfunc(a, -2, -1), cfunc(a, -2, -1))

        # With 0-d arrays as integer indices
        a = np.arange(100, dtype='i4').reshape(10, 10)
        arraytype = types.Array(types.int32, 2, 'C')
        indextype = types.Array(types.int32, 0, 'C')
        argtys = (arraytype, indextype, indextype)
        cfunc = jit(argtys, **flags)(pyfunc)

        for i, j in [(0, 3), (8, 9), (-2, -1)]:
            i = np.array(i).astype(np.int32)
            j = np.array(j).astype(np.int32)
            self.assertEqual(pyfunc(a, i, j), cfunc(a, i, j))

    def test_2d_integer_indexing_npm(self):
        self.test_2d_integer_indexing(flags=Noflags)

    def test_2d_integer_indexing2(self):
        self.test_2d_integer_indexing(pyfunc=integer_indexing_2d_usecase2)
        self.test_2d_integer_indexing(flags=Noflags,
                                      pyfunc=integer_indexing_2d_usecase2)

    def test_2d_integer_indexing_via_call(self):
        @njit
        def index1(X, i0):
            return X[i0]
        @njit
        def index2(X, i0, i1):
            return index1(X[i0], i1)
        a = np.arange(10).reshape(2, 5)
        self.assertEqual(index2(a, 0, 0), a[0][0])
        self.assertEqual(index2(a, 1, 1), a[1][1])
        self.assertEqual(index2(a, -1, -1), a[-1][-1])

    def test_2d_float_indexing(self, flags=enable_pyobj_flags):
        # First index typed as float32; exercises float->int index handling
        a = np.arange(100, dtype='i4').reshape(10, 10)
        pyfunc = integer_indexing_2d_usecase
        arraytype = types.Array(types.int32, 2, 'C')
        argtys = (arraytype, types.float32, types.int32)
        cfunc = jit(argtys, **flags)(pyfunc)

        self.assertEqual(pyfunc(a, 0, 0), cfunc(a, 0, 0))
        self.assertEqual(pyfunc(a, 9, 9), cfunc(a, 9, 9))
        self.assertEqual(pyfunc(a, -1, -1), cfunc(a, -1, -1))

    def test_partial_1d_indexing(self, flags=enable_pyobj_flags):
        pyfunc = partial_1d_usecase

        def check(arr, arraytype):
            argtys = (arraytype, types.int32)
            cfunc = jit(argtys, **flags)(pyfunc)
            self.assertEqual(pyfunc(arr, 0), cfunc(arr, 0))
            n = arr.shape[0] - 1
            self.assertEqual(pyfunc(arr, n), cfunc(arr, n))
            self.assertEqual(pyfunc(arr, -1), cfunc(arr, -1))

        a = np.arange(12, dtype='i4').reshape((4, 3))
        arraytype = types.Array(types.int32, 2, 'C')
        check(a, arraytype)

        a = np.arange(12, dtype='i4').reshape((3, 4)).T
        arraytype = types.Array(types.int32, 2, 'F')
        check(a, arraytype)

        a = np.arange(12, dtype='i4').reshape((3, 4))[::2]
        arraytype = types.Array(types.int32, 2, 'A')
        check(a, arraytype)

    def check_ellipsis(self, pyfunc, flags):
        # Shared checker for the ellipsis usecases on 2d and 3d arrays.
        def compile_func(arr):
            argtys = (typeof(arr), types.intp, types.intp)
            return jit(argtys, **flags)(pyfunc)

        def run(a):
            bounds = (0, 1, 2, -1, -2)
            cfunc = compile_func(a)
            for i, j in itertools.product(bounds, bounds):
                # NOTE(review): `x` is unused and cfunc is called twice per
                # pair — looks like leftover scaffolding, kept as-is.
                x = cfunc(a, i, j)
                np.testing.assert_equal(pyfunc(a, i, j), cfunc(a, i, j))

        run(np.arange(16, dtype='i4').reshape(4, 4))
        run(np.arange(27, dtype='i4').reshape(3, 3, 3))

    def test_ellipsis1(self, flags=enable_pyobj_flags):
        self.check_ellipsis(ellipsis_usecase1, flags)

    def test_ellipsis1_npm(self):
        self.test_ellipsis1(flags=Noflags)

    def test_ellipsis2(self, flags=enable_pyobj_flags):
        self.check_ellipsis(ellipsis_usecase2, flags)

    def test_ellipsis2_npm(self):
        self.test_ellipsis2(flags=Noflags)

    def test_ellipsis3(self, flags=enable_pyobj_flags):
        self.check_ellipsis(ellipsis_usecase3, flags)

    def test_ellipsis3_npm(self):
        self.test_ellipsis3(flags=Noflags)

    def test_ellipsis_issue1498(self):
        # This is an issue due to incorrect layout inferred for when
        # ellpsis is used and ndenumerate is specializing on the layout.
        @njit
        def udt(arr):
            out = np.zeros_like(arr)
            i = 0
            for index, val in np.ndenumerate(arr[..., i]):
                out[index][i] = val

            return out

        py_func = udt.py_func

        outersize = 4
        innersize = 4
        arr = np.arange(outersize * innersize).reshape(outersize, innersize)
        got = udt(arr)
        expected = py_func(arr)
        np.testing.assert_equal(got, expected)

    def test_ellipsis_issue1499(self):
        # This tests an issue when ndarray.__getitem__ recv a tuple of
        # constants. The lowering is mishandling the constant value creation.
        @njit
        def udt(arr):
            return arr[..., 0]

        arr = np.arange(3)
        got = udt(arr)
        expected = udt.py_func(arr)
        np.testing.assert_equal(got, expected)

    def test_none_index(self, flags=enable_pyobj_flags):
        pyfunc = none_index_usecase
        arraytype = types.Array(types.int32, 2, 'C')
        # TODO should be able to handle this in NoPython mode
        argtys = (arraytype,)
        cfunc = jit(argtys, **flags)(pyfunc)

        a = np.arange(100, dtype='i4').reshape(10, 10)
        self.assertPreciseEqual(pyfunc(a), cfunc(a))

    def test_none_index_npm(self):
        with self.assertTypingError():
            self.test_none_index(flags=Noflags)

    def test_empty_tuple_indexing(self, flags=enable_pyobj_flags):
        pyfunc = empty_tuple_usecase
        arraytype = types.Array(types.int32, 0, 'C')
        argtys = (arraytype,)
        cfunc = jit(argtys, **flags)(pyfunc)

        # 0-d array: indexing with the empty tuple returns the scalar
        a = np.arange(1, dtype='i4').reshape(())
        self.assertPreciseEqual(pyfunc(a), cfunc(a))

    def test_empty_tuple_indexing_npm(self):
        self.test_empty_tuple_indexing(flags=Noflags)
+ """ + + def test_conversion_setitem(self, flags=enable_pyobj_flags): + """ this used to work, and was used in one of the tutorials """ + from numba import jit + + def pyfunc(array): + for index in range(len(array)): + array[index] = index % decimal.Decimal(100) + + cfunc = jit("void(i8[:])", **flags)(pyfunc) + + udt = np.arange(100, dtype='i1') + control = udt.copy() + pyfunc(control) + cfunc(udt) + self.assertPreciseEqual(udt, control) + + def test_1d_slicing_set(self, flags=enable_pyobj_flags): + """ + 1d to 1d slice assignment + """ + pyfunc = slicing_1d_usecase_set + # Note heterogeneous types for the source and destination arrays + # (int16[:] -> int32[:]) + dest_type = types.Array(types.int32, 1, 'C') + src_type = types.Array(types.int16, 1, 'A') + argtys = (dest_type, src_type, types.int32, types.int32, types.int32) + cfunc = jit(argtys, **flags)(pyfunc) + + N = 10 + arg = np.arange(N, dtype='i2') + 40 + bounds = [0, 2, N - 2, N, N + 1, N + 3, + -2, -N + 2, -N, -N - 1, -N - 3] + def make_dest(): + return np.zeros_like(arg, dtype='i4') + for start, stop in itertools.product(bounds, bounds): + for step in (1, 2, -1, -2): + args = start, stop, step + index = slice(*args) + pyleft = pyfunc(make_dest(), arg[index], *args) + cleft = cfunc(make_dest(), arg[index], *args) + self.assertPreciseEqual(pyleft, cleft) + + # Mismatching input size and slice length + with self.assertRaises(ValueError): + cfunc(np.zeros_like(arg, dtype=np.int32), arg, 0, 0, 1) + + def check_1d_slicing_set_sequence(self, flags, seqty, seq): + """ + Generic sequence to 1d slice assignment + """ + pyfunc = slicing_1d_usecase_set + dest_type = types.Array(types.int32, 1, 'C') + argtys = (dest_type, seqty, types.int32, types.int32, types.int32) + # This emulates the use of `compile_result`. The args that are passed + # into this checking function are not as advertised in argtys and + # implicit casting is required. 
+ cfunc = jit(argtys, **flags)(pyfunc).overloads[argtys].entry_point + + N = 10 + k = len(seq) + arg = np.arange(N, dtype=np.int32) + args = (seq, 1, -N + k + 1, 1) + expected = pyfunc(arg.copy(), *args) + got = cfunc(arg.copy(), *args) + self.assertPreciseEqual(expected, got) + + args = (seq, 1, -N + k, 1) + with self.assertRaises(ValueError) as raises: + cfunc(arg.copy(), *args) + + if flags.get('nopython', False): + # if in nopython mode, check the error message from Numba + slice_size = len(arg[slice(1, -N + k, 1)]) + msg = (f"cannot assign slice of shape ({k},) from input of shape " + f"({slice_size},)") + self.assertIn(msg, str(raises.exception)) + + def test_1d_slicing_set_tuple(self, flags=enable_pyobj_flags): + """ + Tuple to 1d slice assignment + """ + self.check_1d_slicing_set_sequence( + flags, types.UniTuple(types.int16, 2), (8, -42)) + + def test_1d_slicing_set_list(self, flags=enable_pyobj_flags): + """ + List to 1d slice assignment + """ + self.check_1d_slicing_set_sequence( + flags, types.List(types.int16), [8, -42]) + + def test_1d_slicing_broadcast(self, flags=enable_pyobj_flags): + """ + scalar to 1d slice assignment + """ + pyfunc = slicing_1d_usecase_set + arraytype = types.Array(types.int32, 1, 'C') + # Note heterogeneous types for the source scalar and the destination + # array (int16 -> int32[:]) + argtys = (arraytype, types.int16, types.int32, types.int32, types.int32) + cfunc = jit(argtys, **flags)(pyfunc) + + N = 10 + arg = np.arange(N, dtype='i4') + val = 42 + bounds = [0, 2, N - 2, N, N + 1, N + 3, + -2, -N + 2, -N, -N - 1, -N - 3] + for start, stop in itertools.product(bounds, bounds): + for step in (1, 2, -1, -2): + args = val, start, stop, step + pyleft = pyfunc(arg.copy(), *args) + cleft = cfunc(arg.copy(), *args) + self.assertPreciseEqual(pyleft, cleft) + + def test_1d_slicing_add(self, flags=enable_pyobj_flags): + pyfunc = slicing_1d_usecase_add + arraytype = types.Array(types.int32, 1, 'C') + argtys = (arraytype, arraytype, 
types.int32, types.int32) + cfunc = jit(argtys, **flags)(pyfunc) + + arg = np.arange(10, dtype='i4') + for test in ((0, 10), (2, 5)): + pyleft = pyfunc(np.zeros_like(arg), arg[slice(*test)], *test) + cleft = cfunc(np.zeros_like(arg), arg[slice(*test)], *test) + self.assertPreciseEqual(pyleft, cleft) + + def test_1d_slicing_set_npm(self): + self.test_1d_slicing_set(flags=Noflags) + + def test_1d_slicing_set_list_npm(self): + self.test_1d_slicing_set_list(flags=Noflags) + + def test_1d_slicing_set_tuple_npm(self): + self.test_1d_slicing_set_tuple(flags=Noflags) + + def test_1d_slicing_broadcast_npm(self): + self.test_1d_slicing_broadcast(flags=Noflags) + + def test_1d_slicing_add_npm(self): + self.test_1d_slicing_add(flags=Noflags) + + def test_2d_slicing_set(self, flags=enable_pyobj_flags): + """ + 2d to 2d slice assignment + """ + pyfunc = slicing_2d_usecase_set + arraytype = types.Array(types.int32, 2, 'A') + argtys = (arraytype, arraytype, types.int32, types.int32, types.int32, + types.int32, types.int32, types.int32) + cfunc = jit(argtys, **flags)(pyfunc) + + arg = np.arange(10*10, dtype='i4').reshape(10,10) + tests = [ + (0, 10, 1, 0, 10, 1), + (2, 3, 1, 2, 3, 1), + (10, 0, 1, 10, 0, 1), + (0, 10, -1, 0, 10, -1), + (0, 10, 2, 0, 10, 2), + ] + for test in tests: + pyleft = pyfunc(np.zeros_like(arg), arg[slice(*test[0:3]), slice(*test[3:6])], *test) + cleft = cfunc(np.zeros_like(arg), arg[slice(*test[0:3]), slice(*test[3:6])], *test) + self.assertPreciseEqual(cleft, pyleft) + + def test_2d_slicing_broadcast(self, flags=enable_pyobj_flags): + """ + scalar to 2d slice assignment + """ + pyfunc = slicing_2d_usecase_set + arraytype = types.Array(types.int32, 2, 'C') + # Note heterogeneous types for the source scalar and the destination + # array (int16 -> int32[:]) + argtys = (arraytype, types.int16, types.int32, types.int32, types.int32, + types.int32, types.int32, types.int32) + cfunc = jit(argtys, **flags)(pyfunc) + + arg = np.arange(10*10, 
dtype='i4').reshape(10,10) + val = 42 + tests = [ + (0, 10, 1, 0, 10, 1), + (2, 3, 1, 2, 3, 1), + (10, 0, 1, 10, 0, 1), + (0, 10, -1, 0, 10, -1), + (0, 10, 2, 0, 10, 2), + ] + for test in tests: + pyleft = pyfunc(arg.copy(), val, *test) + cleft = cfunc(arg.copy(), val, *test) + self.assertPreciseEqual(cleft, pyleft) + + def test_2d_slicing_set_npm(self): + self.test_2d_slicing_set(flags=Noflags) + + def test_2d_slicing_broadcast_npm(self): + self.test_2d_slicing_broadcast(flags=Noflags) + + def test_setitem(self): + """ + scalar indexed assignment + """ + arr = np.arange(5) + setitem_usecase(arr, 1, 42) + self.assertEqual(arr.tolist(), [0, 42, 2, 3, 4]) + # Using a 0-d array as scalar index + setitem_usecase(arr, np.array(3).astype(np.uint16), 8) + self.assertEqual(arr.tolist(), [0, 42, 2, 8, 4]) + # Scalar Broadcasting + arr = np.arange(9).reshape(3, 3) + setitem_usecase(arr, 1, 42) + self.assertEqual(arr.tolist(), [[0, 1, 2], [42, 42, 42], [6, 7, 8]]) + + def test_setitem_broadcast(self): + """ + broadcasted array assignment + """ + # Scalar Broadcasting + dst = np.arange(5) + setitem_broadcast_usecase(dst, 42) + self.assertEqual(dst.tolist(), [42] * 5) + # 1D -> 2D Array Broadcasting + dst = np.arange(6).reshape(2, 3) + setitem_broadcast_usecase(dst, np.arange(1, 4)) + self.assertEqual(dst.tolist(), [[1, 2, 3], [1, 2, 3]]) + # 2D -> 2D Array Broadcasting + dst = np.arange(6).reshape(2, 3) + setitem_broadcast_usecase(dst, np.arange(1, 4).reshape(1, 3)) + self.assertEqual(dst.tolist(), [[1, 2, 3], [1, 2, 3]]) + # 2D -> 4D Array Broadcasting + dst = np.arange(12).reshape(2, 1, 2, 3) + setitem_broadcast_usecase(dst, np.arange(1, 4).reshape(1, 3)) + inner2 = [[1, 2, 3], [1, 2, 3]] + self.assertEqual(dst.tolist(), [[inner2]] * 2) + # 2D -> 1D Array Broadcasting + dst = np.arange(5) + setitem_broadcast_usecase(dst, np.arange(1, 6).reshape(1, 5)) + self.assertEqual(dst.tolist(), [1, 2, 3, 4, 5]) + # 4D -> 2D Array Broadcasting + dst = np.arange(6).reshape(2, 3) + 
setitem_broadcast_usecase(dst, np.arange(1, 1 + dst.size).reshape(1, 1, 2, 3)) + self.assertEqual(dst.tolist(), [[1, 2, 3], [4, 5, 6]]) + + def test_setitem_broadcast_error(self): + # higher dim assigned into lower dim + # 2D -> 1D + dst = np.arange(5) + src = np.arange(10).reshape(2, 5) + with self.assertRaises(ValueError) as raises: + setitem_broadcast_usecase(dst, src) + errmsg = str(raises.exception) + self.assertEqual('cannot broadcast source array for assignment', + errmsg) + # 3D -> 2D + dst = np.arange(5).reshape(1, 5) + src = np.arange(10).reshape(1, 2, 5) + with self.assertRaises(ValueError) as raises: + setitem_broadcast_usecase(dst, src) + errmsg = str(raises.exception) + self.assertEqual(('cannot assign slice of shape (2, 5) from input of' + + ' shape (1, 5)'), + errmsg) + # lower to higher + # 1D -> 2D + dst = np.arange(10).reshape(2, 5) + src = np.arange(4) + with self.assertRaises(ValueError) as raises: + setitem_broadcast_usecase(dst, src) + errmsg = str(raises.exception) + self.assertEqual(('cannot assign slice of shape (2, 4) from input of' + + ' shape (2, 5)'), + errmsg) + + def test_slicing_1d_broadcast(self): + # 1D -> 2D sliced (1) + dst = np.arange(6).reshape(3, 2) + src = np.arange(1, 3) + slicing_1d_usecase_set(dst, src, 0, 2, 1) + self.assertEqual(dst.tolist(), [[1, 2], [1, 2], [4, 5]]) + # 1D -> 2D sliced (2) + dst = np.arange(6).reshape(3, 2) + src = np.arange(1, 3) + slicing_1d_usecase_set(dst, src, 0, None, 2) + self.assertEqual(dst.tolist(), [[1, 2], [2, 3], [1, 2]]) + # 2D -> 2D sliced (3) + dst = np.arange(6).reshape(3, 2) + src = np.arange(1, 5).reshape(2, 2) + slicing_1d_usecase_set(dst, src, None, 2, 1) + self.assertEqual(dst.tolist(), [[1, 2], [3, 4], [4, 5]]) + + def test_setitem_readonly(self): + arr = np.arange(5) + arr.flags.writeable = False + with self.assertRaises((TypeError, errors.TypingError)) as raises: + setitem_usecase(arr, 1, 42) + self.assertIn("Cannot modify readonly array of type:", + str(raises.exception)) + + 
class TestTyping(TestCase):
    """
    Check typing of basic indexing operations
    """

    def test_layout(self):
        """
        Check an appropriate layout is inferred for the result of array
        indexing.
        """

        func = arraydecl.get_array_index_type

        cty = types.Array(types.float64, 3, 'C')
        fty = types.Array(types.float64, 3, 'F')
        aty = types.Array(types.float64, 3, 'A')

        indices = [
            # Tuples of (indexing arguments, keeps "C" layout, keeps "F" layout)
            ((), True, True),
            ((ellipsis,), True, True),

            # Indexing from the left => can sometimes keep "C" layout
            ((intp,), True, False),
            ((slice2_type,), True, False),
            ((intp, slice2_type), True, False),
            ((slice2_type, intp), False, False),
            ((slice2_type, slice2_type), False, False),
            # Strided slices = > "A" layout
            ((intp, slice3_type), False, False),
            ((slice3_type,), False, False),

            # Indexing from the right => can sometimes keep "F" layout
            ((ellipsis, intp,), False, True),
            ((ellipsis, slice2_type,), False, True),
            ((ellipsis, intp, slice2_type,), False, False),
            ((ellipsis, slice2_type, intp,), False, True),
            ((ellipsis, slice2_type, slice2_type,), False, False),
            # Strided slices = > "A" layout
            ((ellipsis, slice3_type,), False, False),
            ((ellipsis, slice3_type, intp,), False, False),

            # Indexing from both sides => only if all dimensions are indexed
            ((intp, ellipsis, intp,), False, False),
            ((slice2_type, ellipsis, slice2_type,), False, False),
            ((intp, intp, slice2_type,), True, False),
            ((intp, ellipsis, intp, slice2_type,), True, False),
            ((slice2_type, intp, intp,), False, True),
            ((slice2_type, intp, ellipsis, intp,), False, True),
            ((intp, slice2_type, intp,), False, False),
            # Strided slices = > "A" layout
            ((slice3_type, intp, intp,), False, False),
            ((intp, intp, slice3_type,), False, False),
        ]

        # "C" input array: layout is kept only when keep_c says so
        for index_tuple, keep_c, _ in indices:
            index = types.Tuple(index_tuple)
            r = func(cty, index)
            self.assertEqual(tuple(r.index), index_tuple)
            self.assertEqual(r.result.layout, 'C' if keep_c else 'A',
                             index_tuple)
            self.assertFalse(r.advanced)

        # "F" input array
        for index_tuple, _, keep_f in indices:
            index = types.Tuple(index_tuple)
            r = func(fty, index)
            self.assertEqual(tuple(r.index), index_tuple)
            self.assertEqual(r.result.layout, 'F' if keep_f else 'A',
                             index_tuple)
            self.assertFalse(r.advanced)

        # "A" input array: the result is always "A"
        for index_tuple, _, _ in indices:
            index = types.Tuple(index_tuple)
            r = func(aty, index)
            self.assertEqual(tuple(r.index), index_tuple)
            self.assertEqual(r.result.layout, 'A')
            self.assertFalse(r.advanced)


if __name__ == '__main__':
    unittest.main()


# --- new file in this diff: numba/tests/test_init_utils.py ---

import unittest

from numba.tests.support import TestCase

from numba.misc.init_utils import version_info, generate_version_info


class TestGenerateVersionInfo(TestCase):
    # Each case checks the full version_info tuple produced from a
    # version string.

    def test_major_minor_patch(self):
        expected = version_info(0, 1, 0,
                                (0, 1), (0, 1, 0),
                                "0.1.0", ('0', '1', '0'), None)
        received = generate_version_info("0.1.0")
        self.assertEqual(received, expected)

    def test_unknown(self):
        expected = version_info(None, None, None,
                                (None, None), (None, None, None),
                                '0+unknown', ('0+unknown',), None)
        received = generate_version_info('0+unknown')
        self.assertEqual(received, expected)

    def test_dev(self):
        expected = version_info(0, 1, None,
                                (0, 1), (0, 1, None),
                                '0.1.0dev0', ('0', '1', '0dev0'), None)
        received = generate_version_info('0.1.0dev0')
        self.assertEqual(received, expected)

    def test_full_rev(self):
        expected = version_info(0, 1, None,
                                (0, 1), (0, 1, None),
                                '0.1.0dev0+1.g0123456789abcdef',
                                ('0', '1', '0dev0+1', 'g0123456789abcdef'),
                                'g0123456789abcdef')
        received = generate_version_info('0.1.0dev0+1.g0123456789abcdef')
        self.assertEqual(received, expected)


if __name__ == '__main__':
    unittest.main()
# --- new file in this diff: numba/tests/test_inlining.py ---

import re
import numpy as np

from numba.tests.support import (TestCase, override_config, captured_stdout,
                                 skip_parfors_unsupported)
from numba import jit, njit
from numba.core import types, ir, postproc, compiler
from numba.core.ir_utils import (guard, find_callname, find_const,
                                 get_definition, simplify_CFG)
from numba.core.registry import CPUDispatcher
from numba.core.inline_closurecall import inline_closure_call

from numba.core.untyped_passes import (ExtractByteCode, TranslateByteCode, FixupArgs,
                                       IRProcessing, DeadBranchPrune,
                                       RewriteSemanticConstants, GenericRewrites,
                                       WithLifting, PreserveIR, InlineClosureLikes)

# FIX: NativeLowering was imported twice in this list; deduplicated.
from numba.core.typed_passes import (NopythonTypeInference, AnnotateTypes,
                                     NopythonRewrites, PreParforPass, ParforPass,
                                     DumpParforDiagnostics, NativeLowering,
                                     NativeParforLowering, IRLegalization,
                                     NoPythonBackend,
                                     ParforFusionPass, ParforPreLoweringPass)

from numba.core.compiler_machinery import FunctionPass, PassManager, register_pass
import unittest

@register_pass(analysis_only=False, mutates_CFG=True)
class InlineTestPass(FunctionPass):
    """Test pass that inlines the first call found in a single-block function."""
    _name = "inline_test_pass"

    def __init__(self):
        FunctionPass.__init__(self)

    def run_pass(self, state):
        # assuming the function has one block with one call inside
        assert len(state.func_ir.blocks) == 1
        block = list(state.func_ir.blocks.values())[0]
        for i, stmt in enumerate(block.body):
            if guard(find_callname, state.func_ir, stmt.value) is not None:
                inline_closure_call(state.func_ir, {}, block, i, lambda: None,
                                    state.typingctx, state.targetctx, (),
                                    state.typemap, state.calltypes)
                break
        # also fix up the IR
        post_proc = postproc.PostProcessor(state.func_ir)
        post_proc.run()
        post_proc.remove_dels()
        return True


def gen_pipeline(state, test_pass):
    """Build a compiler PassManager that runs `test_pass` after the
    (optional) parfor passes and before lowering."""
    name = 'inline_test'
    pm = PassManager(name)
    pm.add_pass(TranslateByteCode, "analyzing bytecode")
    pm.add_pass(FixupArgs, "fix up args")
    pm.add_pass(IRProcessing, "processing IR")
    pm.add_pass(WithLifting, "Handle with contexts")
    # pre typing
    if not state.flags.no_rewrites:
        pm.add_pass(GenericRewrites, "nopython rewrites")
        pm.add_pass(RewriteSemanticConstants, "rewrite semantic constants")
        pm.add_pass(DeadBranchPrune, "dead branch pruning")
    pm.add_pass(InlineClosureLikes,
                "inline calls to locally defined closures")
    # typing
    pm.add_pass(NopythonTypeInference, "nopython frontend")

    if state.flags.auto_parallel.enabled:
        pm.add_pass(PreParforPass, "Preprocessing for parfors")
    if not state.flags.no_rewrites:
        pm.add_pass(NopythonRewrites, "nopython rewrites")
    if state.flags.auto_parallel.enabled:
        pm.add_pass(ParforPass, "convert to parfors")
        pm.add_pass(ParforFusionPass, "fuse parfors")
        pm.add_pass(ParforPreLoweringPass, "parfor prelowering")

    pm.add_pass(test_pass, "inline test")

    # legalise
    pm.add_pass(IRLegalization, "ensure IR is legal prior to lowering")
    pm.add_pass(AnnotateTypes, "annotate types")
    pm.add_pass(PreserveIR, "preserve IR")

    # lower
    if state.flags.auto_parallel.enabled:
        pm.add_pass(NativeParforLowering, "native parfor lowering")
    else:
        pm.add_pass(NativeLowering, "native lowering")
    pm.add_pass(NoPythonBackend, "nopython mode backend")
    pm.add_pass(DumpParforDiagnostics, "dump parfor diagnostics")
    return pm

class InlineTestPipeline(compiler.CompilerBase):
    """compiler pipeline for testing inlining after optimization
    """
    def define_pipelines(self):
        pm = gen_pipeline(self.state, InlineTestPass)
        pm.finalize()
        return [pm]

class TestInlining(TestCase):
    """
    Check that jitted inner functions are inlined into outer functions,
    in nopython mode.
    Note that not all inner functions are guaranteed to be inlined.
    We just trust LLVM's inlining heuristics.
    """

    def make_pattern(self, fullname):
        """
        Make regexpr to match mangled name
        """
        parts = fullname.split('.')
        return r'_ZN?' + r''.join([r'\d+{}'.format(p) for p in parts])

    def assert_has_pattern(self, fullname, text):
        pat = self.make_pattern(fullname)
        self.assertIsNotNone(re.search(pat, text),
                             msg='expected {}'.format(pat))

    def assert_not_has_pattern(self, fullname, text):
        pat = self.make_pattern(fullname)
        self.assertIsNone(re.search(pat, text),
                          msg='unexpected {}'.format(pat))

    def test_inner_function(self):
        from numba.tests.inlining_usecases import outer_simple, \
            __name__ as prefix

        with override_config('DUMP_ASSEMBLY', True):
            with captured_stdout() as out:
                cfunc = jit((types.int32,), nopython=True)(outer_simple)
        self.assertPreciseEqual(cfunc(1), 4)
        # Check the inner function was elided from the output (which also
        # guarantees it was inlined into the outer function).
        asm = out.getvalue()
        self.assert_has_pattern('%s.outer_simple' % prefix, asm)
        self.assert_not_has_pattern('%s.inner' % prefix, asm)

    def test_multiple_inner_functions(self):
        from numba.tests.inlining_usecases import outer_multiple, \
            __name__ as prefix
        # Same with multiple inner functions, and multiple calls to
        # the same inner function (inner()).  This checks that linking in
        # the same library/module twice doesn't produce linker errors.
        with override_config('DUMP_ASSEMBLY', True):
            with captured_stdout() as out:
                cfunc = jit((types.int32,), nopython=True)(outer_multiple)
        self.assertPreciseEqual(cfunc(1), 6)
        asm = out.getvalue()
        self.assert_has_pattern('%s.outer_multiple' % prefix, asm)
        self.assert_not_has_pattern('%s.more' % prefix, asm)
        self.assert_not_has_pattern('%s.inner' % prefix, asm)

    @skip_parfors_unsupported
    def test_inline_call_after_parfor(self):
        from numba.tests.inlining_usecases import __dummy__
        # replace the call to make sure inlining doesn't cause label conflict
        # with parfor body
        def test_impl(A):
            __dummy__()
            return A.sum()
        j_func = njit(parallel=True, pipeline_class=InlineTestPipeline)(
            test_impl)
        A = np.arange(10)
        self.assertEqual(test_impl(A), j_func(A))

    @skip_parfors_unsupported
    def test_inline_update_target_def(self):

        def test_impl(a):
            if a == 1:
                b = 2
            else:
                b = 3
            return b

        func_ir = compiler.run_frontend(test_impl)
        blocks = list(func_ir.blocks.values())
        for block in blocks:
            for i, stmt in enumerate(block.body):
                # match b = 2 and replace with lambda: 2
                if (isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Var)
                        and guard(find_const, func_ir, stmt.value) == 2):
                    # replace expr with a dummy call
                    func_ir._definitions[stmt.target.name].remove(stmt.value)
                    stmt.value = ir.Expr.call(ir.Var(block.scope, "myvar", loc=stmt.loc), (), (), stmt.loc)
                    func_ir._definitions[stmt.target.name].append(stmt.value)
                    inline_closure_call(func_ir, {}, block, i, lambda: 2)
                    break

        # both incoming definitions of 'b' must survive the inlining
        self.assertEqual(len(func_ir._definitions['b']), 2)
= list(func_ir.blocks.values()) + for block in blocks: + for i, stmt in enumerate(block.body): + if (isinstance(stmt, ir.Assign) + and isinstance(stmt.value, ir.Expr) + and stmt.value.op == 'call'): + func_def = guard(get_definition, func_ir, stmt.value.func) + if (isinstance(func_def, (ir.Global, ir.FreeVar)) + and isinstance(func_def.value, CPUDispatcher)): + py_func = func_def.value.py_func + _, var_map = inline_closure_call( + func_ir, py_func.__globals__, block, i, py_func) + break + + self.assertTrue('b' in var_map) + + @skip_parfors_unsupported + def test_inline_call_branch_pruning(self): + # branch pruning pass should run properly in inlining to enable + # functions with type checks + @njit + def foo(A=None): + if A is None: + return 2 + else: + return A + + def test_impl(A=None): + return foo(A) + + @register_pass(analysis_only=False, mutates_CFG=True) + class PruningInlineTestPass(FunctionPass): + _name = "pruning_inline_test_pass" + + def __init__(self): + FunctionPass.__init__(self) + + def run_pass(self, state): + # assuming the function has one block with one call inside + assert len(state.func_ir.blocks) == 1 + block = list(state.func_ir.blocks.values())[0] + for i, stmt in enumerate(block.body): + if (guard(find_callname, state.func_ir, stmt.value) + is not None): + inline_closure_call(state.func_ir, {}, block, i, + foo.py_func, state.typingctx, state.targetctx, + (state.typemap[stmt.value.args[0].name],), + state.typemap, state.calltypes) + break + return True + + class InlineTestPipelinePrune(compiler.CompilerBase): + + def define_pipelines(self): + pm = gen_pipeline(self.state, PruningInlineTestPass) + pm.finalize() + return [pm] + + # make sure inline_closure_call runs in full pipeline + j_func = njit(pipeline_class=InlineTestPipelinePrune)(test_impl) + A = 3 + self.assertEqual(test_impl(A), j_func(A)) + self.assertEqual(test_impl(), j_func()) + + # make sure IR doesn't have branches + fir = 
j_func.overloads[(types.Omitted(None),)].metadata['preserved_ir'] + fir.blocks = simplify_CFG(fir.blocks) + self.assertEqual(len(fir.blocks), 1) + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_interpreter.py b/venv/lib/python3.10/site-packages/numba/tests/test_interpreter.py new file mode 100644 index 0000000000000000000000000000000000000000..fef9ddaa91b2e7a40162475e627afd89244b8c0b --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_interpreter.py @@ -0,0 +1,1154 @@ +""" +Test bytecode fixes provided in interpreter.py +""" +import unittest +from numba import jit, njit, objmode, typeof, literally +from numba.extending import overload +from numba.core import types +from numba.core.errors import UnsupportedBytecodeError +from numba.tests.support import ( + TestCase, + MemoryLeakMixin, + skip_unless_py10_or_later, +) + + +@njit +def sum_jit_func( + arg0=0, + arg1=0, + arg2=0, + arg3=0, + arg4=0, + arg5=0, + arg6=0, + arg7=0, + arg8=0, + arg9=0, + arg10=0, + arg11=0, + arg12=0, + arg13=0, + arg14=0, + arg15=0, + arg16=0, + arg17=0, + arg18=0, + arg19=0, + arg20=0, + arg21=0, + arg22=0, + arg23=0, + arg24=0, + arg25=0, + arg26=0, + arg27=0, + arg28=0, + arg29=0, + arg30=0, + arg31=0, + arg32=0, + arg33=0, + arg34=0, + arg35=0, + arg36=0, + arg37=0, + arg38=0, + arg39=0, + arg40=0, + arg41=0, + arg42=0, + arg43=0, + arg44=0, + arg45=0, + arg46=0, +): + return ( + arg0 + + arg1 + + arg2 + + arg3 + + arg4 + + arg5 + + arg6 + + arg7 + + arg8 + + arg9 + + arg10 + + arg11 + + arg12 + + arg13 + + arg14 + + arg15 + + arg16 + + arg17 + + arg18 + + arg19 + + arg20 + + arg21 + + arg22 + + arg23 + + arg24 + + arg25 + + arg26 + + arg27 + + arg28 + + arg29 + + arg30 + + arg31 + + arg32 + + arg33 + + arg34 + + arg35 + + arg36 + + arg37 + + arg38 + + arg39 + + arg40 + + arg41 + + arg42 + + arg43 + + arg44 + + arg45 + + arg46 + ) + + +class TestCallFunctionExPeepHole(MemoryLeakMixin, TestCase): + """ + gh 
#7812 + + Tests that check a peephole optimization for Function calls + in Python 3.10. The bytecode changes when + (n_args / 2) + n_kws > 15, which moves the arguments from + the stack into a tuple and dictionary. + + This peephole optimization updates the IR to use the original format. + There are different paths when n_args > 30 and n_args <= 30 and when + n_kws > 15 and n_kws <= 15. + """ + THRESHOLD_ARGS = 31 + THRESHOLD_KWS = 16 + + def gen_func(self, n_args, n_kws): + """ + Generates a function that calls sum_jit_func + with the desired number of args and kws. + """ + param_list = [f"arg{i}" for i in range(n_args + n_kws)] + args_list = [] + for i in range(n_args + n_kws): + # Call a function on every 5th argument to ensure + # we test function calls. + if i % 5 == 0: + arg_val = f"pow(arg{i}, 2)" + else: + arg_val = f"arg{i}" + args_list.append(arg_val) + total_params = ", ".join(param_list) + func_text = f"def impl({total_params}):\n" + func_text += " return sum_jit_func(\n" + for i in range(n_args): + func_text += f" {args_list[i]},\n" + for i in range(n_args, n_args + n_kws): + func_text += f" {param_list[i]}={args_list[i]},\n" + func_text += " )\n" + local_vars = {} + exec(func_text, {"sum_jit_func": sum_jit_func}, local_vars) + return local_vars["impl"] + + @skip_unless_py10_or_later + def test_all_args(self): + """ + Tests calling a function when n_args > 30 and + n_kws = 0. This shouldn't use the peephole, but + it should still succeed. + """ + total_args = [i for i in range(self.THRESHOLD_ARGS)] + f = self.gen_func(self.THRESHOLD_ARGS, 0) + py_func = f + cfunc = njit()(f) + a = py_func(*total_args) + b = cfunc(*total_args) + self.assertEqual(a, b) + + @skip_unless_py10_or_later + def test_all_kws(self): + """ + Tests calling a function when n_kws > 15 and + n_args = 0. 
+ """ + total_args = [i for i in range(self.THRESHOLD_KWS)] + f = self.gen_func(0, self.THRESHOLD_KWS) + py_func = f + cfunc = njit()(f) + a = py_func(*total_args) + b = cfunc(*total_args) + self.assertEqual(a, b) + + @skip_unless_py10_or_later + def test_small_args_small_kws(self): + """ + Tests calling a function when (n_args / 2) + n_kws > 15, + but n_args <= 30 and n_kws <= 15 + """ + used_args = self.THRESHOLD_ARGS - 1 + used_kws = self.THRESHOLD_KWS - 1 + total_args = [i for i in range((used_args) + (used_kws))] + f = self.gen_func(used_args, used_kws) + py_func = f + cfunc = njit()(f) + a = py_func(*total_args) + b = cfunc(*total_args) + self.assertEqual(a, b) + + @skip_unless_py10_or_later + def test_small_args_large_kws(self): + """ + Tests calling a function when (n_args / 2) + n_kws > 15, + but n_args <= 30 and n_kws > 15 + """ + used_args = self.THRESHOLD_ARGS - 1 + used_kws = self.THRESHOLD_KWS + total_args = [i for i in range((used_args) + (used_kws))] + f = self.gen_func(used_args, used_kws) + py_func = f + cfunc = njit()(f) + a = py_func(*total_args) + b = cfunc(*total_args) + self.assertEqual(a, b) + + @skip_unless_py10_or_later + def test_large_args_small_kws(self): + """ + Tests calling a function when (n_args / 2) + n_kws > 15, + but n_args > 30 and n_kws <= 15 + """ + used_args = self.THRESHOLD_ARGS + used_kws = self.THRESHOLD_KWS - 1 + total_args = [i for i in range((used_args) + (used_kws))] + f = self.gen_func(used_args, used_kws) + py_func = f + cfunc = njit()(f) + a = py_func(*total_args) + b = cfunc(*total_args) + self.assertEqual(a, b) + + @skip_unless_py10_or_later + def test_large_args_large_kws(self): + """ + Tests calling a function when (n_args / 2) + n_kws > 15, + but n_args > 30 and n_kws > 15 + """ + used_args = self.THRESHOLD_ARGS + used_kws = self.THRESHOLD_KWS + total_args = [i for i in range((used_args) + (used_kws))] + f = self.gen_func(used_args, used_kws) + py_func = f + cfunc = njit()(f) + a = py_func(*total_args) + b = 
cfunc(*total_args) + self.assertEqual(a, b) + + @skip_unless_py10_or_later + def test_large_kws_objmode(self): + """ + Tests calling an objectmode function with > 15 return values. + """ + def py_func(): + return ( + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + ) + + @njit + def objmode_func(): + """ + Wrapper to call py_func from objmode. This tests + large kws with objmode. If the definition for the + call is not properly updated this test will fail. + """ + with objmode( + a='int64', + b='int64', + c='int64', + d='int64', + e='int64', + f='int64', + g='int64', + h='int64', + i='int64', + j='int64', + k='int64', + l='int64', + m='int64', + n='int64', + o='int64', + p='int64', + ): + ( + a, + b, + c, + d, + e, + f, + g, + h, + i, + j, + k, + l, + m, + n, + o, + p + ) = py_func() + return ( + a + + b + + c + + d + + e + + f + + g + + h + + i + + j + + k + + l + + m + + n + + o + + p + ) + + a = sum(list(py_func())) + b = objmode_func() + self.assertEqual(a, b) + + @skip_unless_py10_or_later + def test_large_args_inline_controlflow(self): + """ + Tests generating large args when one of the inputs + has inlined controlflow. + """ + def inline_func(flag): + return sum_jit_func( + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1 if flag else 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + arg41=1, + ) + + with self.assertRaises(UnsupportedBytecodeError) as raises: + njit()(inline_func)(False) + self.assertIn( + 'You can resolve this issue by moving the control flow out', + str(raises.exception) + ) + + @skip_unless_py10_or_later + def test_large_args_noninlined_controlflow(self): + """ + Tests generating large args when one of the inputs + has the change suggested in the error message + for inlined control flow. 
+ """ + def inline_func(flag): + a_val = 1 if flag else 2 + return sum_jit_func( + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + a_val, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + arg41=1, + ) + + py_func = inline_func + cfunc = njit()(inline_func) + a = py_func(False) + b = cfunc(False) + self.assertEqual(a, b) + + @skip_unless_py10_or_later + def test_all_args_inline_controlflow(self): + """ + Tests generating only large args when one of the inputs + has inlined controlflow. This requires a special check + inside peep_hole_call_function_ex_to_call_function_kw + because it usually only handles varkwargs. + """ + def inline_func(flag): + return sum_jit_func( + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1 if flag else 2, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + ) + + with self.assertRaises(UnsupportedBytecodeError) as raises: + njit()(inline_func)(False) + self.assertIn( + 'You can resolve this issue by moving the control flow out', + str(raises.exception) + ) + + @skip_unless_py10_or_later + def test_all_args_noninlined_controlflow(self): + """ + Tests generating large args when one of the inputs + has the change suggested in the error message + for inlined control flow. + """ + def inline_func(flag): + a_val = 1 if flag else 2 + return sum_jit_func( + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + a_val, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + ) + + py_func = inline_func + cfunc = njit()(inline_func) + a = py_func(False) + b = cfunc(False) + self.assertEqual(a, b) + + @skip_unless_py10_or_later + def test_large_kws_inline_controlflow(self): + """ + Tests generating large kws when one of the inputs + has inlined controlflow. 
+ """ + def inline_func(flag): + return sum_jit_func( + arg0=1, + arg1=1, + arg2=1, + arg3=1, + arg4=1, + arg5=1, + arg6=1, + arg7=1, + arg8=1, + arg9=1, + arg10=1, + arg11=1, + arg12=1, + arg13=1, + arg14=1, + arg15=1 if flag else 2, + ) + + with self.assertRaises(UnsupportedBytecodeError) as raises: + njit()(inline_func)(False) + self.assertIn( + 'You can resolve this issue by moving the control flow out', + str(raises.exception) + ) + + @skip_unless_py10_or_later + def test_large_kws_noninlined_controlflow(self): + """ + Tests generating large kws when one of the inputs + has the change suggested in the error message + for inlined control flow. + """ + def inline_func(flag): + a_val = 1 if flag else 2 + return sum_jit_func( + arg0=1, + arg1=1, + arg2=1, + arg3=1, + arg4=1, + arg5=1, + arg6=1, + arg7=1, + arg8=1, + arg9=1, + arg10=1, + arg11=1, + arg12=1, + arg13=1, + arg14=1, + arg15=a_val, + ) + + py_func = inline_func + cfunc = njit()(inline_func) + a = py_func(False) + b = cfunc(False) + self.assertEqual(a, b) + + +class TestLargeConstDict(TestCase, MemoryLeakMixin): + """ + gh #7894 + + Tests that check a peephole optimization for constant + dictionaries in Python 3.10. The bytecode changes when + number of elements > 15, which splits the constant dictionary + into multiple dictionaries that are joined by a DICT_UPDATE + bytecode instruction. + + This optimization modifies the IR to rejoin dictionaries + and remove the DICT_UPDATE generated code. This then allows + code that depends on literal dictionaries or literal keys + to succeed. + """ + + @skip_unless_py10_or_later + def test_large_heterogeneous_const_dict(self): + """ + Tests that a function with a large heterogeneous constant + dictionary remains a constant. + """ + def const_func(): + # D is a heterogeneous dictionary + # so this code can only compile if + # d is constant. 
+ d = { + "A": 1, + "B": 1, + "C": 1, + "D": 1, + "E": 1, + "F": 1, + "G": 1, + "H": 1, + "I": 1, + "J": 1, + "K": 1, + "L": 1, + "M": 1, + "N": 1, + "O": 1, + "P": 1, + "Q": 1, + "R": 1, + "S": 'a', + } + return d["S"] + + py_func = const_func + cfunc = njit()(const_func) + a = py_func() + b = cfunc() + self.assertEqual(a, b) + + @skip_unless_py10_or_later + def test_large_heterogeneous_LiteralStrKeyDict_literal_values(self): + """Check the literal values for a LiteralStrKeyDict requiring + optimizations because it is heterogeneous. + """ + + def bar(d): + ... + + @overload(bar) + def ol_bar(d): + a = { + "A": 1, + "B": 1, + "C": 1, + "D": 1, + "E": 1, + "F": 1, + "G": 1, + "H": 1, + "I": 1, + "J": 1, + "K": 1, + "L": 1, + "M": 1, + "N": 1, + "O": 1, + "P": 1, + "Q": 1, + "R": 1, + "S": 'a', + } + + def specific_ty(z): + return types.literal(z) if types.maybe_literal(z) else typeof(z) + expected = {types.literal(x): specific_ty(y) for x, y in a.items()} + self.assertTrue(isinstance(d, types.LiteralStrKeyDict)) + self.assertEqual(d.literal_value, expected) + self.assertEqual(hasattr(d, 'initial_value'), False) + return lambda d: d + + @njit + def foo(): + # D is a heterogeneous dictionary + # so this code can only compile if + # d has the correct literal values. + d = { + "A": 1, + "B": 1, + "C": 1, + "D": 1, + "E": 1, + "F": 1, + "G": 1, + "H": 1, + "I": 1, + "J": 1, + "K": 1, + "L": 1, + "M": 1, + "N": 1, + "O": 1, + "P": 1, + "Q": 1, + "R": 1, + "S": 'a', + } + bar(d) + + foo() + + @skip_unless_py10_or_later + def test_large_heterogeneous_const_keys_dict(self): + """ + Tests that a function with a large heterogeneous constant + dictionary remains a constant. + """ + def const_keys_func(a): + # D is a heterogeneous dictionary + # so this code can only compile if + # d has constant keys. 
+ d = { + "A": 1, + "B": 1, + "C": 1, + "D": 1, + "E": 1, + "F": 1, + "G": 1, + "H": 1, + "I": 1, + "J": 1, + "K": 1, + "L": 1, + "M": 1, + "N": 1, + "O": 1, + "P": 1, + "Q": 1, + "R": 1, + "S": a, + } + return d["S"] + + py_func = const_keys_func + cfunc = njit()(const_keys_func) + # str to make the dictionary heterogeneous. + value = "a_string" + a = py_func(value) + b = cfunc(value) + self.assertEqual(a, b) + + @skip_unless_py10_or_later + def test_large_dict_mutation_not_carried(self): + """Checks that the optimization for large dictionaries + do not incorrectly update initial values due to other + mutations. + """ + def bar(d): + ... + + @overload(bar) + def ol_bar(d): + a = { + "A": 1, + "B": 1, + "C": 1, + "D": 1, + "E": 1, + "F": 1, + "G": 1, + "H": 1, + "I": 1, + "J": 1, + "K": 1, + "L": 1, + "M": 1, + "N": 1, + "O": 1, + "P": 1, + "Q": 1, + "R": 1, + "S": 7, + } + if d.initial_value is None: + return lambda d: literally(d) + self.assertTrue(isinstance(d, types.DictType)) + self.assertEqual(d.initial_value, a) + return lambda d: d + + @njit + def foo(): + # This dictionary is mutated, check the initial_value carries + # correctly and is not mutated + d = { + "A": 1, + "B": 1, + "C": 1, + "D": 1, + "E": 1, + "F": 1, + "G": 1, + "H": 1, + "I": 1, + "J": 1, + "K": 1, + "L": 1, + "M": 1, + "N": 1, + "O": 1, + "P": 1, + "Q": 1, + "R": 1, + "S": 7, + } + d['X'] = 4 + bar(d) + + foo() + + @skip_unless_py10_or_later + def test_usercode_update_use_d2(self): + """ + Tests an example using a regular update is + not modified by the optimization. + """ + + def check_before(x): + pass + + def check_after(x): + pass + + checked_before = False + checked_after = False + + @overload(check_before, prefer_literal=True) + def ol_check_before(d): + nonlocal checked_before + # Typing iteration from d1.update(d2) + # may reset the initial values to None, + # so we only check on the first iteration. 
+ if not checked_before: + checked_before = True + a = { + "a": 1, + "b": 2, + "c": 3, + } + self.assertTrue(isinstance(d, types.DictType)) + self.assertEqual(d.initial_value, a) + + return lambda d: None + + @overload(check_after, prefer_literal=True) + def ol_check_after(d): + nonlocal checked_after + # Typing iteration from d1.update(d2) + # may reset the initial values to None, + # so we only check on the first iteration. + if not checked_after: + checked_after = True + self.assertTrue(isinstance(d, types.DictType)) + self.assertTrue(d.initial_value is None) + + return lambda d: None + + def const_dict_func(): + """ + Dictionary update between two constant + dictionaries. This verifies d2 doesn't + get incorrectly removed. + """ + d1 = { + "a": 1, + "b": 2, + "c": 3, + } + d2 = { + "d": 4, + "e": 4 + } + check_before(d1) + d1.update(d2) + check_after(d1) + # Create a use of d2 in a new block. + if len(d1) > 4: + return d2 + return d1 + + py_func = const_dict_func + cfunc = njit()(const_dict_func) + a = py_func() + b = cfunc() + self.assertEqual(a, b) + + @skip_unless_py10_or_later + def test_large_const_dict_inline_controlflow(self): + """ + Tests generating a large dictionary when one of + the inputs requires inline control flow + has the change suggested in the error message + for inlined control flow. + """ + def inline_func(a, flag): + # D is a heterogeneous dictionary + # so this code can only compile if + # d has constant keys. 
+ d = { + "A": 1, + "B": 1, + "C": 1, + "D": 1, + "E": 1, + "F": 1, + "G": 1, + "H": 1 if flag else 2, + "I": 1, + "J": 1, + "K": 1, + "L": 1, + "M": 1, + "N": 1, + "O": 1, + "P": 1, + "Q": 1, + "R": 1, + "S": a, + } + return d["S"] + + with self.assertRaises(UnsupportedBytecodeError) as raises: + njit()(inline_func)("a_string", False) + self.assertIn( + 'You can resolve this issue by moving the control flow out', + str(raises.exception) + ) + + @skip_unless_py10_or_later + def test_large_const_dict_noninline_controlflow(self): + """ + Tests generating large constant dict when one of the + inputs has the change suggested in the error message + for inlined control flow. + """ + def non_inline_func(a, flag): + # D is a heterogeneous dictionary + # so this code can only compile if + # d has constant keys. + val = 1 if flag else 2 + d = { + "A": 1, + "B": 1, + "C": 1, + "D": 1, + "E": 1, + "F": 1, + "G": 1, + "H": val, + "I": 1, + "J": 1, + "K": 1, + "L": 1, + "M": 1, + "N": 1, + "O": 1, + "P": 1, + "Q": 1, + "R": 1, + "S": a, + } + return d["S"] + + py_func = non_inline_func + cfunc = njit()(non_inline_func) + value = "a_string" + a = py_func(value, False) + b = cfunc(value, False) + self.assertEqual(a, b) + + @skip_unless_py10_or_later + def test_fuse_twice_literal_values(self): + """ + Tests that the correct literal values are generated + for a dictionary that produces two DICT_UPDATE + bytecode entries for the same dictionary. + """ + def bar(d): + ... 
+ + @overload(bar) + def ol_bar(d): + a = { + "a1" : 1, + "a2" : 2, + "a3" : 3, + "a4" : 4, + "a5" : 5, + "a6" : 6, + "a7" : 7, + "a8" : 8, + "a9" : 9, + "a10" : 10, + "a11" : 11, + "a12" : 12, + "a13" : 13, + "a14" : 14, + "a15" : 15, + "a16" : 16, + "a17" : 17, + "a18" : 18, + "a19" : 19, + "a20" : 20, + "a21" : 21, + "a22" : 22, + "a23" : 23, + "a24" : 24, + "a25" : 25, + "a26" : 26, + "a27" : 27, + "a28" : 28, + "a29" : 29, + "a30" : 30, + "a31" : 31, + "a32" : 32, + "a33" : 33, + "a34" : 34, # 34 items is the limit of + # (LOAD_CONST + MAP_ADD)^n + DICT_UPDATE + "a35" : 35, # 35 Generates an additional BUILD_MAP + DICT_UPDATE + } + if d.initial_value is None: + return lambda d: literally(d) + self.assertTrue(isinstance(d, types.DictType)) + self.assertEqual(d.initial_value, a) + return lambda d: d + + @njit + def foo(): + # This dictionary is mutated, check the initial_value carries + # correctly and is not mutated + d = { + "a1" : 1, + "a2" : 2, + "a3" : 3, + "a4" : 4, + "a5" : 5, + "a6" : 6, + "a7" : 7, + "a8" : 8, + "a9" : 9, + "a10" : 10, + "a11" : 11, + "a12" : 12, + "a13" : 13, + "a14" : 14, + "a15" : 15, + "a16" : 16, + "a17" : 17, + "a18" : 18, + "a19" : 19, + "a20" : 20, + "a21" : 21, + "a22" : 22, + "a23" : 23, + "a24" : 24, + "a25" : 25, + "a26" : 26, + "a27" : 27, + "a28" : 28, + "a29" : 29, + "a30" : 30, + "a31" : 31, + "a32" : 32, + "a33" : 33, + "a34" : 34, # 34 items is the limit of + # (LOAD_CONST + MAP_ADD)^n + DICT_UPDATE + "a35" : 35, # 35 Generates an additional BUILD_MAP + DICT_UPDATE + } + bar(d) + + foo() + + +class TestListExtendInStarArgNonTupleIterable(MemoryLeakMixin, TestCase): + """Test `fn(pos_arg0, pos_arg1, *args)` where args is a non-tuple iterable. + + Python 3.9+ will generate LIST_EXTEND bytecode to combine the positional + arguments with the `*args`. + + See #8059 + + NOTE: At the moment, there are no meaningful tests for NoPython because the + lack of support for `tuple(iterable)` for most iterable types. 
+ """ + def test_list_extend_forceobj(self): + def consumer(*x): + return x + + @jit(forceobj=True) + def foo(x): + return consumer(1, 2, *x) + + got = foo("ijo") + expect = foo.py_func("ijo") + self.assertEqual(got, (1, 2, "i", "j", "o")) + self.assertEqual(got, expect) + + +if __name__ == "__main__": + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_interproc.py b/venv/lib/python3.10/site-packages/numba/tests/test_interproc.py new file mode 100644 index 0000000000000000000000000000000000000000..56265dfbe4c7a7bad85ed863fdef7315121b4a07 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_interproc.py @@ -0,0 +1,47 @@ +import gc + +from numba import jit, int32 +import unittest + + +def foo(a, b): + return a + b + + +def bar(a, b): + return cfoo(a, b) + b + +@jit +def inner(x, y): + return x + y + +@jit(nopython=True) +def outer(x, y): + return inner(x, y) + + +class TestInterProc(unittest.TestCase): + + def test_bar_call_foo(self): + global cfoo + cfoo = jit((int32, int32), nopython=True)(foo) + cbar = jit((int32, int32), nopython=True)(bar) + self.assertEqual(cbar(1, 2), 1 + 2 + 2) + + def test_bar_call_foo_compiled_twice(self): + # When a function is compiled twice, then called from another + # compiled function, check that the right target is called. 
+ # (otherwise, LLVM would assert out or crash) + global cfoo + for i in range(2): + cfoo = jit((int32, int32), nopython=True)(foo) + gc.collect() + cbar = jit((int32, int32), nopython=True)(bar) + self.assertEqual(cbar(1, 2), 1 + 2 + 2) + + def test_callsite_compilation(self): + self.assertEqual(outer(1, 2), 1 + 2) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_intwidth.py b/venv/lib/python3.10/site-packages/numba/tests/test_intwidth.py new file mode 100644 index 0000000000000000000000000000000000000000..76c0effebf14364f0001db6736da2b68766f0dfe --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_intwidth.py @@ -0,0 +1,90 @@ +import unittest + +import math +import sys + +from numba import jit +from numba.core import utils +from numba.tests.support import TestCase, tag + + +max_uint64 = 18446744073709551615 + +def usecase_uint64_global(): + return max_uint64 + +def usecase_uint64_constant(): + return 18446744073709551615 + +def usecase_uint64_func(): + return max(18446744073709551614, 18446744073709551615) + +def usecase_int64_pos(): + return 9223372036854775807 + +def usecase_int64_neg(): + return -9223372036854775808 + +def usecase_int64_func(): + return (max(9223372036854775807, -9223372036854775808) + + min(9223372036854775807, -9223372036854775808)) + + +class IntWidthTest(TestCase): + + def check_nullary_func(self, pyfunc, **kwargs): + cfunc = jit(**kwargs)(pyfunc) + self.assertPreciseEqual(cfunc(), pyfunc()) + + def test_global_uint64(self, nopython=False): + pyfunc = usecase_uint64_global + self.check_nullary_func(pyfunc, nopython=nopython) + + def test_global_uint64_npm(self): + self.test_global_uint64(nopython=True) + + def test_constant_uint64(self, nopython=False): + pyfunc = usecase_uint64_constant + self.check_nullary_func(pyfunc, nopython=nopython) + + def test_constant_uint64_npm(self): + self.test_constant_uint64(nopython=True) + + def 
test_constant_uint64_function_call(self, nopython=False): + pyfunc = usecase_uint64_func + self.check_nullary_func(pyfunc, nopython=nopython) + + def test_constant_uint64_function_call_npm(self): + self.test_constant_uint64_function_call(nopython=True) + + def test_bit_length(self): + f = utils.bit_length + self.assertEqual(f(0x7f), 7) + self.assertEqual(f(-0x7f), 7) + self.assertEqual(f(0x80), 8) + self.assertEqual(f(-0x80), 7) + self.assertEqual(f(0xff), 8) + self.assertEqual(f(-0xff), 8) + self.assertEqual(f(0x100), 9) + self.assertEqual(f(-0x100), 8) + self.assertEqual(f(-0x101), 9) + self.assertEqual(f(0x7fffffff), 31) + self.assertEqual(f(-0x7fffffff), 31) + self.assertEqual(f(-0x80000000), 31) + self.assertEqual(f(0x80000000), 32) + self.assertEqual(f(0xffffffff), 32) + self.assertEqual(f(0xffffffffffffffff), 64) + self.assertEqual(f(0x10000000000000000), 65) + + def test_constant_int64(self, nopython=False): + self.check_nullary_func(usecase_int64_pos, nopython=nopython) + self.check_nullary_func(usecase_int64_neg, nopython=nopython) + self.check_nullary_func(usecase_int64_func, nopython=nopython) + + def test_constant_int64_npm(self): + self.test_constant_int64(nopython=True) + + +if __name__ == '__main__': + unittest.main() + diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_ir.py b/venv/lib/python3.10/site-packages/numba/tests/test_ir.py new file mode 100644 index 0000000000000000000000000000000000000000..819c784694df5f7d1747cad1c0558264d32ffb31 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_ir.py @@ -0,0 +1,560 @@ +import unittest +from unittest.case import TestCase +import warnings +import numpy as np + +from numba import objmode +from numba.core import ir, compiler +from numba.core import errors +from numba.core.compiler import ( + CompilerBase, + ReconstructSSA, +) +from numba.core.compiler_machinery import ( + FunctionPass, + PassManager, + register_pass, +) +from numba.core.untyped_passes import ( + 
TranslateByteCode, + IRProcessing, +) +from numba import njit + + +class TestIR(unittest.TestCase): + + def test_IRScope(self): + filename = "" + top = ir.Scope(parent=None, loc=ir.Loc(filename=filename, line=1)) + local = ir.Scope(parent=top, loc=ir.Loc(filename=filename, line=2)) + + apple = local.define('apple', loc=ir.Loc(filename=filename, line=3)) + self.assertIs(local.get('apple'), apple) + self.assertEqual(len(local.localvars), 1) + + orange = top.define('orange', loc=ir.Loc(filename=filename, line=4)) + self.assertEqual(len(local.localvars), 1) + self.assertEqual(len(top.localvars), 1) + self.assertIs(top.get('orange'), orange) + self.assertIs(local.get('orange'), orange) + + more_orange = local.define('orange', loc=ir.Loc(filename=filename, + line=5)) + self.assertIs(top.get('orange'), orange) + self.assertIsNot(local.get('orange'), not orange) + self.assertIs(local.get('orange'), more_orange) + + try: + local.define('orange', loc=ir.Loc(filename=filename, line=5)) + except ir.RedefinedError: + pass + else: + self.fail("Expecting an %s" % ir.RedefinedError) + + +class CheckEquality(unittest.TestCase): + + var_a = ir.Var(None, 'a', ir.unknown_loc) + var_b = ir.Var(None, 'b', ir.unknown_loc) + var_c = ir.Var(None, 'c', ir.unknown_loc) + var_d = ir.Var(None, 'd', ir.unknown_loc) + var_e = ir.Var(None, 'e', ir.unknown_loc) + loc1 = ir.Loc('mock', 1, 0) + loc2 = ir.Loc('mock', 2, 0) + loc3 = ir.Loc('mock', 3, 0) + + def check(self, base, same=[], different=[]): + for s in same: + self.assertTrue(base == s) + for d in different: + self.assertTrue(base != d) + + +class TestIRMeta(CheckEquality): + """ + Tests IR node meta, like Loc and Scope + """ + def test_loc(self): + a = ir.Loc('file', 1, 0) + b = ir.Loc('file', 1, 0) + c = ir.Loc('pile', 1, 0) + d = ir.Loc('file', 2, 0) + e = ir.Loc('file', 1, 1) + self.check(a, same=[b,], different=[c, d, e]) + + f = ir.Loc('file', 1, 0, maybe_decorator=False) + g = ir.Loc('file', 1, 0, maybe_decorator=True) + 
self.check(a, same=[f, g]) + + def test_scope(self): + parent1 = ir.Scope(None, self.loc1) + parent2 = ir.Scope(None, self.loc1) + parent3 = ir.Scope(None, self.loc2) + self.check(parent1, same=[parent2, parent3,]) + + a = ir.Scope(parent1, self.loc1) + b = ir.Scope(parent1, self.loc1) + c = ir.Scope(parent1, self.loc2) + d = ir.Scope(parent3, self.loc1) + self.check(a, same=[b, c, d]) + + # parent1 and parent2 are equal, so children referring to either parent + # should be equal + e = ir.Scope(parent2, self.loc1) + self.check(a, same=[e,]) + + +class TestIRNodes(CheckEquality): + """ + Tests IR nodes + """ + def test_terminator(self): + # terminator base class inst should always be equal + t1 = ir.Terminator() + t2 = ir.Terminator() + self.check(t1, same=[t2]) + + def test_jump(self): + a = ir.Jump(1, self.loc1) + b = ir.Jump(1, self.loc1) + c = ir.Jump(1, self.loc2) + d = ir.Jump(2, self.loc1) + self.check(a, same=[b, c], different=[d]) + + def test_return(self): + a = ir.Return(self.var_a, self.loc1) + b = ir.Return(self.var_a, self.loc1) + c = ir.Return(self.var_a, self.loc2) + d = ir.Return(self.var_b, self.loc1) + self.check(a, same=[b, c], different=[d]) + + def test_raise(self): + a = ir.Raise(self.var_a, self.loc1) + b = ir.Raise(self.var_a, self.loc1) + c = ir.Raise(self.var_a, self.loc2) + d = ir.Raise(self.var_b, self.loc1) + self.check(a, same=[b, c], different=[d]) + + def test_staticraise(self): + a = ir.StaticRaise(AssertionError, None, self.loc1) + b = ir.StaticRaise(AssertionError, None, self.loc1) + c = ir.StaticRaise(AssertionError, None, self.loc2) + e = ir.StaticRaise(AssertionError, ("str",), self.loc1) + d = ir.StaticRaise(RuntimeError, None, self.loc1) + self.check(a, same=[b, c], different=[d, e]) + + def test_branch(self): + a = ir.Branch(self.var_a, 1, 2, self.loc1) + b = ir.Branch(self.var_a, 1, 2, self.loc1) + c = ir.Branch(self.var_a, 1, 2, self.loc2) + d = ir.Branch(self.var_b, 1, 2, self.loc1) + e = ir.Branch(self.var_a, 2, 2, 
self.loc1) + f = ir.Branch(self.var_a, 1, 3, self.loc1) + self.check(a, same=[b, c], different=[d, e, f]) + + def test_expr(self): + a = ir.Expr('some_op', self.loc1) + b = ir.Expr('some_op', self.loc1) + c = ir.Expr('some_op', self.loc2) + d = ir.Expr('some_other_op', self.loc1) + self.check(a, same=[b, c], different=[d]) + + def test_setitem(self): + a = ir.SetItem(self.var_a, self.var_b, self.var_c, self.loc1) + b = ir.SetItem(self.var_a, self.var_b, self.var_c, self.loc1) + c = ir.SetItem(self.var_a, self.var_b, self.var_c, self.loc2) + d = ir.SetItem(self.var_d, self.var_b, self.var_c, self.loc1) + e = ir.SetItem(self.var_a, self.var_d, self.var_c, self.loc1) + f = ir.SetItem(self.var_a, self.var_b, self.var_d, self.loc1) + self.check(a, same=[b, c], different=[d, e, f]) + + def test_staticsetitem(self): + a = ir.StaticSetItem(self.var_a, 1, self.var_b, self.var_c, self.loc1) + b = ir.StaticSetItem(self.var_a, 1, self.var_b, self.var_c, self.loc1) + c = ir.StaticSetItem(self.var_a, 1, self.var_b, self.var_c, self.loc2) + d = ir.StaticSetItem(self.var_d, 1, self.var_b, self.var_c, self.loc1) + e = ir.StaticSetItem(self.var_a, 2, self.var_b, self.var_c, self.loc1) + f = ir.StaticSetItem(self.var_a, 1, self.var_d, self.var_c, self.loc1) + g = ir.StaticSetItem(self.var_a, 1, self.var_b, self.var_d, self.loc1) + self.check(a, same=[b, c], different=[d, e, f, g]) + + def test_delitem(self): + a = ir.DelItem(self.var_a, self.var_b, self.loc1) + b = ir.DelItem(self.var_a, self.var_b, self.loc1) + c = ir.DelItem(self.var_a, self.var_b, self.loc2) + d = ir.DelItem(self.var_c, self.var_b, self.loc1) + e = ir.DelItem(self.var_a, self.var_c, self.loc1) + self.check(a, same=[b, c], different=[d, e]) + + def test_del(self): + a = ir.Del(self.var_a.name, self.loc1) + b = ir.Del(self.var_a.name, self.loc1) + c = ir.Del(self.var_a.name, self.loc2) + d = ir.Del(self.var_b.name, self.loc1) + self.check(a, same=[b, c], different=[d]) + + def test_setattr(self): + a = 
ir.SetAttr(self.var_a, 'foo', self.var_b, self.loc1) + b = ir.SetAttr(self.var_a, 'foo', self.var_b, self.loc1) + c = ir.SetAttr(self.var_a, 'foo', self.var_b, self.loc2) + d = ir.SetAttr(self.var_c, 'foo', self.var_b, self.loc1) + e = ir.SetAttr(self.var_a, 'bar', self.var_b, self.loc1) + f = ir.SetAttr(self.var_a, 'foo', self.var_c, self.loc1) + self.check(a, same=[b, c], different=[d, e, f]) + + def test_delattr(self): + a = ir.DelAttr(self.var_a, 'foo', self.loc1) + b = ir.DelAttr(self.var_a, 'foo', self.loc1) + c = ir.DelAttr(self.var_a, 'foo', self.loc2) + d = ir.DelAttr(self.var_c, 'foo', self.loc1) + e = ir.DelAttr(self.var_a, 'bar', self.loc1) + self.check(a, same=[b, c], different=[d, e]) + + def test_assign(self): + a = ir.Assign(self.var_a, self.var_b, self.loc1) + b = ir.Assign(self.var_a, self.var_b, self.loc1) + c = ir.Assign(self.var_a, self.var_b, self.loc2) + d = ir.Assign(self.var_c, self.var_b, self.loc1) + e = ir.Assign(self.var_a, self.var_c, self.loc1) + self.check(a, same=[b, c], different=[d, e]) + + def test_print(self): + a = ir.Print((self.var_a,), self.var_b, self.loc1) + b = ir.Print((self.var_a,), self.var_b, self.loc1) + c = ir.Print((self.var_a,), self.var_b, self.loc2) + d = ir.Print((self.var_c,), self.var_b, self.loc1) + e = ir.Print((self.var_a,), self.var_c, self.loc1) + self.check(a, same=[b, c], different=[d, e]) + + def test_storemap(self): + a = ir.StoreMap(self.var_a, self.var_b, self.var_c, self.loc1) + b = ir.StoreMap(self.var_a, self.var_b, self.var_c, self.loc1) + c = ir.StoreMap(self.var_a, self.var_b, self.var_c, self.loc2) + d = ir.StoreMap(self.var_d, self.var_b, self.var_c, self.loc1) + e = ir.StoreMap(self.var_a, self.var_d, self.var_c, self.loc1) + f = ir.StoreMap(self.var_a, self.var_b, self.var_d, self.loc1) + self.check(a, same=[b, c], different=[d, e, f]) + + def test_yield(self): + a = ir.Yield(self.var_a, self.loc1, 0) + b = ir.Yield(self.var_a, self.loc1, 0) + c = ir.Yield(self.var_a, self.loc2, 0) + d = 
ir.Yield(self.var_b, self.loc1, 0) + e = ir.Yield(self.var_a, self.loc1, 1) + self.check(a, same=[b, c], different=[d, e]) + + def test_enterwith(self): + a = ir.EnterWith(self.var_a, 0, 1, self.loc1) + b = ir.EnterWith(self.var_a, 0, 1, self.loc1) + c = ir.EnterWith(self.var_a, 0, 1, self.loc2) + d = ir.EnterWith(self.var_b, 0, 1, self.loc1) + e = ir.EnterWith(self.var_a, 1, 1, self.loc1) + f = ir.EnterWith(self.var_a, 0, 2, self.loc1) + self.check(a, same=[b, c], different=[d, e, f]) + + def test_arg(self): + a = ir.Arg('foo', 0, self.loc1) + b = ir.Arg('foo', 0, self.loc1) + c = ir.Arg('foo', 0, self.loc2) + d = ir.Arg('bar', 0, self.loc1) + e = ir.Arg('foo', 1, self.loc1) + self.check(a, same=[b, c], different=[d, e]) + + def test_const(self): + a = ir.Const(1, self.loc1) + b = ir.Const(1, self.loc1) + c = ir.Const(1, self.loc2) + d = ir.Const(2, self.loc1) + self.check(a, same=[b, c], different=[d]) + + def test_global(self): + a = ir.Global('foo', 0, self.loc1) + b = ir.Global('foo', 0, self.loc1) + c = ir.Global('foo', 0, self.loc2) + d = ir.Global('bar', 0, self.loc1) + e = ir.Global('foo', 1, self.loc1) + self.check(a, same=[b, c], different=[d, e]) + + def test_var(self): + a = ir.Var(None, 'foo', self.loc1) + b = ir.Var(None, 'foo', self.loc1) + c = ir.Var(None, 'foo', self.loc2) + d = ir.Var(ir.Scope(None, ir.unknown_loc), 'foo', self.loc1) + e = ir.Var(None, 'bar', self.loc1) + self.check(a, same=[b, c, d], different=[e]) + + def test_undefinedtype(self): + a = ir.UndefinedType() + b = ir.UndefinedType() + self.check(a, same=[b]) + + def test_loop(self): + a = ir.Loop(1, 3) + b = ir.Loop(1, 3) + c = ir.Loop(2, 3) + d = ir.Loop(1, 4) + self.check(a, same=[b], different=[c, d]) + + def test_with(self): + a = ir.With(1, 3) + b = ir.With(1, 3) + c = ir.With(2, 3) + d = ir.With(1, 4) + self.check(a, same=[b], different=[c, d]) + + +# used later +_GLOBAL = 1234 + + +class TestIRCompounds(CheckEquality): + """ + Tests IR concepts that have state + """ + def 
test_varmap(self): + a = ir.VarMap() + a.define(self.var_a, 'foo') + a.define(self.var_b, 'bar') + + b = ir.VarMap() + b.define(self.var_a, 'foo') + b.define(self.var_b, 'bar') + + c = ir.VarMap() + c.define(self.var_a, 'foo') + c.define(self.var_c, 'bar') + + self.check(a, same=[b], different=[c]) + + def test_block(self): + def gen_block(): + parent = ir.Scope(None, self.loc1) + tmp = ir.Block(parent, self.loc2) + assign1 = ir.Assign(self.var_a, self.var_b, self.loc3) + assign2 = ir.Assign(self.var_a, self.var_c, self.loc3) + assign3 = ir.Assign(self.var_c, self.var_b, self.loc3) + tmp.append(assign1) + tmp.append(assign2) + tmp.append(assign3) + return tmp + + a = gen_block() + b = gen_block() + c = gen_block().append(ir.Assign(self.var_a, self.var_b, self.loc3)) + + self.check(a, same=[b], different=[c]) + + def test_functionir(self): + + def run_frontend(x): + return compiler.run_frontend(x, emit_dels=True) + + # this creates a function full of all sorts of things to ensure the IR + # is pretty involved, it then compares two instances of the compiled + # function IR to check the IR is the same invariant of objects, and then + # a tiny mutation is made to the IR in the second function and detection + # of this change is checked. 
+ def gen(): + _FREEVAR = 0xCAFE + + def foo(a, b, c=12, d=1j, e=None): + f = a + b + a += _FREEVAR + g = np.zeros(c, dtype=np.complex64) + h = f + g + i = 1j / d + if np.abs(i) > 0: + k = h / i + l = np.arange(1, c + 1) + with objmode(): + print(e, k) + m = np.sqrt(l - g) + if np.abs(m[0]) < 1: + n = 0 + for o in range(a): + n += 0 + if np.abs(n) < 3: + break + n += m[2] + p = g / l + q = [] + for r in range(len(p)): + q.append(p[r]) + if r > 4 + 1: + with objmode(s='intp', t='complex128'): + s = 123 + t = 5 + if s > 122: + t += s + t += q[0] + _GLOBAL + + return f + o + r + t + r + a + n + + return foo + + x = gen() + y = gen() + x_ir = run_frontend(x) + y_ir = run_frontend(y) + + self.assertTrue(x_ir.equal_ir(y_ir)) + + def check_diffstr(string, pointing_at=[]): + lines = string.splitlines() + for item in pointing_at: + for l in lines: + if l.startswith('->'): + if item in l: + break + else: + raise AssertionError("Could not find %s " % item) + + self.assertIn("IR is considered equivalent", x_ir.diff_str(y_ir)) + + # minor mutation, simply switch branch targets on last branch + for label in reversed(list(y_ir.blocks.keys())): + blk = y_ir.blocks[label] + if isinstance(blk.body[-1], ir.Branch): + ref = blk.body[-1] + ref.truebr, ref.falsebr = ref.falsebr, ref.truebr + break + + check_diffstr(x_ir.diff_str(y_ir), ['branch']) + + z = gen() + self.assertFalse(x_ir.equal_ir(y_ir)) + + z_ir = run_frontend(z) + + change_set = set() + for label in reversed(list(z_ir.blocks.keys())): + blk = z_ir.blocks[label] + ref = blk.body[:-1] + idx = None + for i in range(len(ref) - 1): + # look for two adjacent Del + if (isinstance(ref[i], ir.Del) and + isinstance(ref[i + 1], ir.Del)): + idx = i + break + if idx is not None: + b = blk.body + change_set.add(str(b[idx + 1])) + change_set.add(str(b[idx])) + b[idx], b[idx + 1] = b[idx + 1], b[idx] + break + + # ensure that a mutation occurred. 
+ self.assertTrue(change_set) + + self.assertFalse(x_ir.equal_ir(z_ir)) + self.assertEqual(len(change_set), 2) + for item in change_set: + self.assertTrue(item.startswith('del ')) + check_diffstr(x_ir.diff_str(z_ir), change_set) + + def foo(a, b): + c = a * 2 + d = c + b + e = np.sqrt(d) + return e + + def bar(a, b): # same as foo + c = a * 2 + d = c + b + e = np.sqrt(d) + return e + + def baz(a, b): + c = a * 2 + d = b + c + e = np.sqrt(d + 1) + return e + + foo_ir = run_frontend(foo) + bar_ir = run_frontend(bar) + self.assertTrue(foo_ir.equal_ir(bar_ir)) + self.assertIn("IR is considered equivalent", foo_ir.diff_str(bar_ir)) + + baz_ir = run_frontend(baz) + self.assertFalse(foo_ir.equal_ir(baz_ir)) + tmp = foo_ir.diff_str(baz_ir) + self.assertIn("Other block contains more statements", tmp) + check_diffstr(tmp, ["c + b", "b + c"]) + + +class TestIRPedanticChecks(TestCase): + def test_var_in_scope_assumption(self): + # Create a pass that clears ir.Scope in ir.Block + @register_pass(mutates_CFG=False, analysis_only=False) + class RemoveVarInScope(FunctionPass): + _name = "_remove_var_in_scope" + + def __init__(self): + FunctionPass.__init__(self) + + # implement method to do the work, "state" is the internal compiler + # state from the CompilerBase instance. + def run_pass(self, state): + func_ir = state.func_ir + # walk the blocks + for blk in func_ir.blocks.values(): + oldscope = blk.scope + # put in an empty Scope + blk.scope = ir.Scope(parent=oldscope.parent, + loc=oldscope.loc) + return True + + # Create a pass that always fails, to stop the compiler + @register_pass(mutates_CFG=False, analysis_only=False) + class FailPass(FunctionPass): + _name = "_fail" + + def __init__(self, *args, **kwargs): + FunctionPass.__init__(self) + + def run_pass(self, state): + # This is unreachable. SSA pass should have raised before this + # pass when run with `error.NumbaPedanticWarning`s raised as + # errors. 
+ raise AssertionError("unreachable") + + class MyCompiler(CompilerBase): + def define_pipelines(self): + pm = PassManager("testing pm") + pm.add_pass(TranslateByteCode, "analyzing bytecode") + pm.add_pass(IRProcessing, "processing IR") + pm.add_pass(RemoveVarInScope, "_remove_var_in_scope") + pm.add_pass(ReconstructSSA, "ssa") + pm.add_pass(FailPass, "_fail") + pm.finalize() + return [pm] + + @njit(pipeline_class=MyCompiler) + def dummy(x): + # To trigger SSA and the pedantic check, this function must have + # multiple assignments to the same variable in different blocks. + a = 1 + b = 2 + if a < b: + a = 2 + else: + b = 3 + return a, b + + with warnings.catch_warnings(): + # Make NumbaPedanticWarning an error + warnings.simplefilter("error", errors.NumbaPedanticWarning) + # Catch NumbaIRAssumptionWarning + with self.assertRaises(errors.NumbaIRAssumptionWarning) as raises: + dummy(1) + # Verify the error message + self.assertRegex( + str(raises.exception), + r"variable '[a-z]' is not in scope", + ) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_ir_inlining.py b/venv/lib/python3.10/site-packages/numba/tests/test_ir_inlining.py new file mode 100644 index 0000000000000000000000000000000000000000..40dc4316f8dc9443bd9f9b0a16dce46cc8daaee5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_ir_inlining.py @@ -0,0 +1,1524 @@ +""" +This tests the inline kwarg to @jit and @overload etc, it has nothing to do with +LLVM or low level inlining. 
+""" + +import operator +import warnings +from itertools import product +import numpy as np + +from numba import njit, typeof, literally, prange +from numba.core import types, ir, ir_utils, cgutils, errors, utils +from numba.core.extending import ( + overload, + overload_method, + overload_attribute, + register_model, + models, + make_attribute_wrapper, + intrinsic, + register_jitable, +) +from numba.core.cpu import InlineOptions +from numba.core.compiler import DefaultPassBuilder, CompilerBase +from numba.core.typed_passes import InlineOverloads +from numba.core.typing import signature +from numba.tests.support import (TestCase, unittest, + MemoryLeakMixin, IRPreservingTestPipeline, + skip_parfors_unsupported, + ignore_internal_warnings) + + +# this global has the same name as the global in inlining_usecases.py, it +# is here to check that inlined functions bind to their own globals +_GLOBAL1 = -50 + + +@njit(inline='always') +def _global_func(x): + return x + 1 + + +# to be overloaded +def _global_defn(x): + return x + 1 + + +@overload(_global_defn, inline='always') +def _global_overload(x): + return _global_defn + + +class InliningBase(TestCase): + + _DEBUG = False + + inline_opt_as_bool = {'always': True, 'never': False} + + # -------------------------------------------------------------------------- + # Example cost model + + def sentinel_17_cost_model(self, func_ir): + # sentinel 17 cost model, this is a fake cost model that will return + # True (i.e. 
inline) if the ir.FreeVar(17) is found in the func_ir, + for blk in func_ir.blocks.values(): + for stmt in blk.body: + if isinstance(stmt, ir.Assign): + if isinstance(stmt.value, ir.FreeVar): + if stmt.value.value == 17: + return True + return False + + # -------------------------------------------------------------------------- + + def check(self, test_impl, *args, **kwargs): + inline_expect = kwargs.pop('inline_expect', None) + assert inline_expect + block_count = kwargs.pop('block_count', 1) + assert not kwargs + for k, v in inline_expect.items(): + assert isinstance(k, str) + assert isinstance(v, bool) + + j_func = njit(pipeline_class=IRPreservingTestPipeline)(test_impl) + + # check they produce the same answer first! + self.assertEqual(test_impl(*args), j_func(*args)) + + # make sure IR doesn't have branches + fir = j_func.overloads[j_func.signatures[0]].metadata['preserved_ir'] + fir.blocks = ir_utils.simplify_CFG(fir.blocks) + if self._DEBUG: + print("FIR".center(80, "-")) + fir.dump() + if block_count != 'SKIP': + self.assertEqual(len(fir.blocks), block_count) + block = next(iter(fir.blocks.values())) + + # if we don't expect the function to be inlined then make sure there is + # 'call' present still + exprs = [x for x in block.find_exprs()] + assert exprs + for k, v in inline_expect.items(): + found = False + for expr in exprs: + if getattr(expr, 'op', False) == 'call': + func_defn = fir.get_definition(expr.func) + found |= func_defn.name == k + elif ir_utils.is_operator_or_getitem(expr): + found |= expr.fn.__name__ == k + self.assertFalse(found == v) + + return fir # for use in further analysis + + +# used in _gen_involved +_GLOBAL = 1234 + + +def _gen_involved(): + _FREEVAR = 0xCAFE + + def foo(a, b, c=12, d=1j, e=None): + f = a + b + a += _FREEVAR + g = np.zeros(c, dtype=np.complex64) + h = f + g + i = 1j / d + # For SSA, zero init, n and t + n = 0 + t = 0 + if np.abs(i) > 0: + k = h / i + l = np.arange(1, c + 1) + m = np.sqrt(l - g) + e * k + if 
np.abs(m[0]) < 1: + for o in range(a): + n += 0 + if np.abs(n) < 3: + break + n += m[2] + p = g / l + q = [] + for r in range(len(p)): + q.append(p[r]) + if r > 4 + 1: + s = 123 + t = 5 + if s > 122 - c: + t += s + t += q[0] + _GLOBAL + + return f + o + r + t + r + a + n + + return foo + + +class TestFunctionInlining(MemoryLeakMixin, InliningBase): + + def test_basic_inline_never(self): + @njit(inline='never') + def foo(): + return + + def impl(): + return foo() + self.check(impl, inline_expect={'foo': False}) + + def test_basic_inline_always(self): + @njit(inline='always') + def foo(): + return + + def impl(): + return foo() + self.check(impl, inline_expect={'foo': True}) + + def test_basic_inline_combos(self): + + def impl(): + x = foo() + y = bar() + z = baz() + return x, y, z + + opts = (('always'), ('never')) + + for inline_foo, inline_bar, inline_baz in product(opts, opts, opts): + + @njit(inline=inline_foo) + def foo(): + return + + @njit(inline=inline_bar) + def bar(): + return + + @njit(inline=inline_baz) + def baz(): + return + + inline_expect = {'foo': self.inline_opt_as_bool[inline_foo], + 'bar': self.inline_opt_as_bool[inline_bar], + 'baz': self.inline_opt_as_bool[inline_baz]} + self.check(impl, inline_expect=inline_expect) + + @unittest.skip("Need to work out how to prevent this") + def test_recursive_inline(self): + + @njit(inline='always') + def foo(x): + if x == 0: + return 12 + else: + foo(x - 1) + + a = 3 + + def impl(): + b = 0 + if a > 1: + b += 1 + foo(5) + if b < a: + b -= 1 + + self.check(impl, inline_expect={'foo': True}) + + def test_freevar_bindings(self): + + def factory(inline, x, y): + z = x + 12 + + @njit(inline=inline) + def func(): + return (x, y + 3, z) + return func + + def impl(): + x = foo() + y = bar() + z = baz() + return x, y, z + + opts = (('always'), ('never')) + + for inline_foo, inline_bar, inline_baz in product(opts, opts, opts): + + foo = factory(inline_foo, 10, 20) + bar = factory(inline_bar, 30, 40) + baz = 
factory(inline_baz, 50, 60) + + inline_expect = {'foo': self.inline_opt_as_bool[inline_foo], + 'bar': self.inline_opt_as_bool[inline_bar], + 'baz': self.inline_opt_as_bool[inline_baz]} + self.check(impl, inline_expect=inline_expect) + + def test_global_binding(self): + + def impl(): + x = 19 + return _global_func(x) + + self.check(impl, inline_expect={'_global_func': True}) + + def test_inline_from_another_module(self): + + from .inlining_usecases import bar + + def impl(): + z = _GLOBAL1 + 2 + return bar(), z + + self.check(impl, inline_expect={'bar': True}) + + def test_inline_from_another_module_w_getattr(self): + + import numba.tests.inlining_usecases as iuc + + def impl(): + z = _GLOBAL1 + 2 + return iuc.bar(), z + + self.check(impl, inline_expect={'bar': True}) + + def test_inline_from_another_module_w_2_getattr(self): + + import numba.tests.inlining_usecases # noqa forces registration + import numba.tests as nt + + def impl(): + z = _GLOBAL1 + 2 + return nt.inlining_usecases.bar(), z + + self.check(impl, inline_expect={'bar': True}) + + def test_inline_from_another_module_as_freevar(self): + + def factory(): + from .inlining_usecases import bar + + @njit(inline='always') + def tmp(): + return bar() + return tmp + + baz = factory() + + def impl(): + z = _GLOBAL1 + 2 + return baz(), z + + self.check(impl, inline_expect={'bar': True}) + + def test_inline_w_freevar_from_another_module(self): + + from .inlining_usecases import baz_factory + + def gen(a, b): + bar = baz_factory(a) + + def impl(): + z = _GLOBAL1 + a * b + return bar(), z, a + return impl + + impl = gen(10, 20) + self.check(impl, inline_expect={'bar': True}) + + def test_inlining_models(self): + + def s17_caller_model(expr, caller_info, callee_info): + self.assertIsInstance(expr, ir.Expr) + self.assertEqual(expr.op, "call") + return self.sentinel_17_cost_model(caller_info) + + def s17_callee_model(expr, caller_info, callee_info): + self.assertIsInstance(expr, ir.Expr) + self.assertEqual(expr.op, 
"call") + return self.sentinel_17_cost_model(callee_info) + + # caller has sentinel + for caller, callee in ((11, 17), (17, 11)): + + @njit(inline=s17_caller_model) + def foo(): + return callee + + def impl(z): + x = z + caller + y = foo() + return y + 3, x + + self.check(impl, 10, inline_expect={'foo': caller == 17}) + + # callee has sentinel + for caller, callee in ((11, 17), (17, 11)): + + @njit(inline=s17_callee_model) + def bar(): + return callee + + def impl(z): + x = z + caller + y = bar() + return y + 3, x + + self.check(impl, 10, inline_expect={'bar': callee == 17}) + + def test_inline_inside_loop(self): + @njit(inline='always') + def foo(): + return 12 + + def impl(): + acc = 0.0 + for i in range(5): + acc += foo() + return acc + + self.check(impl, inline_expect={'foo': True}, block_count=4) + + def test_inline_inside_closure_inside_loop(self): + @njit(inline='always') + def foo(): + return 12 + + def impl(): + acc = 0.0 + for i in range(5): + def bar(): + return foo() + 7 + acc += bar() + return acc + + self.check(impl, inline_expect={'foo': True}, block_count=4) + + def test_inline_closure_inside_inlinable_inside_closure(self): + @njit(inline='always') + def foo(a): + def baz(): + return 12 + a + return baz() + 8 + + def impl(): + z = 9 + + def bar(x): + return foo(z) + 7 + x + return bar(z + 2) + + self.check(impl, inline_expect={'foo': True}, block_count=1) + + def test_inline_involved(self): + + fortran = njit(inline='always')(_gen_involved()) + + @njit(inline='always') + def boz(j): + acc = 0 + + def biz(t): + return t + acc + for x in range(j): + acc += biz(8 + acc) + fortran(2., acc, 1, 12j, biz(acc)) + return acc + + @njit(inline='always') + def foo(a): + acc = 0 + for p in range(12): + tmp = fortran(1, 1, 1, 1, 1) + + def baz(x): + return 12 + a + x + tmp + acc += baz(p) + 8 + boz(p) + tmp + return acc + baz(2) + + def impl(): + z = 9 + + def bar(x): + return foo(z) + 7 + x + return bar(z + 2) + + # block count changes with Python version due to 
bytecode differences. + if utils.PYVERSION in ((3, 12), (3, 13)): + bc = 39 + elif utils.PYVERSION in ((3, 10), (3, 11)): + bc = 35 + else: + raise NotImplementedError(utils.PYVERSION) + + self.check(impl, inline_expect={'foo': True, 'boz': True, + 'fortran': True}, block_count=bc) + + def test_inline_renaming_scheme(self): + # See #7380, this checks that inlined variables have a name derived from + # the function they were defined in. + + @njit(inline="always") + def bar(z): + x = 5 + y = 10 + return x + y + z + + @njit(pipeline_class=IRPreservingTestPipeline) + def foo(a, b): + return bar(a), bar(b) + + self.assertEqual(foo(10, 20), (25, 35)) + + # check IR. Look for the `x = 5`... there should be + # Two lots of `const(int, 5)`, one for each inline + # The LHS of the assignment will have a name like: + # TestFunctionInlining_test_inline_renaming_scheme__locals__bar_v2.x + # Ensure that this is the case! + func_ir = foo.overloads[foo.signatures[0]].metadata['preserved_ir'] + store = [] + for blk in func_ir.blocks.values(): + for stmt in blk.body: + if isinstance(stmt, ir.Assign): + if isinstance(stmt.value, ir.Const): + if stmt.value.value == 5: + store.append(stmt) + + self.assertEqual(len(store), 2) + for i in store: + name = i.target.name + basename = self.id().lstrip(self.__module__) + regex = rf'{basename}__locals__bar_v[0-9]+.x' + self.assertRegex(name, regex) + + +class TestRegisterJitableInlining(MemoryLeakMixin, InliningBase): + + def test_register_jitable_inlines(self): + + @register_jitable(inline='always') + def foo(): + return 1 + + def impl(): + foo() + + self.check(impl, inline_expect={'foo': True}) + + +class TestOverloadInlining(MemoryLeakMixin, InliningBase): + + def test_basic_inline_never(self): + def foo(): + pass + + @overload(foo, inline='never') + def foo_overload(): + def foo_impl(): + pass + return foo_impl + + def impl(): + return foo() + + self.check(impl, inline_expect={'foo': False}) + + def test_basic_inline_always(self): + + def 
foo(): + pass + + @overload(foo, inline='always') + def foo_overload(): + def impl(): + pass + return impl + + def impl(): + return foo() + + self.check(impl, inline_expect={'foo': True}) + + def test_inline_always_kw_no_default(self): + # pass call arg by name that doesn't have default value + def foo(a, b): + return a + b + + @overload(foo, inline='always') + def overload_foo(a, b): + return lambda a, b: a + b + + def impl(): + return foo(3, b=4) + + self.check(impl, inline_expect={'foo': True}) + + def test_inline_operators_unary(self): + + def impl_inline(x): + return -x + + def impl_noinline(x): + return +x + + dummy_unary_impl = lambda x: True + Dummy, DummyType = self.make_dummy_type() + setattr(Dummy, '__neg__', dummy_unary_impl) + setattr(Dummy, '__pos__', dummy_unary_impl) + + @overload(operator.neg, inline='always') + def overload_dummy_neg(x): + if isinstance(x, DummyType): + return dummy_unary_impl + + @overload(operator.pos, inline='never') + def overload_dummy_pos(x): + if isinstance(x, DummyType): + return dummy_unary_impl + + self.check(impl_inline, Dummy(), inline_expect={'neg': True}) + self.check(impl_noinline, Dummy(), inline_expect={'pos': False}) + + def test_inline_operators_binop(self): + + def impl_inline(x): + return x == 1 + + def impl_noinline(x): + return x != 1 + + Dummy, DummyType = self.make_dummy_type() + + dummy_binop_impl = lambda a, b: True + setattr(Dummy, '__eq__', dummy_binop_impl) + setattr(Dummy, '__ne__', dummy_binop_impl) + + @overload(operator.eq, inline='always') + def overload_dummy_eq(a, b): + if isinstance(a, DummyType): + return dummy_binop_impl + + @overload(operator.ne, inline='never') + def overload_dummy_ne(a, b): + if isinstance(a, DummyType): + return dummy_binop_impl + + self.check(impl_inline, Dummy(), inline_expect={'eq': True}) + self.check(impl_noinline, Dummy(), inline_expect={'ne': False}) + + def test_inline_operators_inplace_binop(self): + + def impl_inline(x): + x += 1 + + def impl_noinline(x): + x 
-= 1 + + Dummy, DummyType = self.make_dummy_type() + + dummy_inplace_binop_impl = lambda a, b: True + setattr(Dummy, '__iadd__', dummy_inplace_binop_impl) + setattr(Dummy, '__isub__', dummy_inplace_binop_impl) + + @overload(operator.iadd, inline='always') + def overload_dummy_iadd(a, b): + if isinstance(a, DummyType): + return dummy_inplace_binop_impl + + @overload(operator.isub, inline='never') + def overload_dummy_isub(a, b): + if isinstance(a, DummyType): + return dummy_inplace_binop_impl + + # DummyType is not mutable, so lowering 'inplace_binop' Expr + # re-uses (requires) copying function definition + @overload(operator.add, inline='always') + def overload_dummy_add(a, b): + if isinstance(a, DummyType): + return dummy_inplace_binop_impl + + @overload(operator.sub, inline='never') + def overload_dummy_sub(a, b): + if isinstance(a, DummyType): + return dummy_inplace_binop_impl + + self.check(impl_inline, Dummy(), inline_expect={'iadd': True}) + self.check(impl_noinline, Dummy(), inline_expect={'isub': False}) + + def test_inline_always_operators_getitem(self): + + def impl(x, idx): + return x[idx] + + def impl_static_getitem(x): + return x[1] + + Dummy, DummyType = self.make_dummy_type() + + dummy_getitem_impl = lambda obj, idx: None + setattr(Dummy, '__getitem__', dummy_getitem_impl) + + @overload(operator.getitem, inline='always') + def overload_dummy_getitem(obj, idx): + if isinstance(obj, DummyType): + return dummy_getitem_impl + + # note getitem and static_getitem Exprs refer to operator.getitem + # hence they are checked using the same expected key + self.check(impl, Dummy(), 1, inline_expect={'getitem': True}) + self.check(impl_static_getitem, Dummy(), + inline_expect={'getitem': True}) + + def test_inline_never_operators_getitem(self): + + def impl(x, idx): + return x[idx] + + def impl_static_getitem(x): + return x[1] + + Dummy, DummyType = self.make_dummy_type() + + dummy_getitem_impl = lambda obj, idx: None + setattr(Dummy, '__getitem__', 
dummy_getitem_impl) + + @overload(operator.getitem, inline='never') + def overload_dummy_getitem(obj, idx): + if isinstance(obj, DummyType): + return dummy_getitem_impl + + # both getitem and static_getitem Exprs refer to operator.getitem + # hence they are checked using the same expect key + self.check(impl, Dummy(), 1, inline_expect={'getitem': False}) + self.check(impl_static_getitem, Dummy(), + inline_expect={'getitem': False}) + + def test_inline_stararg_error(self): + def foo(a, *b): + return a + b[0] + + @overload(foo, inline='always') + def overload_foo(a, *b): + return lambda a, *b: a + b[0] + + def impl(): + return foo(3, 3, 5) + + with self.assertRaises(NotImplementedError) as e: + self.check(impl, inline_expect={'foo': True}) + + self.assertIn("Stararg not supported in inliner for arg 1 *b", + str(e.exception)) + + def test_basic_inline_combos(self): + + def impl(): + x = foo() + y = bar() + z = baz() + return x, y, z + + opts = (('always'), ('never')) + + for inline_foo, inline_bar, inline_baz in product(opts, opts, opts): + + def foo(): + pass + + def bar(): + pass + + def baz(): + pass + + @overload(foo, inline=inline_foo) + def foo_overload(): + def impl(): + return + return impl + + @overload(bar, inline=inline_bar) + def bar_overload(): + def impl(): + return + return impl + + @overload(baz, inline=inline_baz) + def baz_overload(): + def impl(): + return + return impl + + inline_expect = {'foo': self.inline_opt_as_bool[inline_foo], + 'bar': self.inline_opt_as_bool[inline_bar], + 'baz': self.inline_opt_as_bool[inline_baz]} + self.check(impl, inline_expect=inline_expect) + + def test_freevar_bindings(self): + + def impl(): + x = foo() + y = bar() + z = baz() + return x, y, z + + opts = (('always'), ('never')) + + for inline_foo, inline_bar, inline_baz in product(opts, opts, opts): + # need to repeatedly clobber definitions of foo, bar, baz so + # @overload binds to the right instance WRT inlining + + def foo(): + x = 10 + y = 20 + z = x + 12 + 
return (x, y + 3, z) + + def bar(): + x = 30 + y = 40 + z = x + 12 + return (x, y + 3, z) + + def baz(): + x = 60 + y = 80 + z = x + 12 + return (x, y + 3, z) + + def factory(target, x, y, inline=None): + z = x + 12 + + @overload(target, inline=inline) + def func(): + def impl(): + return (x, y + 3, z) + return impl + + factory(foo, 10, 20, inline=inline_foo) + factory(bar, 30, 40, inline=inline_bar) + factory(baz, 60, 80, inline=inline_baz) + + inline_expect = {'foo': self.inline_opt_as_bool[inline_foo], + 'bar': self.inline_opt_as_bool[inline_bar], + 'baz': self.inline_opt_as_bool[inline_baz]} + + self.check(impl, inline_expect=inline_expect) + + def test_global_overload_binding(self): + + def impl(): + z = 19 + return _global_defn(z) + + self.check(impl, inline_expect={'_global_defn': True}) + + def test_inline_from_another_module(self): + + from .inlining_usecases import baz + + def impl(): + z = _GLOBAL1 + 2 + return baz(), z + + self.check(impl, inline_expect={'baz': True}) + + def test_inline_from_another_module_w_getattr(self): + + import numba.tests.inlining_usecases as iuc + + def impl(): + z = _GLOBAL1 + 2 + return iuc.baz(), z + + self.check(impl, inline_expect={'baz': True}) + + def test_inline_from_another_module_w_2_getattr(self): + + import numba.tests.inlining_usecases # noqa forces registration + import numba.tests as nt + + def impl(): + z = _GLOBAL1 + 2 + return nt.inlining_usecases.baz(), z + + self.check(impl, inline_expect={'baz': True}) + + def test_inline_from_another_module_as_freevar(self): + + def factory(): + from .inlining_usecases import baz + + @njit(inline='always') + def tmp(): + return baz() + return tmp + + bop = factory() + + def impl(): + z = _GLOBAL1 + 2 + return bop(), z + + self.check(impl, inline_expect={'baz': True}) + + def test_inline_w_freevar_from_another_module(self): + + from .inlining_usecases import bop_factory + + def gen(a, b): + bar = bop_factory(a) + + def impl(): + z = _GLOBAL1 + a * b + return bar(), z, a + 
return impl + + impl = gen(10, 20) + self.check(impl, inline_expect={'bar': True}) + + def test_inlining_models(self): + + def s17_caller_model(expr, caller_info, callee_info): + self.assertIsInstance(expr, ir.Expr) + self.assertEqual(expr.op, "call") + return self.sentinel_17_cost_model(caller_info.func_ir) + + def s17_callee_model(expr, caller_info, callee_info): + self.assertIsInstance(expr, ir.Expr) + self.assertEqual(expr.op, "call") + return self.sentinel_17_cost_model(callee_info.func_ir) + + # caller has sentinel + for caller, callee in ((10, 11), (17, 11)): + + def foo(): + return callee + + @overload(foo, inline=s17_caller_model) + def foo_ol(): + def impl(): + return callee + return impl + + def impl(z): + x = z + caller + y = foo() + return y + 3, x + + self.check(impl, 10, inline_expect={'foo': caller == 17}) + + # callee has sentinel + for caller, callee in ((11, 17), (11, 10)): + + def bar(): + return callee + + @overload(bar, inline=s17_callee_model) + def bar_ol(): + def impl(): + return callee + return impl + + def impl(z): + x = z + caller + y = bar() + return y + 3, x + + self.check(impl, 10, inline_expect={'bar': callee == 17}) + + def test_multiple_overloads_with_different_inline_characteristics(self): + # check that having different inlining options for different overloads + # of the same function works ok + + # this is the Python equiv of the overloads below + def bar(x): + if isinstance(typeof(x), types.Float): + return x + 1234 + else: + return x + 1 + + @overload(bar, inline='always') + def bar_int_ol(x): + if isinstance(x, types.Integer): + def impl(x): + return x + 1 + return impl + + @overload(bar, inline='never') + def bar_float_ol(x): + if isinstance(x, types.Float): + def impl(x): + return x + 1234 + return impl + + def always_inline_cost_model(*args): + return True + + @overload(bar, inline=always_inline_cost_model) + def bar_complex_ol(x): + if isinstance(x, types.Complex): + def impl(x): + return x + 1 + return impl + + def 
impl(): + a = bar(1) # integer literal, should inline + b = bar(2.3) # float literal, should not inline + # complex literal, should inline by virtue of cost model + c = bar(3j) + return a + b + c + + # there should still be a `bar` not inlined + fir = self.check(impl, inline_expect={'bar': False}, block_count=1) + + # check there is one call left in the IR + block = next(iter(fir.blocks.items()))[1] + calls = [x for x in block.find_exprs(op='call')] + self.assertTrue(len(calls) == 1) + + # check that the constant "1234" is not in the IR + consts = [x.value for x in block.find_insts(ir.Assign) + if isinstance(getattr(x, 'value', None), ir.Const)] + for val in consts: + self.assertNotEqual(val.value, 1234) + + def test_overload_inline_always_with_literally_in_inlinee(self): + # See issue #5887 + + def foo_ovld(dtype): + + if not isinstance(dtype, types.StringLiteral): + def foo_noop(dtype): + return literally(dtype) + return foo_noop + + if dtype.literal_value == 'str': + def foo_as_str_impl(dtype): + return 10 + return foo_as_str_impl + + if dtype.literal_value in ('int64', 'float64'): + def foo_as_num_impl(dtype): + return 20 + return foo_as_num_impl + + # define foo for literal str 'str' + def foo(dtype): + return 10 + + overload(foo, inline='always')(foo_ovld) + + def test_impl(dtype): + return foo(dtype) + + # check literal dispatch on 'str' + dtype = 'str' + self.check(test_impl, dtype, inline_expect={'foo': True}) + + # redefine foo to be correct for literal str 'int64' + def foo(dtype): + return 20 + overload(foo, inline='always')(foo_ovld) + + # check literal dispatch on 'int64' + dtype = 'int64' + self.check(test_impl, dtype, inline_expect={'foo': True}) + + def test_inline_always_ssa(self): + # Make sure IR inlining uses SSA properly. Test for #6721. 
+ + dummy_true = True + + def foo(A): + return True + + @overload(foo, inline="always") + def foo_overload(A): + + def impl(A): + s = dummy_true + for i in range(len(A)): + dummy = dummy_true + if A[i]: + dummy = A[i] + s *= dummy + return s + return impl + + def impl(): + return foo(np.array([True, False, True])) + + self.check(impl, block_count='SKIP', inline_expect={'foo': True}) + + def test_inline_always_ssa_scope_validity(self): + # Make sure IR inlining correctly updates the scope(s). See #7802 + + def bar(): + b = 5 + while b > 1: + b //= 2 + + return 10 + + @overload(bar, inline="always") + def bar_impl(): + return bar + + @njit + def foo(): + bar() + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always', errors.NumbaIRAssumptionWarning) + ignore_internal_warnings() + self.assertEqual(foo(), foo.py_func()) + + # There should be no warnings as the IR scopes should be consistent with + # the IR involved. + self.assertEqual(len(w), 0) + + +class TestOverloadMethsAttrsInlining(InliningBase): + def setUp(self): + self.make_dummy_type() + super(TestOverloadMethsAttrsInlining, self).setUp() + + def check_method(self, test_impl, args, expected, block_count, + expects_inlined=True): + j_func = njit(pipeline_class=IRPreservingTestPipeline)(test_impl) + # check they produce the same answer first! 
+ self.assertEqual(j_func(*args), expected) + + # make sure IR doesn't have branches + fir = j_func.overloads[j_func.signatures[0]].metadata['preserved_ir'] + fir.blocks = fir.blocks + self.assertEqual(len(fir.blocks), block_count) + if expects_inlined: + # assert no calls + for block in fir.blocks.values(): + calls = list(block.find_exprs('call')) + self.assertFalse(calls) + else: + # assert has call + allcalls = [] + for block in fir.blocks.values(): + allcalls += list(block.find_exprs('call')) + self.assertTrue(allcalls) + + def check_getattr(self, test_impl, args, expected, block_count, + expects_inlined=True): + j_func = njit(pipeline_class=IRPreservingTestPipeline)(test_impl) + # check they produce the same answer first! + self.assertEqual(j_func(*args), expected) + + # make sure IR doesn't have branches + fir = j_func.overloads[j_func.signatures[0]].metadata['preserved_ir'] + fir.blocks = fir.blocks + self.assertEqual(len(fir.blocks), block_count) + if expects_inlined: + # assert no getattr + for block in fir.blocks.values(): + getattrs = list(block.find_exprs('getattr')) + self.assertFalse(getattrs) + else: + # assert has getattr + allgetattrs = [] + for block in fir.blocks.values(): + allgetattrs += list(block.find_exprs('getattr')) + self.assertTrue(allgetattrs) + + def test_overload_method_default_args_always(self): + Dummy, DummyType = self.make_dummy_type() + + @overload_method(DummyType, "inline_method", inline='always') + def _get_inlined_method(obj, val=None, val2=None): + def get(obj, val=None, val2=None): + return ("THIS IS INLINED", val, val2) + return get + + def foo(obj): + return obj.inline_method(123), obj.inline_method(val2=321) + + self.check_method( + test_impl=foo, + args=[Dummy()], + expected=(("THIS IS INLINED", 123, None), + ("THIS IS INLINED", None, 321)), + block_count=1, + ) + + def make_overload_method_test(self, costmodel, should_inline): + def costmodel(*args): + return should_inline + + Dummy, DummyType = self.make_dummy_type() 
+ + @overload_method(DummyType, "inline_method", inline=costmodel) + def _get_inlined_method(obj, val): + def get(obj, val): + return ("THIS IS INLINED!!!", val) + return get + + def foo(obj): + return obj.inline_method(123) + + self.check_method( + test_impl=foo, + args=[Dummy()], + expected=("THIS IS INLINED!!!", 123), + block_count=1, + expects_inlined=should_inline, + ) + + def test_overload_method_cost_driven_always(self): + self.make_overload_method_test( + costmodel='always', + should_inline=True, + ) + + def test_overload_method_cost_driven_never(self): + self.make_overload_method_test( + costmodel='never', + should_inline=False, + ) + + def test_overload_method_cost_driven_must_inline(self): + self.make_overload_method_test( + costmodel=lambda *args: True, + should_inline=True, + ) + + def test_overload_method_cost_driven_no_inline(self): + self.make_overload_method_test( + costmodel=lambda *args: False, + should_inline=False, + ) + + def make_overload_attribute_test(self, costmodel, should_inline): + Dummy, DummyType = self.make_dummy_type() + + @overload_attribute(DummyType, "inlineme", inline=costmodel) + def _get_inlineme(obj): + def get(obj): + return "MY INLINED ATTRS" + return get + + def foo(obj): + return obj.inlineme + + self.check_getattr( + test_impl=foo, + args=[Dummy()], + expected="MY INLINED ATTRS", + block_count=1, + expects_inlined=should_inline, + ) + + def test_overload_attribute_always(self): + self.make_overload_attribute_test( + costmodel='always', + should_inline=True, + ) + + def test_overload_attribute_never(self): + self.make_overload_attribute_test( + costmodel='never', + should_inline=False, + ) + + def test_overload_attribute_costmodel_must_inline(self): + self.make_overload_attribute_test( + costmodel=lambda *args: True, + should_inline=True, + ) + + def test_overload_attribute_costmodel_no_inline(self): + self.make_overload_attribute_test( + costmodel=lambda *args: False, + should_inline=False, + ) + + +class 
TestGeneralInlining(MemoryLeakMixin, InliningBase): + + def test_with_inlined_and_noninlined_variants(self): + # This test is contrived and was to demonstrate fixing a bug in the + # template walking logic where inlinable and non-inlinable definitions + # would not mix. + + @overload(len, inline='always') + def overload_len(A): + if False: + return lambda A: 10 + + def impl(): + return len([2, 3, 4]) + + # len(list) won't be inlined because the overload above doesn't apply + self.check(impl, inline_expect={'len': False}) + + def test_with_kwargs(self): + + def foo(a, b=3, c=5): + return a + b + c + + @overload(foo, inline='always') + def overload_foo(a, b=3, c=5): + def impl(a, b=3, c=5): + return a + b + c + return impl + + def impl(): + return foo(3, c=10) + + self.check(impl, inline_expect={'foo': True}) + + def test_with_kwargs2(self): + + @njit(inline='always') + def bar(a, b=12, c=9): + return a + b + + def impl(a, b=7, c=5): + return bar(a + b, c=19) + + self.check(impl, 3, 4, inline_expect={'bar': True}) + + def test_inlining_optional_constant(self): + # This testcase causes `b` to be a Optional(bool) constant once it is + # inlined into foo(). 
+ @njit(inline='always') + def bar(a=None, b=None): + if b is None: + b = 123 # this changes the type of `b` due to lack of SSA + return (a, b) + + def impl(): + return bar(), bar(123), bar(b=321) + + self.check(impl, block_count='SKIP', inline_expect={'bar': True}) + + +class TestInlineOptions(TestCase): + + def test_basic(self): + always = InlineOptions('always') + self.assertTrue(always.is_always_inline) + self.assertFalse(always.is_never_inline) + self.assertFalse(always.has_cost_model) + self.assertEqual(always.value, 'always') + + never = InlineOptions('never') + self.assertFalse(never.is_always_inline) + self.assertTrue(never.is_never_inline) + self.assertFalse(never.has_cost_model) + self.assertEqual(never.value, 'never') + + def cost_model(x): + return x + model = InlineOptions(cost_model) + self.assertFalse(model.is_always_inline) + self.assertFalse(model.is_never_inline) + self.assertTrue(model.has_cost_model) + self.assertIs(model.value, cost_model) + + +class TestInlineMiscIssues(TestCase): + + def test_issue4691(self): + def output_factory(array, dtype): + pass + + @overload(output_factory, inline='always') + def ol_output_factory(array, dtype): + if isinstance(array, types.npytypes.Array): + def impl(array, dtype): + shape = array.shape[3:] + return np.zeros(shape, dtype=dtype) + + return impl + + @njit(nogil=True) + def fn(array): + out = output_factory(array, array.dtype) + return out + + @njit(nogil=True) + def fn2(array): + return np.zeros(array.shape[3:], dtype=array.dtype) + + fn(np.ones((10, 20, 30, 40, 50))) + fn2(np.ones((10, 20, 30, 40, 50))) + + def test_issue4693(self): + + @njit(inline='always') + def inlining(array): + if array.ndim != 1: + raise ValueError("Invalid number of dimensions") + + return array + + @njit + def fn(array): + return inlining(array) + + fn(np.zeros(10)) + + def test_issue5476(self): + # Actual issue has the ValueError passed as an arg to `inlining` so is + # a constant inference error + @njit(inline='always') + 
def inlining(): + msg = 'Something happened' + raise ValueError(msg) + + @njit + def fn(): + return inlining() + + with self.assertRaises(ValueError) as raises: + fn() + + self.assertIn("Something happened", str(raises.exception)) + + def test_issue5792(self): + # Issue is that overloads cache their IR and closure inliner was + # manipulating the cached IR in a way that broke repeated inlines. + + class Dummy: + def __init__(self, data): + self.data = data + + def div(self, other): + return data / other.data + + class DummyType(types.Type): + def __init__(self, data): + self.data = data + super().__init__(name=f'Dummy({self.data})') + + @register_model(DummyType) + class DummyTypeModel(models.StructModel): + def __init__(self, dmm, fe_type): + members = [ + ('data', fe_type.data), + ] + super().__init__(dmm, fe_type, members) + + make_attribute_wrapper(DummyType, 'data', '_data') + + @intrinsic + def init_dummy(typingctx, data): + def codegen(context, builder, sig, args): + typ = sig.return_type + data, = args + dummy = cgutils.create_struct_proxy(typ)(context, builder) + dummy.data = data + + if context.enable_nrt: + context.nrt.incref(builder, sig.args[0], data) + + return dummy._getvalue() + + ret_typ = DummyType(data) + sig = signature(ret_typ, data) + + return sig, codegen + + @overload(Dummy, inline='always') + def dummy_overload(data): + def ctor(data): + return init_dummy(data) + + return ctor + + @overload_method(DummyType, 'div', inline='always') + def div_overload(self, other): + def impl(self, other): + return self._data / other._data + + return impl + + @njit + def test_impl(data, other_data): + dummy = Dummy(data) # ctor inlined once + other = Dummy(other_data) # ctor inlined again + + return dummy.div(other) + + data = 1. + other_data = 2. 
+ res = test_impl(data, other_data) + self.assertEqual(res, data / other_data) + + def test_issue5824(self): + """ Similar to the above test_issue5792, checks mutation of the inlinee + IR is local only""" + + class CustomCompiler(CompilerBase): + + def define_pipelines(self): + pm = DefaultPassBuilder.define_nopython_pipeline(self.state) + # Run the inliner twice! + pm.add_pass_after(InlineOverloads, InlineOverloads) + pm.finalize() + return [pm] + + def bar(x): + ... + + @overload(bar, inline='always') + def ol_bar(x): + if isinstance(x, types.Integer): + def impl(x): + return x + 1.3 + return impl + + @njit(pipeline_class=CustomCompiler) + def foo(z): + return bar(z), bar(z) + + self.assertEqual(foo(10), (11.3, 11.3)) + + @skip_parfors_unsupported + def test_issue7380(self): + # This checks that inlining a function containing a loop into another + # loop where the induction variable in both loops is the same doesn't + # end up with a name collision. Parfors can detect this so it is used. 
+ # See: https://github.com/numba/numba/issues/7380 + + # Check Numba inlined function passes + + @njit(inline="always") + def bar(x): + for i in range(x.size): + x[i] += 1 + + @njit(parallel=True) + def foo(a): + for i in prange(a.shape[0]): + bar(a[i]) + + a = np.ones((10, 10)) + foo(a) # run + # check mutation of data is correct + self.assertPreciseEqual(a, 2 * np.ones_like(a)) + + # Check manually inlined equivalent function fails + @njit(parallel=True) + def foo_bad(a): + for i in prange(a.shape[0]): + x = a[i] + for i in range(x.size): + x[i] += 1 + + with self.assertRaises(errors.UnsupportedRewriteError) as e: + foo_bad(a) + + self.assertIn("Overwrite of parallel loop index", str(e.exception)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_ir_utils.py b/venv/lib/python3.10/site-packages/numba/tests/test_ir_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..96deeaca0e3f60037938c61d03838f5c22451ddc --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_ir_utils.py @@ -0,0 +1,273 @@ +import numba +from numba.tests.support import TestCase, unittest +from numba.core.registry import cpu_target +from numba.core.compiler import CompilerBase, Flags +from numba.core.compiler_machinery import PassManager +from numba.core import types, ir, bytecode, compiler, ir_utils, registry +from numba.core.untyped_passes import (ExtractByteCode, TranslateByteCode, + FixupArgs, IRProcessing,) + +from numba.core.typed_passes import (NopythonTypeInference, + type_inference_stage, DeadCodeElimination) +from numba.experimental import jitclass + +# global constant for testing find_const +GLOBAL_B = 11 + + +@jitclass([('val', numba.core.types.List(numba.intp))]) +class Dummy(object): + def __init__(self, val): + self.val = val + + +class TestIrUtils(TestCase): + """ + Tests ir handling utility functions like find_callname. 
+ """ + + def test_obj_func_match(self): + """Test matching of an object method (other than Array see #3449) + """ + + def test_func(): + d = Dummy([1]) + d.val.append(2) + + test_ir = compiler.run_frontend(test_func) + typingctx = cpu_target.typing_context + targetctx = cpu_target.target_context + typing_res = type_inference_stage( + typingctx, targetctx, test_ir, (), None) + matched_call = ir_utils.find_callname( + test_ir, test_ir.blocks[0].body[7].value, typing_res.typemap) + self.assertTrue(isinstance(matched_call, tuple) and + len(matched_call) == 2 and + matched_call[0] == 'append') + + def test_dead_code_elimination(self): + + class Tester(CompilerBase): + + @classmethod + def mk_pipeline(cls, args, return_type=None, flags=None, locals={}, + library=None, typing_context=None, + target_context=None): + if not flags: + flags = Flags() + flags.nrt = True + if typing_context is None: + typing_context = registry.cpu_target.typing_context + if target_context is None: + target_context = registry.cpu_target.target_context + return cls(typing_context, target_context, library, args, + return_type, flags, locals) + + def compile_to_ir(self, func, DCE=False): + """ + Compile and return IR + """ + func_id = bytecode.FunctionIdentity.from_function(func) + self.state.func_id = func_id + ExtractByteCode().run_pass(self.state) + state = self.state + + name = "DCE_testing" + pm = PassManager(name) + pm.add_pass(TranslateByteCode, "analyzing bytecode") + pm.add_pass(FixupArgs, "fix up args") + pm.add_pass(IRProcessing, "processing IR") + pm.add_pass(NopythonTypeInference, "nopython frontend") + if DCE is True: + pm.add_pass(DeadCodeElimination, "DCE after typing") + pm.finalize() + pm.run(state) + return state.func_ir + + def check_initial_ir(the_ir): + # dead stuff: + # a const int value 0xdead + # an assign of above into to variable `dead` + # a const int above 0xdeaddead + # an assign of said int to variable `deaddead` + # this is 2 statements to remove + + 
self.assertEqual(len(the_ir.blocks), 1) + block = the_ir.blocks[0] + deads = [] + for x in block.find_insts(ir.Assign): + if isinstance(getattr(x, 'target', None), ir.Var): + if 'dead' in getattr(x.target, 'name', ''): + deads.append(x) + + self.assertEqual(len(deads), 2) + for d in deads: + # check the ir.Const is the definition and the value is expected + const_val = the_ir.get_definition(d.value) + self.assertTrue(int('0x%s' % d.target.name, 16), + const_val.value) + + return deads + + def check_dce_ir(the_ir): + self.assertEqual(len(the_ir.blocks), 1) + block = the_ir.blocks[0] + deads = [] + consts = [] + for x in block.find_insts(ir.Assign): + if isinstance(getattr(x, 'target', None), ir.Var): + if 'dead' in getattr(x.target, 'name', ''): + deads.append(x) + if isinstance(getattr(x, 'value', None), ir.Const): + consts.append(x) + self.assertEqual(len(deads), 0) + + # check the consts to make sure there's no reference to 0xdead or + # 0xdeaddead + for x in consts: + self.assertTrue(x.value.value not in [0xdead, 0xdeaddead]) + + def foo(x): + y = x + 1 + dead = 0xdead # noqa + z = y + 2 + deaddead = 0xdeaddead # noqa + ret = z * z + return ret + + test_pipeline = Tester.mk_pipeline((types.intp,)) + no_dce = test_pipeline.compile_to_ir(foo) + removed = check_initial_ir(no_dce) + + test_pipeline = Tester.mk_pipeline((types.intp,)) + w_dce = test_pipeline.compile_to_ir(foo, DCE=True) + check_dce_ir(w_dce) + + # check that the count of initial - removed = dce + self.assertEqual(len(no_dce.blocks[0].body) - len(removed), + len(w_dce.blocks[0].body)) + + def test_find_const_global(self): + """ + Test find_const() for values in globals (ir.Global) and freevars + (ir.FreeVar) that are considered constants for compilation. 
+ """ + FREEVAR_C = 12 + + def foo(a): + b = GLOBAL_B + c = FREEVAR_C + return a + b + c + + f_ir = compiler.run_frontend(foo) + block = f_ir.blocks[0] + const_b = None + const_c = None + + for inst in block.body: + if isinstance(inst, ir.Assign) and inst.target.name == 'b': + const_b = ir_utils.guard( + ir_utils.find_const, f_ir, inst.target) + if isinstance(inst, ir.Assign) and inst.target.name == 'c': + const_c = ir_utils.guard( + ir_utils.find_const, f_ir, inst.target) + + self.assertEqual(const_b, GLOBAL_B) + self.assertEqual(const_c, FREEVAR_C) + + def test_flatten_labels(self): + """ tests flatten_labels """ + def foo(a): + acc = 0 + if a > 3: + acc += 1 + if a > 19: + return 53 + elif a < 1000: + if a >= 12: + acc += 1 + for x in range(10): + acc -= 1 + if acc < 2: + break + else: + acc += 7 + else: + raise ValueError("some string") + # prevents inline of return on py310 + py310_defeat1 = 1 # noqa + py310_defeat2 = 2 # noqa + py310_defeat3 = 3 # noqa + py310_defeat4 = 4 # noqa + return acc + + def bar(a): + acc = 0 + z = 12 + if a > 3: + acc += 1 + z += 12 + if a > 19: + z += 12 + return 53 + elif a < 1000: + if a >= 12: + z += 12 + acc += 1 + for x in range(10): + z += 12 + acc -= 1 + if acc < 2: + break + else: + z += 12 + acc += 7 + else: + raise ValueError("some string") + py310_defeat1 = 1 # noqa + py310_defeat2 = 2 # noqa + py310_defeat3 = 3 # noqa + py310_defeat4 = 4 # noqa + return acc + + def baz(a): + acc = 0 + if a > 3: + acc += 1 + if a > 19: + return 53 + else: # extra control flow in comparison to foo + return 55 + elif a < 1000: + if a >= 12: + acc += 1 + for x in range(10): + acc -= 1 + if acc < 2: + break + else: + acc += 7 + else: + raise ValueError("some string") + py310_defeat1 = 1 # noqa + py310_defeat2 = 2 # noqa + py310_defeat3 = 3 # noqa + py310_defeat4 = 4 # noqa + return acc + + def get_flat_cfg(func): + func_ir = ir_utils.compile_to_numba_ir(func, dict()) + flat_blocks = ir_utils.flatten_labels(func_ir.blocks) + 
self.assertEqual(max(flat_blocks.keys()) + 1, len(func_ir.blocks)) + return ir_utils.compute_cfg_from_blocks(flat_blocks) + + foo_cfg = get_flat_cfg(foo) + bar_cfg = get_flat_cfg(bar) + baz_cfg = get_flat_cfg(baz) + + self.assertEqual(foo_cfg, bar_cfg) + self.assertNotEqual(foo_cfg, baz_cfg) + + +if __name__ == "__main__": + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_itanium_mangler.py b/venv/lib/python3.10/site-packages/numba/tests/test_itanium_mangler.py new file mode 100644 index 0000000000000000000000000000000000000000..2737b88e31856d3a88b7a31b2aa585fe52acb683 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_itanium_mangler.py @@ -0,0 +1,80 @@ +# -*- coding: utf-8 -*- +from numba import int32, int64, uint32, uint64, float32, float64 +from numba.core.types import range_iter32_type +from numba.core import itanium_mangler +import unittest + + +class TestItaniumManager(unittest.TestCase): + def test_ident(self): + got = itanium_mangler.mangle_identifier("apple") + expect = "5apple" + self.assertEqual(expect, got) + + got = itanium_mangler.mangle_identifier("ap_ple") + expect = "6ap_ple" + self.assertEqual(expect, got) + + got = itanium_mangler.mangle_identifier("apple213") + expect = "8apple213" + self.assertEqual(expect, got) + + def test_types(self): + got = itanium_mangler.mangle_type(int32) + expect = "i" + self.assertEqual(expect, got) + + got = itanium_mangler.mangle_type(int64) + expect = "x" + self.assertEqual(expect, got) + + got = itanium_mangler.mangle_type(uint32) + expect = "j" + self.assertEqual(expect, got) + + got = itanium_mangler.mangle_type(uint64) + expect = "y" + self.assertEqual(expect, got) + + got = itanium_mangler.mangle_type(float32) + expect = "f" + self.assertEqual(expect, got) + + got = itanium_mangler.mangle_type(float64) + expect = "d" + self.assertEqual(expect, got) + + def test_function(self): + got = itanium_mangler.mangle("what", [int32, float32]) + expect = "_Z4whatif" + 
self.assertEqual(expect, got) + + got = itanium_mangler.mangle("a_little_brown_fox", [uint64, + uint32, + float64]) + expect = "_Z18a_little_brown_foxyjd" + self.assertEqual(expect, got) + + def test_custom_type(self): + got = itanium_mangler.mangle_type(range_iter32_type) + name = str(range_iter32_type) + expect = "{n}{name}".format(n=len(name), name=name) + self.assertEqual(expect, got) + + def test_mangle_literal(self): + # check int + got = itanium_mangler.mangle_value(123) + expect = "Li123E" + self.assertEqual(expect, got) + # check float (not handled using standard) + got = itanium_mangler.mangle_value(12.3) + self.assertRegex(got, r'^\d+_12_[0-9a-z][0-9a-z]3$') + + def test_mangle_unicode(self): + name = u'f∂ƒ©z' + got = itanium_mangler.mangle_identifier(name) + self.assertRegex(got, r'^\d+f(_[a-z0-9][a-z0-9])+z$') + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_iteration.py b/venv/lib/python3.10/site-packages/numba/tests/test_iteration.py new file mode 100644 index 0000000000000000000000000000000000000000..aea9869e9cc3d55a1e763f6870cb2873b4823680 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_iteration.py @@ -0,0 +1,241 @@ +import unittest +import numpy as np + +from numba import jit, njit +from numba.core import types, errors +from numba.tests.support import TestCase, MemoryLeakMixin +from numba.np import numpy_support + + +force_pyobj_flags = {'forceobj': True} +no_pyobj_flags = {'nopython': True} + + +def int_tuple_iter_usecase(): + res = 0 + for i in (1, 2, 99, 3): + res += i + return res + +def float_tuple_iter_usecase(): + res = 0.0 + for i in (1.5, 2.0, 99.3, 3.4): + res += i + return res + +def tuple_tuple_iter_usecase(): + # Recursively homogeneous tuple type + res = 0.0 + for i in ((1.5, 2.0), (99.3, 3.4), (1.8, 2.5)): + for j in i: + res += j + res = res * 2 + return res + +def enumerate_nested_tuple_usecase(): + res = 0.0 + for i, j in enumerate(((1.5, 
2.0), (99.3, 3.4), (1.8, 2.5))): + for l in j: + res += i * l + res = res * 2 + return res + +def nested_enumerate_usecase(): + res = 0.0 + for i, (j, k) in enumerate(enumerate(((1.5, 2.0), (99.3, 3.4), (1.8, 2.5)))): + for l in k: + res += i * j * l + res = res * 2 + return res + + +def enumerate_array_usecase(): + res = 0 + arrays = (np.ones(4), np.ones(5)) + for i, v in enumerate(arrays): + res += v.sum() + return res + + +def scalar_iter_usecase(iterable): + res = 0.0 + for x in iterable: + res += x + return res + +def record_iter_usecase(iterable): + res = 0.0 + for x in iterable: + res += x.a * x.b + return res + +def record_iter_mutate_usecase(iterable): + for x in iterable: + x.a = x.a + x.b + + +record_dtype = np.dtype([('a', np.float64), + ('b', np.int32), + ]) + + +class IterationTest(MemoryLeakMixin, TestCase): + + def run_nullary_func(self, pyfunc, flags): + cfunc = jit((), **flags)(pyfunc) + expected = pyfunc() + self.assertPreciseEqual(cfunc(), expected) + + def test_int_tuple_iter(self, flags=force_pyobj_flags): + self.run_nullary_func(int_tuple_iter_usecase, flags) + + def test_int_tuple_iter_npm(self): + self.test_int_tuple_iter(flags=no_pyobj_flags) + + # Type inference on tuples used to be hardcoded for ints, check + # that it works for other types. 
+ + def test_float_tuple_iter(self, flags=force_pyobj_flags): + self.run_nullary_func(float_tuple_iter_usecase, flags) + + def test_float_tuple_iter_npm(self): + self.test_float_tuple_iter(flags=no_pyobj_flags) + + def test_tuple_tuple_iter(self, flags=force_pyobj_flags): + self.run_nullary_func(tuple_tuple_iter_usecase, flags) + + def test_tuple_tuple_iter_npm(self): + self.test_tuple_tuple_iter(flags=no_pyobj_flags) + + def test_enumerate_nested_tuple(self, flags=force_pyobj_flags): + self.run_nullary_func(enumerate_nested_tuple_usecase, flags) + + def test_enumerate_nested_tuple_npm(self): + self.test_enumerate_nested_tuple(flags=no_pyobj_flags) + + def test_nested_enumerate(self, flags=force_pyobj_flags): + self.run_nullary_func(nested_enumerate_usecase, flags) + + def test_nested_enumerate_npm(self): + self.test_nested_enumerate(flags=no_pyobj_flags) + + def test_enumerate_refct(self): + # Test issue 3473 + pyfunc = enumerate_array_usecase + cfunc = njit((),)(pyfunc) + expected = pyfunc() + self.assertPreciseEqual(cfunc(), expected) + + def run_array_1d(self, item_type, arg, flags): + # Iteration over a 1d numpy array + pyfunc = scalar_iter_usecase + cfunc = jit(item_type(types.Array(item_type, 1, 'A'),), **flags)(pyfunc) + self.assertPreciseEqual(cfunc(arg), pyfunc(arg)) + + def test_array_1d_float(self, flags=force_pyobj_flags): + self.run_array_1d(types.float64, np.arange(5.0), flags) + + def test_array_1d_float_npm(self): + self.test_array_1d_float(no_pyobj_flags) + + def test_array_1d_complex(self, flags=force_pyobj_flags): + self.run_array_1d(types.complex128, np.arange(5.0) * 1.0j, flags) + + def test_array_1d_complex_npm(self): + self.test_array_1d_complex(no_pyobj_flags) + + def test_array_1d_record(self, flags=force_pyobj_flags): + pyfunc = record_iter_usecase + item_type = numpy_support.from_dtype(record_dtype) + cfunc = jit((types.Array(item_type, 1, 'A'),), **flags)(pyfunc) + arr = np.recarray(3, dtype=record_dtype) + for i in range(3): + arr[i].a 
= float(i * 2) + arr[i].b = i + 2 + got = pyfunc(arr) + self.assertPreciseEqual(cfunc(arr), got) + + def test_array_1d_record_npm(self): + self.test_array_1d_record(no_pyobj_flags) + + def test_array_1d_record_mutate_npm(self, flags=no_pyobj_flags): + pyfunc = record_iter_mutate_usecase + item_type = numpy_support.from_dtype(record_dtype) + cfunc = jit((types.Array(item_type, 1, 'A'),), **flags)(pyfunc) + arr = np.recarray(3, dtype=record_dtype) + for i in range(3): + arr[i].a = float(i * 2) + arr[i].b = i + 2 + expected = arr.copy() + pyfunc(expected) + got = arr.copy() + cfunc(got) + self.assertPreciseEqual(expected, got) + + def test_array_1d_record_mutate(self): + self.test_array_1d_record_mutate_npm(flags=force_pyobj_flags) + + def test_array_0d_raises(self): + + def foo(x): + for i in x: + pass + + # 0d is typing error + with self.assertRaises(errors.TypingError) as raises: + aryty = types.Array(types.int32, 0, 'C') + njit((aryty,))(foo) + + self.assertIn("0-d array", str(raises.exception)) + + def test_tuple_iter_issue1504(self): + # The issue is due to `row` being typed as heterogeneous tuple. 
+ def bar(x, y): + total = 0 + for row in zip(x, y): + total += row[0] + row[1] + + return total + + x = y = np.arange(3, dtype=np.int32) + aryty = types.Array(types.int32, 1, 'C') + cfunc = njit((aryty, aryty))(bar) + + expect = bar(x, y) + got = cfunc(x, y) + self.assertEqual(expect, got) + + def test_tuple_of_arrays_iter(self): + # We used to leak a reference to each element of the tuple + def bar(arrs): + total = 0 + for arr in arrs: + total += arr[0] + + return total + + x = y = np.arange(3, dtype=np.int32) + aryty = types.Array(types.int32, 1, 'C') + cfunc = njit((types.containers.UniTuple(aryty, 2),))(bar) + + expect = bar((x, y)) + got = cfunc((x, y)) + self.assertEqual(expect, got) + + + +class TestIterationRefct(MemoryLeakMixin, TestCase): + def test_zip_with_arrays(self): + @njit + def foo(sequence): + c = 0 + for a, b in zip(range(len(sequence)), sequence): + c += (a + 1) * b.sum() + return + + sequence = [np.arange(1 + i) for i in range(10)] + self.assertEqual(foo(sequence), foo.py_func(sequence)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_jit_module.py b/venv/lib/python3.10/site-packages/numba/tests/test_jit_module.py new file mode 100644 index 0000000000000000000000000000000000000000..eb9697aecd49ce4e15494a484f46c41d8d82e9f8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_jit_module.py @@ -0,0 +1,146 @@ +import os +import sys +import inspect +import contextlib +import numpy as np +import logging +from io import StringIO + +import unittest +from numba.tests.support import SerialMixin, create_temp_module +from numba.core import dispatcher + + +@contextlib.contextmanager +def captured_logs(l): + try: + buffer = StringIO() + handler = logging.StreamHandler(buffer) + l.addHandler(handler) + yield buffer + finally: + l.removeHandler(handler) + + +class TestJitModule(SerialMixin, unittest.TestCase): + + source_lines = """ +from numba import jit_module + +def 
inc(x): + return x + 1 + +def add(x, y): + return x + y + +def inc_add(x): + y = inc(x) + return add(x, y) + +import numpy as np +mean = np.mean + +class Foo(object): + pass + +jit_module({jit_options}) +""" + + def test_create_temp_jitted_module(self): + sys_path_original = list(sys.path) + sys_modules_original = dict(sys.modules) + with create_temp_module(self.source_lines) as test_module: + temp_module_dir = os.path.dirname(test_module.__file__) + self.assertEqual(temp_module_dir, sys.path[0]) + self.assertEqual(sys.path[1:], sys_path_original) + self.assertTrue(test_module.__name__ in sys.modules) + # Test that modifications to sys.path / sys.modules are reverted + self.assertEqual(sys.path, sys_path_original) + self.assertEqual(sys.modules, sys_modules_original) + + def test_create_temp_jitted_module_with_exception(self): + try: + sys_path_original = list(sys.path) + sys_modules_original = dict(sys.modules) + with create_temp_module(self.source_lines): + raise ValueError("Something went wrong!") + except ValueError: + # Test that modifications to sys.path / sys.modules are reverted + self.assertEqual(sys.path, sys_path_original) + self.assertEqual(sys.modules, sys_modules_original) + + def test_jit_module(self): + with create_temp_module(self.source_lines) as test_module: + self.assertIsInstance(test_module.inc, dispatcher.Dispatcher) + self.assertIsInstance(test_module.add, dispatcher.Dispatcher) + self.assertIsInstance(test_module.inc_add, dispatcher.Dispatcher) + self.assertTrue(test_module.mean is np.mean) + self.assertTrue(inspect.isclass(test_module.Foo)) + + # Test output of jitted functions is as expected + x, y = 1.7, 2.3 + self.assertEqual(test_module.inc(x), + test_module.inc.py_func(x)) + self.assertEqual(test_module.add(x, y), + test_module.add.py_func(x, y)) + self.assertEqual(test_module.inc_add(x), + test_module.inc_add.py_func(x)) + + def test_jit_module_jit_options(self): + jit_options = {"nopython": True, + "nogil": False, + "error_model": 
"numpy", + "boundscheck": False, + } + with create_temp_module(self.source_lines, + **jit_options) as test_module: + self.assertEqual(test_module.inc.targetoptions, jit_options) + + def test_jit_module_jit_options_override(self): + source_lines = """ +from numba import jit, jit_module + +@jit(nogil=True, forceobj=True) +def inc(x): + return x + 1 + +def add(x, y): + return x + y + +jit_module({jit_options}) +""" + jit_options = {"nopython": True, + "error_model": "numpy", + "boundscheck": False, + } + with create_temp_module(source_lines=source_lines, + **jit_options) as test_module: + self.assertEqual(test_module.add.targetoptions, jit_options) + # Test that manual jit-wrapping overrides jit_module options, + # `forceobj` will automatically apply `nopython=False`. + self.assertEqual(test_module.inc.targetoptions, + {'nogil': True, 'forceobj': True, + 'boundscheck': None, 'nopython': False}) + + def test_jit_module_logging_output(self): + logger = logging.getLogger('numba.core.decorators') + logger.setLevel(logging.DEBUG) + jit_options = {"nopython": True, + "error_model": "numpy", + } + with captured_logs(logger) as logs: + with create_temp_module(self.source_lines, + **jit_options) as test_module: + logs = logs.getvalue() + expected = ["Auto decorating function", + "from module {}".format(test_module.__name__), + "with jit and options: {}".format(jit_options)] + self.assertTrue(all(i in logs for i in expected)) + + def test_jit_module_logging_level(self): + logger = logging.getLogger('numba.core.decorators') + # Test there's no logging for INFO level + logger.setLevel(logging.INFO) + with captured_logs(logger) as logs: + with create_temp_module(self.source_lines): + self.assertEqual(logs.getvalue(), '') diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_jitclasses.py b/venv/lib/python3.10/site-packages/numba/tests/test_jitclasses.py new file mode 100644 index 0000000000000000000000000000000000000000..2f224af9e58b9f636e65d43aed66f39b17707fa8 --- 
/dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_jitclasses.py @@ -0,0 +1,2020 @@ +import ctypes +import itertools +import pickle +import random +import typing as pt +import unittest + +from collections import OrderedDict + +import numpy as np +from numba import (boolean, deferred_type, float32, float64, int16, int32, + njit, optional, typeof) +from numba.core import errors, types +from numba.core.dispatcher import Dispatcher +from numba.core.errors import LoweringError, TypingError +from numba.core.runtime.nrt import MemInfo +from numba.experimental import jitclass +from numba.experimental.jitclass import _box +from numba.experimental.jitclass.base import JitClassType +from numba.tests.support import MemoryLeakMixin, TestCase, skip_if_typeguard +from numba.tests.support import skip_unless_scipy + + +class TestClass1(object): + def __init__(self, x, y, z=1, *, a=5): + self.x = x + self.y = y + self.z = z + self.a = a + + +class TestClass2(object): + def __init__(self, x, y, z=1, *args, a=5): + self.x = x + self.y = y + self.z = z + self.args = args + self.a = a + + +def _get_meminfo(box): + ptr = _box.box_get_meminfoptr(box) + mi = MemInfo(ptr) + mi.acquire() + return mi + + +class TestJitClass(TestCase, MemoryLeakMixin): + + def _check_spec(self, spec=None, test_cls=None, all_expected=None): + if test_cls is None: + @jitclass(spec) + class Test(object): + + def __init__(self): + pass + test_cls = Test + + clsty = test_cls.class_type.instance_type + names = list(clsty.struct.keys()) + values = list(clsty.struct.values()) + + if all_expected is None: + if isinstance(spec, OrderedDict): + all_expected = spec.items() + else: + all_expected = spec + + assert all_expected is not None + + self.assertEqual(len(names), len(all_expected)) + for got, expected in zip(zip(names, values), all_expected): + self.assertEqual(got[0], expected[0]) + self.assertEqual(got[1], expected[1]) + + def test_ordereddict_spec(self): + spec = OrderedDict() + spec["x"] = int32 
+ spec["y"] = float32 + self._check_spec(spec) + + def test_list_spec(self): + spec = [("x", int32), + ("y", float32)] + self._check_spec(spec) + + def test_type_annotations(self): + spec = [("x", int32)] + + @jitclass(spec) + class Test1(object): + x: int + y: pt.List[float] + + def __init__(self): + pass + + self._check_spec(spec, Test1, spec + [("y", types.ListType(float64))]) + + def test_type_annotation_inheritance(self): + + class Foo: + x: int + + @jitclass + class Bar(Foo): + y: float + + def __init__(self, value: float) -> None: + self.x = int(value) + self.y = value + + self._check_spec( + test_cls=Bar, all_expected=[("x", typeof(0)), ("y", typeof(0.0))] + ) + + def test_spec_errors(self): + spec1 = [("x", int), ("y", float32[:])] + spec2 = [(1, int32), ("y", float32[:])] + + class Test(object): + + def __init__(self): + pass + + with self.assertRaises(TypeError) as raises: + jitclass(Test, spec1) + self.assertIn("spec values should be Numba type instances", + str(raises.exception)) + with self.assertRaises(TypeError) as raises: + jitclass(Test, spec2) + self.assertEqual(str(raises.exception), + "spec keys should be strings, got 1") + + def test_init_errors(self): + + @jitclass([]) + class Test: + def __init__(self): + return 7 + + with self.assertRaises(errors.TypingError) as raises: + Test() + + self.assertIn("__init__() should return None, not", + str(raises.exception)) + + def _make_Float2AndArray(self): + spec = OrderedDict() + spec["x"] = float32 + spec["y"] = float32 + spec["arr"] = float32[:] + + @jitclass(spec) + class Float2AndArray(object): + + def __init__(self, x, y, arr): + self.x = x + self.y = y + self.arr = arr + + def add(self, val): + self.x += val + self.y += val + return val + + return Float2AndArray + + def _make_Vector2(self): + spec = OrderedDict() + spec["x"] = int32 + spec["y"] = int32 + + @jitclass(spec) + class Vector2(object): + + def __init__(self, x, y): + self.x = x + self.y = y + + return Vector2 + + def 
test_jit_class_1(self): + Float2AndArray = self._make_Float2AndArray() + Vector2 = self._make_Vector2() + + @njit + def bar(obj): + return obj.x + obj.y + + @njit + def foo(a): + obj = Float2AndArray(1, 2, a) + obj.add(123) + + vec = Vector2(3, 4) + return bar(obj), bar(vec), obj.arr + + inp = np.ones(10, dtype=np.float32) + a, b, c = foo(inp) + self.assertEqual(a, 123 + 1 + 123 + 2) + self.assertEqual(b, 3 + 4) + self.assertPreciseEqual(c, inp) + + def test_jitclass_usage_from_python(self): + Float2AndArray = self._make_Float2AndArray() + + @njit + def identity(obj): + return obj + + @njit + def retrieve_attributes(obj): + return obj.x, obj.y, obj.arr + + arr = np.arange(10, dtype=np.float32) + obj = Float2AndArray(1, 2, arr) + obj_meminfo = _get_meminfo(obj) + self.assertEqual(obj_meminfo.refcount, 2) + self.assertEqual(obj_meminfo.data, _box.box_get_dataptr(obj)) + self.assertEqual(obj._numba_type_.class_type, + Float2AndArray.class_type) + # Use jit class instance in numba + other = identity(obj) + other_meminfo = _get_meminfo(other) # duplicates MemInfo object to obj + self.assertEqual(obj_meminfo.refcount, 4) + self.assertEqual(other_meminfo.refcount, 4) + self.assertEqual(other_meminfo.data, _box.box_get_dataptr(other)) + self.assertEqual(other_meminfo.data, obj_meminfo.data) + + # Check dtor + del other, other_meminfo + self.assertEqual(obj_meminfo.refcount, 2) + + # Check attributes + out_x, out_y, out_arr = retrieve_attributes(obj) + self.assertEqual(out_x, 1) + self.assertEqual(out_y, 2) + self.assertIs(out_arr, arr) + + # Access attributes from python + self.assertEqual(obj.x, 1) + self.assertEqual(obj.y, 2) + self.assertIs(obj.arr, arr) + + # Access methods from python + self.assertEqual(obj.add(123), 123) + self.assertEqual(obj.x, 1 + 123) + self.assertEqual(obj.y, 2 + 123) + + # Setter from python + obj.x = 333 + obj.y = 444 + obj.arr = newarr = np.arange(5, dtype=np.float32) + self.assertEqual(obj.x, 333) + self.assertEqual(obj.y, 444) + 
self.assertIs(obj.arr, newarr) + + def test_jitclass_datalayout(self): + spec = OrderedDict() + # Boolean has different layout as value vs data + spec["val"] = boolean + + @jitclass(spec) + class Foo(object): + + def __init__(self, val): + self.val = val + + self.assertTrue(Foo(True).val) + self.assertFalse(Foo(False).val) + + def test_deferred_type(self): + node_type = deferred_type() + + spec = OrderedDict() + spec["data"] = float32 + spec["next"] = optional(node_type) + + @njit + def get_data(node): + return node.data + + @jitclass(spec) + class LinkedNode(object): + + def __init__(self, data, next): + self.data = data + self.next = next + + def get_next_data(self): + # use deferred type as argument + return get_data(self.next) + + def append_to_tail(self, other): + cur = self + while cur.next is not None: + cur = cur.next + cur.next = other + + node_type.define(LinkedNode.class_type.instance_type) + + first = LinkedNode(123, None) + self.assertEqual(first.data, 123) + self.assertIsNone(first.next) + + second = LinkedNode(321, first) + + first_meminfo = _get_meminfo(first) + second_meminfo = _get_meminfo(second) + self.assertEqual(first_meminfo.refcount, 3) + self.assertEqual(second.next.data, first.data) + self.assertEqual(first_meminfo.refcount, 3) + self.assertEqual(second_meminfo.refcount, 2) + + # Test using deferred type as argument + first_val = second.get_next_data() + self.assertEqual(first_val, first.data) + + # Check setattr (issue #2606) + self.assertIsNone(first.next) + second.append_to_tail(LinkedNode(567, None)) + self.assertIsNotNone(first.next) + self.assertEqual(first.next.data, 567) + self.assertIsNone(first.next.next) + second.append_to_tail(LinkedNode(678, None)) + self.assertIsNotNone(first.next.next) + self.assertEqual(first.next.next.data, 678) + + # Check ownership + self.assertEqual(first_meminfo.refcount, 3) + del second, second_meminfo + self.assertEqual(first_meminfo.refcount, 2) + + def test_c_structure(self): + spec = OrderedDict() 
+ spec["a"] = int32 + spec["b"] = int16 + spec["c"] = float64 + + @jitclass(spec) + class Struct(object): + + def __init__(self, a, b, c): + self.a = a + self.b = b + self.c = c + + st = Struct(0xabcd, 0xef, 3.1415) + + class CStruct(ctypes.Structure): + _fields_ = [ + ("a", ctypes.c_int32), + ("b", ctypes.c_int16), + ("c", ctypes.c_double), + ] + + ptr = ctypes.c_void_p(_box.box_get_dataptr(st)) + cstruct = ctypes.cast(ptr, ctypes.POINTER(CStruct))[0] + self.assertEqual(cstruct.a, st.a) + self.assertEqual(cstruct.b, st.b) + self.assertEqual(cstruct.c, st.c) + + def test_is(self): + Vector = self._make_Vector2() + vec_a = Vector(1, 2) + + @njit + def do_is(a, b): + return a is b + + with self.assertRaises(LoweringError) as raises: + # trigger compilation + do_is(vec_a, vec_a) + self.assertIn("no default `is` implementation", str(raises.exception)) + + def test_isinstance(self): + Vector2 = self._make_Vector2() + vec = Vector2(1, 2) + self.assertIsInstance(vec, Vector2) + + def test_subclassing(self): + Vector2 = self._make_Vector2() + with self.assertRaises(TypeError) as raises: + class SubV(Vector2): + pass + self.assertEqual(str(raises.exception), + "cannot subclass from a jitclass") + + def test_base_class(self): + class Base(object): + + def what(self): + return self.attr + + @jitclass([("attr", int32)]) + class Test(Base): + + def __init__(self, attr): + self.attr = attr + + obj = Test(123) + self.assertEqual(obj.what(), 123) + + def test_globals(self): + + class Mine(object): + constant = 123 + + def __init__(self): + pass + + with self.assertRaises(TypeError) as raises: + jitclass(Mine) + + self.assertEqual(str(raises.exception), + "class members are not yet supported: constant") + + def test_user_getter_setter(self): + @jitclass([("attr", int32)]) + class Foo(object): + + def __init__(self, attr): + self.attr = attr + + @property + def value(self): + return self.attr + 1 + + @value.setter + def value(self, val): + self.attr = val - 1 + + foo = Foo(123) + 
self.assertEqual(foo.attr, 123) + # Getter + self.assertEqual(foo.value, 123 + 1) + # Setter + foo.value = 789 + self.assertEqual(foo.attr, 789 - 1) + self.assertEqual(foo.value, 789) + + # Test nopython mode usage of getter and setter + @njit + def bar(foo, val): + a = foo.value + foo.value = val + b = foo.value + c = foo.attr + return a, b, c + + a, b, c = bar(foo, 567) + self.assertEqual(a, 789) + self.assertEqual(b, 567) + self.assertEqual(c, 567 - 1) + + def test_user_deleter_error(self): + class Foo(object): + + def __init__(self): + pass + + @property + def value(self): + return 1 + + @value.deleter + def value(self): + pass + + with self.assertRaises(TypeError) as raises: + jitclass(Foo) + self.assertEqual(str(raises.exception), + "deleter is not supported: value") + + def test_name_shadowing_error(self): + class Foo(object): + + def __init__(self): + pass + + @property + def my_property(self): + pass + + def my_method(self): + pass + + with self.assertRaises(NameError) as raises: + jitclass(Foo, [("my_property", int32)]) + self.assertEqual(str(raises.exception), "name shadowing: my_property") + + with self.assertRaises(NameError) as raises: + jitclass(Foo, [("my_method", int32)]) + self.assertEqual(str(raises.exception), "name shadowing: my_method") + + def test_distinct_classes(self): + # Different classes with the same names shouldn't confuse the compiler + @jitclass([("x", int32)]) + class Foo(object): + + def __init__(self, x): + self.x = x + 2 + + def run(self): + return self.x + 1 + + FirstFoo = Foo + + @jitclass([("x", int32)]) + class Foo(object): + + def __init__(self, x): + self.x = x - 2 + + def run(self): + return self.x - 1 + + SecondFoo = Foo + foo = FirstFoo(5) + self.assertEqual(foo.x, 7) + self.assertEqual(foo.run(), 8) + foo = SecondFoo(5) + self.assertEqual(foo.x, 3) + self.assertEqual(foo.run(), 2) + + def test_parameterized(self): + class MyClass(object): + + def __init__(self, value): + self.value = value + + def 
create_my_class(value): + cls = jitclass(MyClass, [("value", typeof(value))]) + return cls(value) + + a = create_my_class(123) + self.assertEqual(a.value, 123) + + b = create_my_class(12.3) + self.assertEqual(b.value, 12.3) + + c = create_my_class(np.array([123])) + np.testing.assert_equal(c.value, [123]) + + d = create_my_class(np.array([12.3])) + np.testing.assert_equal(d.value, [12.3]) + + def test_protected_attrs(self): + spec = { + "value": int32, + "_value": float32, + "__value": int32, + "__value__": int32, + } + + @jitclass(spec) + class MyClass(object): + + def __init__(self, value): + self.value = value + self._value = value / 2 + self.__value = value * 2 + self.__value__ = value - 1 + + @property + def private_value(self): + return self.__value + + @property + def _inner_value(self): + return self._value + + @_inner_value.setter + def _inner_value(self, v): + self._value = v + + @property + def __private_value(self): + return self.__value + + @__private_value.setter + def __private_value(self, v): + self.__value = v + + def swap_private_value(self, new): + old = self.__private_value + self.__private_value = new + return old + + def _protected_method(self, factor): + return self._value * factor + + def __private_method(self, factor): + return self.__value * factor + + def check_private_method(self, factor): + return self.__private_method(factor) + + value = 123 + inst = MyClass(value) + # test attributes + self.assertEqual(inst.value, value) + self.assertEqual(inst._value, value / 2) + self.assertEqual(inst.private_value, value * 2) + # test properties + self.assertEqual(inst._inner_value, inst._value) + freeze_inst_value = inst._value + inst._inner_value -= 1 + self.assertEqual(inst._inner_value, freeze_inst_value - 1) + + self.assertEqual(inst.swap_private_value(321), value * 2) + self.assertEqual(inst.swap_private_value(value * 2), 321) + # test methods + self.assertEqual(inst._protected_method(3), inst._value * 3) + 
self.assertEqual(inst.check_private_method(3), inst.private_value * 3) + # test special + self.assertEqual(inst.__value__, value - 1) + inst.__value__ -= 100 + self.assertEqual(inst.__value__, value - 101) + + # test errors + @njit + def access_dunder(inst): + return inst.__value + + with self.assertRaises(errors.TypingError) as raises: + access_dunder(inst) + # It will appear as "_TestJitClass__value" because the `access_dunder` + # is under the scope of "TestJitClass". + self.assertIn("_TestJitClass__value", str(raises.exception)) + + with self.assertRaises(AttributeError) as raises: + access_dunder.py_func(inst) + self.assertIn("_TestJitClass__value", str(raises.exception)) + + @skip_if_typeguard + def test_annotations(self): + """ + Methods with annotations should compile fine (issue #1911). + """ + from .annotation_usecases import AnnotatedClass + + spec = {"x": int32} + cls = jitclass(AnnotatedClass, spec) + + obj = cls(5) + self.assertEqual(obj.x, 5) + self.assertEqual(obj.add(2), 7) + + def test_docstring(self): + + @jitclass + class Apple(object): + "Class docstring" + + def __init__(self): + "init docstring" + + def foo(self): + "foo method docstring" + + @property + def aval(self): + "aval property docstring" + + self.assertEqual(Apple.__doc__, "Class docstring") + self.assertEqual(Apple.__init__.__doc__, "init docstring") + self.assertEqual(Apple.foo.__doc__, "foo method docstring") + self.assertEqual(Apple.aval.__doc__, "aval property docstring") + + def test_kwargs(self): + spec = [("a", int32), + ("b", float64)] + + @jitclass(spec) + class TestClass(object): + def __init__(self, x, y, z): + self.a = x * y + self.b = z + + x = 2 + y = 2 + z = 1.1 + kwargs = {"y": y, "z": z} + tc = TestClass(x=2, **kwargs) + self.assertEqual(tc.a, x * y) + self.assertEqual(tc.b, z) + + def test_default_args(self): + spec = [("x", int32), + ("y", int32), + ("z", int32)] + + @jitclass(spec) + class TestClass(object): + def __init__(self, x, y, z=1): + self.x = x + self.y 
= y + self.z = z + + tc = TestClass(1, 2, 3) + self.assertEqual(tc.x, 1) + self.assertEqual(tc.y, 2) + self.assertEqual(tc.z, 3) + + tc = TestClass(1, 2) + self.assertEqual(tc.x, 1) + self.assertEqual(tc.y, 2) + self.assertEqual(tc.z, 1) + + tc = TestClass(y=2, z=5, x=1) + self.assertEqual(tc.x, 1) + self.assertEqual(tc.y, 2) + self.assertEqual(tc.z, 5) + + def test_default_args_keyonly(self): + spec = [("x", int32), + ("y", int32), + ("z", int32), + ("a", int32)] + + TestClass = jitclass(TestClass1, spec) + + tc = TestClass(2, 3) + self.assertEqual(tc.x, 2) + self.assertEqual(tc.y, 3) + self.assertEqual(tc.z, 1) + self.assertEqual(tc.a, 5) + + tc = TestClass(y=4, x=2, a=42, z=100) + self.assertEqual(tc.x, 2) + self.assertEqual(tc.y, 4) + self.assertEqual(tc.z, 100) + self.assertEqual(tc.a, 42) + + tc = TestClass(y=4, x=2, a=42) + self.assertEqual(tc.x, 2) + self.assertEqual(tc.y, 4) + self.assertEqual(tc.z, 1) + self.assertEqual(tc.a, 42) + + tc = TestClass(y=4, x=2) + self.assertEqual(tc.x, 2) + self.assertEqual(tc.y, 4) + self.assertEqual(tc.z, 1) + self.assertEqual(tc.a, 5) + + def test_default_args_starargs_and_keyonly(self): + spec = [("x", int32), + ("y", int32), + ("z", int32), + ("args", types.UniTuple(int32, 2)), + ("a", int32)] + + with self.assertRaises(errors.UnsupportedError) as raises: + jitclass(TestClass2, spec) + + msg = "VAR_POSITIONAL argument type unsupported" + self.assertIn(msg, str(raises.exception)) + + def test_generator_method(self): + spec = [] + + @jitclass(spec) + class TestClass(object): + def __init__(self): + pass + + def gen(self, niter): + for i in range(niter): + yield np.arange(i) + + def expected_gen(niter): + for i in range(niter): + yield np.arange(i) + + for niter in range(10): + for expect, got in zip(expected_gen(niter), TestClass().gen(niter)): + self.assertPreciseEqual(expect, got) + + def test_getitem(self): + spec = [("data", int32[:])] + + @jitclass(spec) + class TestClass(object): + def __init__(self): + self.data = 
np.zeros(10, dtype=np.int32) + + def __setitem__(self, key, data): + self.data[key] = data + + def __getitem__(self, key): + return self.data[key] + + @njit + def create_and_set_indices(): + t = TestClass() + t[1] = 1 + t[2] = 2 + t[3] = 3 + return t + + @njit + def get_index(t, n): + return t[n] + + t = create_and_set_indices() + self.assertEqual(get_index(t, 1), 1) + self.assertEqual(get_index(t, 2), 2) + self.assertEqual(get_index(t, 3), 3) + + def test_getitem_unbox(self): + spec = [("data", int32[:])] + + @jitclass(spec) + class TestClass(object): + def __init__(self): + self.data = np.zeros(10, dtype=np.int32) + + def __setitem__(self, key, data): + self.data[key] = data + + def __getitem__(self, key): + return self.data[key] + + t = TestClass() + t[1] = 10 + + @njit + def set2return1(t): + t[2] = 20 + return t[1] + + t_1 = set2return1(t) + self.assertEqual(t_1, 10) + self.assertEqual(t[2], 20) + + def test_getitem_complex_key(self): + spec = [("data", int32[:, :])] + + @jitclass(spec) + class TestClass(object): + def __init__(self): + self.data = np.zeros((10, 10), dtype=np.int32) + + def __setitem__(self, key, data): + self.data[int(key.real), int(key.imag)] = data + + def __getitem__(self, key): + return self.data[int(key.real), int(key.imag)] + + t = TestClass() + + t[complex(1, 1)] = 3 + + @njit + def get_key(t, real, imag): + return t[complex(real, imag)] + + @njit + def set_key(t, real, imag, data): + t[complex(real, imag)] = data + + self.assertEqual(get_key(t, 1, 1), 3) + set_key(t, 2, 2, 4) + self.assertEqual(t[complex(2, 2)], 4) + + def test_getitem_tuple_key(self): + spec = [("data", int32[:, :])] + + @jitclass(spec) + class TestClass(object): + def __init__(self): + self.data = np.zeros((10, 10), dtype=np.int32) + + def __setitem__(self, key, data): + self.data[key[0], key[1]] = data + + def __getitem__(self, key): + return self.data[key[0], key[1]] + + t = TestClass() + t[1, 1] = 11 + + @njit + def get11(t): + return t[1, 1] + + @njit + def 
set22(t, data): + t[2, 2] = data + + self.assertEqual(get11(t), 11) + set22(t, 22) + self.assertEqual(t[2, 2], 22) + + def test_getitem_slice_key(self): + spec = [("data", int32[:])] + + @jitclass(spec) + class TestClass(object): + def __init__(self): + self.data = np.zeros(10, dtype=np.int32) + + def __setitem__(self, slc, data): + self.data[slc.start] = data + self.data[slc.stop] = data + slc.step + + def __getitem__(self, slc): + return self.data[slc.start] + + t = TestClass() + # set t.data[1] = 1 and t.data[5] = 2 + t[1:5:1] = 1 + + self.assertEqual(t[1:1:1], 1) + self.assertEqual(t[5:5:5], 2) + + @njit + def get5(t): + return t[5:6:1] + + self.assertEqual(get5(t), 2) + + # sets t.data[2] = data, and t.data[6] = data + 1 + @njit + def set26(t, data): + t[2:6:1] = data + + set26(t, 2) + self.assertEqual(t[2:2:1], 2) + self.assertEqual(t[6:6:1], 3) + + def test_jitclass_longlabel_not_truncated(self): + # See issue #3872, llvm 7 introduced a max label length of 1024 chars + # Numba ships patched llvm 7.1 (ppc64le) and patched llvm 8 to undo this + # change, this test is here to make sure long labels are ok: + alphabet = [chr(ord("a") + x) for x in range(26)] + + spec = [(letter * 10, float64) for letter in alphabet] + spec.extend([(letter.upper() * 10, float64) for letter in alphabet]) + + @jitclass(spec) + class TruncatedLabel(object): + def __init__(self,): + self.aaaaaaaaaa = 10. + + def meth1(self): + self.bbbbbbbbbb = random.gauss(self.aaaaaaaaaa, self.aaaaaaaaaa) + + def meth2(self): + self.meth1() + + # unpatched LLVMs will raise here... 
+ TruncatedLabel().meth2() + + def test_pickling(self): + @jitclass + class PickleTestSubject(object): + def __init__(self): + pass + + inst = PickleTestSubject() + ty = typeof(inst) + self.assertIsInstance(ty, types.ClassInstanceType) + pickled = pickle.dumps(ty) + self.assertIs(pickle.loads(pickled), ty) + + def test_static_methods(self): + @jitclass([("x", int32)]) + class Test1: + def __init__(self, x): + self.x = x + + def increase(self, y): + self.x = self.add(self.x, y) + return self.x + + @staticmethod + def add(a, b): + return a + b + + @staticmethod + def sub(a, b): + return a - b + + @jitclass([("x", int32)]) + class Test2: + def __init__(self, x): + self.x = x + + def increase(self, y): + self.x = self.add(self.x, y) + return self.x + + @staticmethod + def add(a, b): + return a - b + + self.assertIsInstance(Test1.add, Dispatcher) + self.assertIsInstance(Test1.sub, Dispatcher) + self.assertIsInstance(Test2.add, Dispatcher) + self.assertNotEqual(Test1.add, Test2.add) + + self.assertEqual(3, Test1.add(1, 2)) + self.assertEqual(-1, Test2.add(1, 2)) + self.assertEqual(4, Test1.sub(6, 2)) + + t1 = Test1(0) + t2 = Test2(0) + self.assertEqual(1, t1.increase(1)) + self.assertEqual(-1, t2.increase(1)) + self.assertEqual(2, t1.add(1, 1)) + self.assertEqual(0, t1.sub(1, 1)) + self.assertEqual(0, t2.add(1, 1)) + self.assertEqual(2j, t1.add(1j, 1j)) + self.assertEqual(1j, t1.sub(2j, 1j)) + self.assertEqual("foobar", t1.add("foo", "bar")) + + with self.assertRaises(AttributeError) as raises: + Test2.sub(3, 1) + self.assertIn("has no attribute 'sub'", + str(raises.exception)) + + with self.assertRaises(TypeError) as raises: + Test1.add(3) + self.assertIn("not enough arguments: expected 2, got 1", + str(raises.exception)) + + # Check error message for calling a static method as a class attr from + # another method (currently unsupported). 
+ + @jitclass([]) + class Test3: + def __init__(self): + pass + + @staticmethod + def a_static_method(a, b): + pass + + def call_static(self): + return Test3.a_static_method(1, 2) + + invalid = Test3() + with self.assertRaises(errors.TypingError) as raises: + invalid.call_static() + + self.assertIn("Unknown attribute 'a_static_method'", + str(raises.exception)) + + def test_jitclass_decorator_usecases(self): + spec = OrderedDict(x=float64) + + @jitclass() + class Test1: + x: float + + def __init__(self): + self.x = 0 + + self.assertIsInstance(Test1, JitClassType) + self.assertDictEqual(Test1.class_type.struct, spec) + + @jitclass(spec=spec) + class Test2: + + def __init__(self): + self.x = 0 + + self.assertIsInstance(Test2, JitClassType) + self.assertDictEqual(Test2.class_type.struct, spec) + + @jitclass + class Test3: + x: float + + def __init__(self): + self.x = 0 + + self.assertIsInstance(Test3, JitClassType) + self.assertDictEqual(Test3.class_type.struct, spec) + + @jitclass(spec) + class Test4: + + def __init__(self): + self.x = 0 + + self.assertIsInstance(Test4, JitClassType) + self.assertDictEqual(Test4.class_type.struct, spec) + + def test_jitclass_function_usecases(self): + spec = OrderedDict(x=float64) + + class AnnotatedTest: + x: float + + def __init__(self): + self.x = 0 + + JitTest1 = jitclass(AnnotatedTest) + self.assertIsInstance(JitTest1, JitClassType) + self.assertDictEqual(JitTest1.class_type.struct, spec) + + class UnannotatedTest: + + def __init__(self): + self.x = 0 + + JitTest2 = jitclass(UnannotatedTest, spec) + self.assertIsInstance(JitTest2, JitClassType) + self.assertDictEqual(JitTest2.class_type.struct, spec) + + def test_jitclass_isinstance(self): + spec = OrderedDict(value=int32) + + @jitclass(spec) + class Foo(object): + def __init__(self, value): + self.value = value + + def getValue(self): + return self.value + + def getValueIncr(self): + return self.value + 1 + + @jitclass(spec) + class Bar(object): + def __init__(self, value): + 
self.value = value + + def getValue(self): + return self.value + + def test_jitclass_isinstance(obj): + if isinstance(obj, (Foo, Bar)): + # call something that both classes implements + x = obj.getValue() + if isinstance(obj, Foo): # something that only Foo implements + return obj.getValueIncr() + x, 'Foo' + else: + return obj.getValue() + x, 'Bar' + else: + return 'no match' + + pyfunc = test_jitclass_isinstance + cfunc = njit(test_jitclass_isinstance) + + self.assertIsInstance(Foo, JitClassType) + self.assertEqual(pyfunc(Foo(3)), cfunc(Foo(3))) + self.assertEqual(pyfunc(Bar(123)), cfunc(Bar(123))) + self.assertEqual(pyfunc(0), cfunc(0)) + + def test_jitclass_unsupported_dunder(self): + with self.assertRaises(TypeError) as e: + @jitclass + class Foo(object): + def __init__(self): + return + + def __enter__(self): + return None + Foo() + self.assertIn("Method '__enter__' is not supported.", str(e.exception)) + + def test_modulename(self): + @jitclass + class TestModname(object): + def __init__(self): + self.x = 12 + + thisModule = __name__ + classModule = TestModname.__module__ + self.assertEqual(thisModule, classModule) + + +class TestJitClassOverloads(MemoryLeakMixin, TestCase): + + class PyList: + def __init__(self): + self.x = [0] + + def append(self, y): + self.x.append(y) + + def clear(self): + self.x.clear() + + def __abs__(self): + return len(self.x) * 7 + + def __bool__(self): + return len(self.x) % 3 != 0 + + def __complex__(self): + c = complex(2) + if self.x: + c += self.x[0] + return c + + def __contains__(self, y): + return y in self.x + + def __float__(self): + f = 3.1415 + if self.x: + f += self.x[0] + return f + + def __int__(self): + i = 5 + if self.x: + i += self.x[0] + return i + + def __len__(self): + return len(self.x) + 1 + + def __str__(self): + if len(self.x) == 0: + return "PyList empty" + else: + return "PyList non-empty" + + @staticmethod + def get_int_wrapper(): + @jitclass([("x", types.intp)]) + class IntWrapper: + def __init__(self, 
value): + self.x = value + + def __eq__(self, other): + return self.x == other.x + + def __hash__(self): + return self.x + + def __lshift__(self, other): + return IntWrapper(self.x << other.x) + + def __rshift__(self, other): + return IntWrapper(self.x >> other.x) + + def __and__(self, other): + return IntWrapper(self.x & other.x) + + def __or__(self, other): + return IntWrapper(self.x | other.x) + + def __xor__(self, other): + return IntWrapper(self.x ^ other.x) + + return IntWrapper + + @staticmethod + def get_float_wrapper(): + @jitclass([("x", types.float64)]) + class FloatWrapper: + + def __init__(self, value): + self.x = value + + def __eq__(self, other): + return self.x == other.x + + def __hash__(self): + return self.x + + def __ge__(self, other): + return self.x >= other.x + + def __gt__(self, other): + return self.x > other.x + + def __le__(self, other): + return self.x <= other.x + + def __lt__(self, other): + return self.x < other.x + + def __add__(self, other): + return FloatWrapper(self.x + other.x) + + def __floordiv__(self, other): + return FloatWrapper(self.x // other.x) + + def __mod__(self, other): + return FloatWrapper(self.x % other.x) + + def __mul__(self, other): + return FloatWrapper(self.x * other.x) + + def __neg__(self, other): + return FloatWrapper(-self.x) + + def __pos__(self, other): + return FloatWrapper(+self.x) + + def __pow__(self, other): + return FloatWrapper(self.x ** other.x) + + def __sub__(self, other): + return FloatWrapper(self.x - other.x) + + def __truediv__(self, other): + return FloatWrapper(self.x / other.x) + + return FloatWrapper + + def assertSame(self, first, second, msg=None): + self.assertEqual(type(first), type(second), msg=msg) + self.assertEqual(first, second, msg=msg) + + def test_overloads(self): + # Check that the dunder methods are exposed on ClassInstanceType. 
+ + JitList = jitclass({"x": types.List(types.intp)})(self.PyList) + + py_funcs = [ + lambda x: abs(x), + lambda x: x.__abs__(), + lambda x: bool(x), + lambda x: x.__bool__(), + lambda x: complex(x), + lambda x: x.__complex__(), + lambda x: 0 in x, # contains + lambda x: x.__contains__(0), + lambda x: float(x), + lambda x: x.__float__(), + lambda x: int(x), + lambda x: x.__int__(), + lambda x: len(x), + lambda x: x.__len__(), + lambda x: str(x), + lambda x: x.__str__(), + lambda x: 1 if x else 0, # truth + ] + jit_funcs = [njit(f) for f in py_funcs] + + py_list = self.PyList() + jit_list = JitList() + for py_f, jit_f in zip(py_funcs, jit_funcs): + self.assertSame(py_f(py_list), py_f(jit_list)) + self.assertSame(py_f(py_list), jit_f(jit_list)) + + py_list.append(2) + jit_list.append(2) + for py_f, jit_f in zip(py_funcs, jit_funcs): + self.assertSame(py_f(py_list), py_f(jit_list)) + self.assertSame(py_f(py_list), jit_f(jit_list)) + + py_list.append(-5) + jit_list.append(-5) + for py_f, jit_f in zip(py_funcs, jit_funcs): + self.assertSame(py_f(py_list), py_f(jit_list)) + self.assertSame(py_f(py_list), jit_f(jit_list)) + + py_list.clear() + jit_list.clear() + for py_f, jit_f in zip(py_funcs, jit_funcs): + self.assertSame(py_f(py_list), py_f(jit_list)) + self.assertSame(py_f(py_list), jit_f(jit_list)) + + def test_bool_fallback(self): + + def py_b(x): + return bool(x) + + jit_b = njit(py_b) + + @jitclass([("x", types.List(types.intp))]) + class LenClass: + def __init__(self, x): + self.x = x + + def __len__(self): + return len(self.x) % 4 + + def append(self, y): + self.x.append(y) + + def pop(self): + self.x.pop(0) + + obj = LenClass([1, 2, 3]) + self.assertTrue(py_b(obj)) + self.assertTrue(jit_b(obj)) + + obj.append(4) + self.assertFalse(py_b(obj)) + self.assertFalse(jit_b(obj)) + + obj.pop() + self.assertTrue(py_b(obj)) + self.assertTrue(jit_b(obj)) + + @jitclass([("y", types.float64)]) + class NormalClass: + def __init__(self, y): + self.y = y + + obj = 
NormalClass(0) + self.assertTrue(py_b(obj)) + self.assertTrue(jit_b(obj)) + + def test_numeric_fallback(self): + def py_c(x): + return complex(x) + + def py_f(x): + return float(x) + + def py_i(x): + return int(x) + + jit_c = njit(py_c) + jit_f = njit(py_f) + jit_i = njit(py_i) + + @jitclass([]) + class FloatClass: + def __init__(self): + pass + + def __float__(self): + return 3.1415 + + obj = FloatClass() + self.assertSame(py_c(obj), complex(3.1415)) + self.assertSame(jit_c(obj), complex(3.1415)) + self.assertSame(py_f(obj), 3.1415) + self.assertSame(jit_f(obj), 3.1415) + + with self.assertRaises(TypeError) as e: + py_i(obj) + self.assertIn("int", str(e.exception)) + with self.assertRaises(TypingError) as e: + jit_i(obj) + self.assertIn("int", str(e.exception)) + + @jitclass([]) + class IntClass: + def __init__(self): + pass + + def __int__(self): + return 7 + + obj = IntClass() + self.assertSame(py_i(obj), 7) + self.assertSame(jit_i(obj), 7) + + with self.assertRaises(TypeError) as e: + py_c(obj) + self.assertIn("complex", str(e.exception)) + with self.assertRaises(TypingError) as e: + jit_c(obj) + self.assertIn("complex", str(e.exception)) + with self.assertRaises(TypeError) as e: + py_f(obj) + self.assertIn("float", str(e.exception)) + with self.assertRaises(TypingError) as e: + jit_f(obj) + self.assertIn("float", str(e.exception)) + + @jitclass([]) + class IndexClass: + def __init__(self): + pass + + def __index__(self): + return 1 + + obj = IndexClass() + + self.assertSame(py_c(obj), complex(1)) + self.assertSame(jit_c(obj), complex(1)) + self.assertSame(py_f(obj), 1.) + self.assertSame(jit_f(obj), 1.) 
+ self.assertSame(py_i(obj), 1) + self.assertSame(jit_i(obj), 1) + + @jitclass([]) + class FloatIntIndexClass: + def __init__(self): + pass + + def __float__(self): + return 3.1415 + + def __int__(self): + return 7 + + def __index__(self): + return 1 + + obj = FloatIntIndexClass() + self.assertSame(py_c(obj), complex(3.1415)) + self.assertSame(jit_c(obj), complex(3.1415)) + self.assertSame(py_f(obj), 3.1415) + self.assertSame(jit_f(obj), 3.1415) + self.assertSame(py_i(obj), 7) + self.assertSame(jit_i(obj), 7) + + def test_arithmetic_logical(self): + IntWrapper = self.get_int_wrapper() + FloatWrapper = self.get_float_wrapper() + + float_py_funcs = [ + lambda x, y: x == y, + lambda x, y: x != y, + lambda x, y: x >= y, + lambda x, y: x > y, + lambda x, y: x <= y, + lambda x, y: x < y, + lambda x, y: x + y, + lambda x, y: x // y, + lambda x, y: x % y, + lambda x, y: x * y, + lambda x, y: x ** y, + lambda x, y: x - y, + lambda x, y: x / y, + ] + int_py_funcs = [ + lambda x, y: x == y, + lambda x, y: x != y, + lambda x, y: x << y, + lambda x, y: x >> y, + lambda x, y: x & y, + lambda x, y: x | y, + lambda x, y: x ^ y, + ] + + test_values = [ + (0.0, 2.0), + (1.234, 3.1415), + (13.1, 1.01), + ] + + def unwrap(value): + return getattr(value, "x", value) + + for jit_f, (x, y) in itertools.product( + map(njit, float_py_funcs), test_values): + + py_f = jit_f.py_func + + expected = py_f(x, y) + jit_x = FloatWrapper(x) + jit_y = FloatWrapper(y) + + check = ( + self.assertEqual + if type(expected) is not float + else self.assertAlmostEqual + ) + check(expected, jit_f(x, y)) + check(expected, unwrap(py_f(jit_x, jit_y))) + check(expected, unwrap(jit_f(jit_x, jit_y))) + + for jit_f, (x, y) in itertools.product( + map(njit, int_py_funcs), test_values): + + py_f = jit_f.py_func + x, y = int(x), int(y) + + expected = py_f(x, y) + jit_x = IntWrapper(x) + jit_y = IntWrapper(y) + + self.assertEqual(expected, jit_f(x, y)) + self.assertEqual(expected, unwrap(py_f(jit_x, jit_y))) + 
self.assertEqual(expected, unwrap(jit_f(jit_x, jit_y))) + + def test_arithmetic_logical_inplace(self): + + # If __i*__ methods are not defined, should fall back to normal methods. + JitIntWrapper = self.get_int_wrapper() + JitFloatWrapper = self.get_float_wrapper() + + PyIntWrapper = JitIntWrapper.mro()[1] + PyFloatWrapper = JitFloatWrapper.mro()[1] + + @jitclass([("x", types.intp)]) + class JitIntUpdateWrapper(PyIntWrapper): + def __init__(self, value): + self.x = value + + def __ilshift__(self, other): + return JitIntUpdateWrapper(self.x << other.x) + + def __irshift__(self, other): + return JitIntUpdateWrapper(self.x >> other.x) + + def __iand__(self, other): + return JitIntUpdateWrapper(self.x & other.x) + + def __ior__(self, other): + return JitIntUpdateWrapper(self.x | other.x) + + def __ixor__(self, other): + return JitIntUpdateWrapper(self.x ^ other.x) + + @jitclass({"x": types.float64}) + class JitFloatUpdateWrapper(PyFloatWrapper): + + def __init__(self, value): + self.x = value + + def __iadd__(self, other): + return JitFloatUpdateWrapper(self.x + 2.718 * other.x) + + def __ifloordiv__(self, other): + return JitFloatUpdateWrapper(self.x * 2.718 // other.x) + + def __imod__(self, other): + return JitFloatUpdateWrapper(self.x % (other.x + 1)) + + def __imul__(self, other): + return JitFloatUpdateWrapper(self.x * other.x + 1) + + def __ipow__(self, other): + return JitFloatUpdateWrapper(self.x ** other.x + 1) + + def __isub__(self, other): + return JitFloatUpdateWrapper(self.x - 3.1415 * other.x) + + def __itruediv__(self, other): + return JitFloatUpdateWrapper((self.x + 1) / other.x) + + PyIntUpdateWrapper = JitIntUpdateWrapper.mro()[1] + PyFloatUpdateWrapper = JitFloatUpdateWrapper.mro()[1] + + def get_update_func(op): + template = f""" +def f(x, y): + x {op}= y + return x +""" + namespace = {} + exec(template, namespace) + return namespace["f"] + + float_py_funcs = [get_update_func(op) for op in [ + "+", "//", "%", "*", "**", "-", "/", + ]] + 
int_py_funcs = [get_update_func(op) for op in [ + "<<", ">>", "&", "|", "^", + ]] + + test_values = [ + (0.0, 2.0), + (1.234, 3.1415), + (13.1, 1.01), + ] + + for jit_f, (py_cls, jit_cls), (x, y) in itertools.product( + map(njit, float_py_funcs), + [ + (PyFloatWrapper, JitFloatWrapper), + (PyFloatUpdateWrapper, JitFloatUpdateWrapper) + ], + test_values): + py_f = jit_f.py_func + + expected = py_f(py_cls(x), py_cls(y)).x + self.assertAlmostEqual(expected, py_f(jit_cls(x), jit_cls(y)).x) + self.assertAlmostEqual(expected, jit_f(jit_cls(x), jit_cls(y)).x) + + for jit_f, (py_cls, jit_cls), (x, y) in itertools.product( + map(njit, int_py_funcs), + [ + (PyIntWrapper, JitIntWrapper), + (PyIntUpdateWrapper, JitIntUpdateWrapper) + ], + test_values): + x, y = int(x), int(y) + py_f = jit_f.py_func + + expected = py_f(py_cls(x), py_cls(y)).x + self.assertEqual(expected, py_f(jit_cls(x), jit_cls(y)).x) + self.assertEqual(expected, jit_f(jit_cls(x), jit_cls(y)).x) + + def test_hash_eq_ne(self): + + class HashEqTest: + x: int + + def __init__(self, x): + self.x = x + + def __hash__(self): + return self.x % 10 + + def __eq__(self, o): + return (self.x - o.x) % 20 == 0 + + class HashEqNeTest(HashEqTest): + def __ne__(self, o): + return (self.x - o.x) % 20 > 1 + + def py_hash(x): + return hash(x) + + def py_eq(x, y): + return x == y + + def py_ne(x, y): + return x != y + + def identity_decorator(f): + return f + + comparisons = [ + (0, 1), # Will give different ne results. 
+ (2, 22), + (7, 10), + (3, 3), + ] + + for base_cls, use_jit in itertools.product( + [HashEqTest, HashEqNeTest], [False, True] + ): + decorator = njit if use_jit else identity_decorator + hash_func = decorator(py_hash) + eq_func = decorator(py_eq) + ne_func = decorator(py_ne) + + jit_cls = jitclass(base_cls) + + for v in [0, 2, 10, 24, -8]: + self.assertEqual(hash_func(jit_cls(v)), v % 10) + + for x, y in comparisons: + self.assertEqual( + eq_func(jit_cls(x), jit_cls(y)), + base_cls(x) == base_cls(y), + ) + self.assertEqual( + ne_func(jit_cls(x), jit_cls(y)), + base_cls(x) != base_cls(y), + ) + + def test_bool_fallback_len(self): + # Check that the fallback to using len(obj) to determine truth of an + # object is implemented correctly as per + # https://docs.python.org/3/library/stdtypes.html#truth-value-testing + # + # Relevant points: + # + # "By default, an object is considered true unless its class defines + # either a __bool__() method that returns False or a __len__() method + # that returns zero, when called with the object." + # + # and: + # + # "Operations and built-in functions that have a Boolean result always + # return 0 or False for false and 1 or True for true, unless otherwise + # stated." 
+ + class NoBoolHasLen: + def __init__(self, val): + self.val = val + + def __len__(self): + return self.val + + def get_bool(self): + return bool(self) + + py_class = NoBoolHasLen + jitted_class = jitclass([('val', types.int64)])(py_class) + + py_class_0_bool = py_class(0).get_bool() + py_class_2_bool = py_class(2).get_bool() + jitted_class_0_bool = jitted_class(0).get_bool() + jitted_class_2_bool = jitted_class(2).get_bool() + + # Truth values from bool(obj) should be equal + self.assertEqual(py_class_0_bool, jitted_class_0_bool) + self.assertEqual(py_class_2_bool, jitted_class_2_bool) + + # Truth values from bool(obj) should be the same type + self.assertEqual(type(py_class_0_bool), type(jitted_class_0_bool)) + self.assertEqual(type(py_class_2_bool), type(jitted_class_2_bool)) + + def test_bool_fallback_default(self): + # Similar to test_bool_fallback, but checks the case where there is no + # __bool__() or __len__() defined, so the object should always be True. + + class NoBoolNoLen: + def __init__(self): + pass + + def get_bool(self): + return bool(self) + + py_class = NoBoolNoLen + jitted_class = jitclass([])(py_class) + + py_class_bool = py_class().get_bool() + jitted_class_bool = jitted_class().get_bool() + + # Truth values from bool(obj) should be equal + self.assertEqual(py_class_bool, jitted_class_bool) + + # Truth values from bool(obj) should be the same type + self.assertEqual(type(py_class_bool), type(jitted_class_bool)) + + def test_operator_reflection(self): + class OperatorsDefined: + def __init__(self, x): + self.x = x + + def __eq__(self, other): + return self.x == other.x + + def __le__(self, other): + return self.x <= other.x + + def __lt__(self, other): + return self.x < other.x + + def __ge__(self, other): + return self.x >= other.x + + def __gt__(self, other): + return self.x > other.x + + class NoOperatorsDefined: + def __init__(self, x): + self.x = x + + spec = [('x', types.int32)] + JitOperatorsDefined = jitclass(spec)(OperatorsDefined) + 
JitNoOperatorsDefined = jitclass(spec)(NoOperatorsDefined) + + py_ops_defined = OperatorsDefined(2) + py_ops_not_defined = NoOperatorsDefined(3) + + jit_ops_defined = JitOperatorsDefined(2) + jit_ops_not_defined = JitNoOperatorsDefined(3) + + self.assertEqual(py_ops_not_defined == py_ops_defined, + jit_ops_not_defined == jit_ops_defined) + + self.assertEqual(py_ops_not_defined <= py_ops_defined, + jit_ops_not_defined <= jit_ops_defined) + + self.assertEqual(py_ops_not_defined < py_ops_defined, + jit_ops_not_defined < jit_ops_defined) + + self.assertEqual(py_ops_not_defined >= py_ops_defined, + jit_ops_not_defined >= jit_ops_defined) + + self.assertEqual(py_ops_not_defined > py_ops_defined, + jit_ops_not_defined > jit_ops_defined) + + @skip_unless_scipy + def test_matmul_operator(self): + class ArrayAt: + def __init__(self, array): + self.arr = array + + def __matmul__(self, other): + return self.arr @ other.arr + + def __rmatmul__(self, other): + return other.arr @ self.arr + + def __imatmul__(self, other): + self.arr = self.arr @ other.arr + return self + + class ArrayNoAt: + def __init__(self, array): + self.arr = array + + n = 3 + np.random.seed(1) + vec = np.random.random(size=(n,)) + mat = np.random.random(size=(n, n)) + + vector_noat = ArrayNoAt(vec) + vector_at = ArrayAt(vec) + jit_vector_noat = jitclass(ArrayNoAt, spec={"arr": float64[::1]})(vec) + jit_vector_at = jitclass(ArrayAt, spec={"arr": float64[::1]})(vec) + + matrix_noat = ArrayNoAt(mat) + matrix_at = ArrayAt(mat) + jit_matrix_noat = jitclass(ArrayNoAt, spec={"arr": float64[:,::1]})(mat) + jit_matrix_at = jitclass(ArrayAt, spec={"arr": float64[:,::1]})(mat) + + # __matmul__ + np.testing.assert_allclose(vector_at @ vector_noat, + jit_vector_at @ jit_vector_noat) + np.testing.assert_allclose(vector_at @ matrix_noat, + jit_vector_at @ jit_matrix_noat) + np.testing.assert_allclose(matrix_at @ vector_noat, + jit_matrix_at @ jit_vector_noat) + np.testing.assert_allclose(matrix_at @ matrix_noat, + 
jit_matrix_at @ jit_matrix_noat) + + # __rmatmul__ + np.testing.assert_allclose(vector_noat @ vector_at, + jit_vector_noat @ jit_vector_at) + np.testing.assert_allclose(vector_noat @ matrix_at, + jit_vector_noat @ jit_matrix_at) + np.testing.assert_allclose(matrix_noat @ vector_at, + jit_matrix_noat @ jit_vector_at) + np.testing.assert_allclose(matrix_noat @ matrix_at, + jit_matrix_noat @ jit_matrix_at) + + # __imatmul__ + vector_at @= matrix_noat + matrix_at @= matrix_noat + jit_vector_at @= jit_matrix_noat + jit_matrix_at @= jit_matrix_noat + + np.testing.assert_allclose(vector_at.arr, jit_vector_at.arr) + np.testing.assert_allclose(matrix_at.arr, jit_matrix_at.arr) + + def test_arithmetic_logical_reflection(self): + class OperatorsDefined: + def __init__(self, x): + self.x = x + + def __radd__(self, other): + return other.x + self.x + + def __rsub__(self, other): + return other.x - self.x + + def __rmul__(self, other): + return other.x * self.x + + def __rtruediv__(self, other): + return other.x / self.x + + def __rfloordiv__(self, other): + return other.x // self.x + + def __rmod__(self, other): + return other.x % self.x + + def __rpow__(self, other): + return other.x ** self.x + + def __rlshift__(self, other): + return other.x << self.x + + def __rrshift__(self, other): + return other.x >> self.x + + def __rand__(self, other): + return other.x & self.x + + def __rxor__(self, other): + return other.x ^ self.x + + def __ror__(self, other): + return other.x | self.x + + class NoOperatorsDefined: + def __init__(self, x): + self.x = x + + float_op = ["+", "-", "*", "**", "/", "//", "%"] + int_op = [*float_op, "<<", ">>" , "&", "^", "|"] + + for test_type, test_op, test_value in [ + (int32, int_op, (2, 4)), + (float64, float_op, (2., 4.)), + (float64[::1], float_op, + (np.array([1., 2., 4.]), np.array([20., -24., 1.]))) + ]: + spec = {"x": test_type} + JitOperatorsDefined = jitclass(OperatorsDefined, spec) + JitNoOperatorsDefined = jitclass(NoOperatorsDefined, spec) 
+ + py_ops_defined = OperatorsDefined(test_value[0]) # noqa: F841 + py_ops_not_defined = NoOperatorsDefined(test_value[1]) # noqa: F841 + + jit_ops_defined = JitOperatorsDefined(test_value[0]) # noqa: F841 + jit_ops_not_defined = JitNoOperatorsDefined(test_value[1]) # noqa: F841 E501 + + for op in test_op: + if not ("array" in str(test_type)): + self.assertEqual( + eval(f"py_ops_not_defined {op} py_ops_defined"), + eval(f"jit_ops_not_defined {op} jit_ops_defined") + ) + else: + self.assertTupleEqual( + tuple(eval(f"py_ops_not_defined {op} py_ops_defined")), + tuple(eval(f"jit_ops_not_defined {op} jit_ops_defined")) + ) + + def test_implicit_hash_compiles(self): + # Ensure that classes with __hash__ implicitly defined as None due to + # the presence of __eq__ are correctly handled by ignoring the __hash__ + # class member. + class ImplicitHash: + def __init__(self): + pass + + def __eq__(self, other): + return False + + jitted = jitclass([])(ImplicitHash) + instance = jitted() + + self.assertFalse(instance == instance) + + +if __name__ == "__main__": + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_jitmethod.py b/venv/lib/python3.10/site-packages/numba/tests/test_jitmethod.py new file mode 100644 index 0000000000000000000000000000000000000000..437c45b6bbdf00b490b49b569e8ebbe05ce63ad3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_jitmethod.py @@ -0,0 +1,70 @@ +import unittest + +import numpy as np + +from numba import jit +from numba.tests.support import override_config + + +class TestJITMethod(unittest.TestCase): + def test_bound_jit_method_with_loop_lift(self): + class Something(object): + def __init__(self, x0): + self.x0 = x0 + + @jit(forceobj=True) + def method(self, x): + a = np.empty(shape=5, dtype=np.float32) + x0 = self.x0 + + for i in range(a.shape[0]): + a[i] = x0 * x + + return a + + something = Something(3) + np.testing.assert_array_equal(something.method(5), + np.array([15, 15, 15, 15, 15], 
dtype=np.float32)) + + # Check that loop lifting in nopython mode was successful + [cres] = something.method.overloads.values() + jitloop = cres.lifted[0] + [loopcres] = jitloop.overloads.values() + self.assertTrue(loopcres.fndesc.native) + + def test_unbound_jit_method(self): + class Something(object): + def __init__(self, x0): + self.x0 = x0 + + @jit(forceobj=True) + def method(self): + return self.x0 + + something = Something(3) + self.assertEqual(Something.method(something), 3) + + +class TestDisabledJIT(unittest.TestCase): + def test_decorated_function(self): + with override_config('DISABLE_JIT', True): + def method(x): + return x + jitted = jit(method) + + self.assertEqual(jitted, method) + self.assertEqual(10, method(10)) + self.assertEqual(10, jitted(10)) + + def test_decorated_function_with_kwargs(self): + with override_config('DISABLE_JIT', True): + def method(x): + return x + jitted = jit(nopython=True)(method) + + self.assertEqual(jitted, method) + self.assertEqual(10, method(10)) + self.assertEqual(10, jitted(10)) + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_linalg.py b/venv/lib/python3.10/site-packages/numba/tests/test_linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..2eb80ae08d1071bb61af49b7068f3a629d179549 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_linalg.py @@ -0,0 +1,2696 @@ +import contextlib +import gc +from itertools import product, cycle +import sys +import warnings +from numbers import Number, Integral +import platform + +import numpy as np + +from numba import jit, njit, typeof +from numba.core import errors +from numba.tests.support import (TestCase, tag, needs_lapack, needs_blas, + _is_armv7l, EnableNRTStatsMixin) +from .matmul_usecase import matmul_usecase +import unittest + + +def dot2(a, b): + return np.dot(a, b) + + +def dot3(a, b, out): + return np.dot(a, b, out=out) + + +def vdot(a, b): + return np.vdot(a, b) + 
+ +class TestProduct(EnableNRTStatsMixin, TestCase): + """ + Tests for dot products. + """ + + dtypes = (np.float64, np.float32, np.complex128, np.complex64) + + def setUp(self): + # Collect leftovers from previous test cases before checking for leaks + gc.collect() + super(TestProduct, self).setUp() + + def sample_vector(self, n, dtype): + # Be careful to generate only exactly representable float values, + # to avoid rounding discrepancies between Numpy and Numba + base = np.arange(n) + if issubclass(dtype, np.complexfloating): + return (base * (1 - 0.5j) + 2j).astype(dtype) + else: + return (base * 0.5 - 1).astype(dtype) + + def sample_matrix(self, m, n, dtype): + return self.sample_vector(m * n, dtype).reshape((m, n)) + + @contextlib.contextmanager + def check_contiguity_warning(self, pyfunc): + """ + Check performance warning(s) for non-contiguity. + """ + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always', errors.NumbaPerformanceWarning) + yield + self.assertGreaterEqual(len(w), 1) + self.assertIs(w[0].category, errors.NumbaPerformanceWarning) + self.assertIn("faster on contiguous arrays", str(w[0].message)) + self.assertEqual(w[0].filename, pyfunc.__code__.co_filename) + # This works because our functions are one-liners + self.assertEqual(w[0].lineno, pyfunc.__code__.co_firstlineno + 1) + + def check_func(self, pyfunc, cfunc, args): + with self.assertNoNRTLeak(): + expected = pyfunc(*args) + got = cfunc(*args) + self.assertPreciseEqual(got, expected, ignore_sign_on_zero=True) + del got, expected + + + def _aligned_copy(self, arr): + # This exists for armv7l because NumPy wants aligned arrays for the + # `out` arg of functions, but np.empty/np.copy doesn't seem to always + # produce them, in particular for complex dtypes + size = (arr.size + 1) * arr.itemsize + 1 + datasize = arr.size * arr.itemsize + tmp = np.empty(size, dtype=np.uint8) + for i in range(arr.itemsize + 1): + new = tmp[i : i + datasize].view(dtype=arr.dtype) + if 
new.flags.aligned: + break + else: + raise Exception("Could not obtain aligned array") + if arr.flags.c_contiguous: + new = np.reshape(new, arr.shape, order='C') + else: + new = np.reshape(new, arr.shape, order='F') + new[:] = arr[:] + assert new.flags.aligned + return new + + def check_func_out(self, pyfunc, cfunc, args, out): + copier = self._aligned_copy if _is_armv7l else np.copy + with self.assertNoNRTLeak(): + expected = copier(out) + got = copier(out) + self.assertIs(pyfunc(*args, out=expected), expected) + self.assertIs(cfunc(*args, out=got), got) + self.assertPreciseEqual(got, expected, ignore_sign_on_zero=True) + del got, expected + + def assert_mismatching_sizes(self, cfunc, args, is_out=False): + with self.assertRaises(ValueError) as raises: + cfunc(*args) + msg = ("incompatible output array size" if is_out else + "incompatible array sizes") + self.assertIn(msg, str(raises.exception)) + + def assert_mismatching_dtypes(self, cfunc, args, func_name="np.dot()"): + with self.assertRaises(errors.TypingError) as raises: + cfunc(*args) + self.assertIn("%s arguments must all have the same dtype" + % (func_name,), + str(raises.exception)) + + def check_dot_vv(self, pyfunc, func_name): + n = 3 + cfunc = jit(nopython=True)(pyfunc) + for dtype in self.dtypes: + a = self.sample_vector(n, dtype) + b = self.sample_vector(n, dtype) + self.check_func(pyfunc, cfunc, (a, b)) + # Non-contiguous + self.check_func(pyfunc, cfunc, (a[::-1], b[::-1])) + + # Mismatching sizes + a = self.sample_vector(n - 1, np.float64) + b = self.sample_vector(n, np.float64) + self.assert_mismatching_sizes(cfunc, (a, b)) + # Mismatching dtypes + a = self.sample_vector(n, np.float32) + b = self.sample_vector(n, np.float64) + self.assert_mismatching_dtypes(cfunc, (a, b), func_name=func_name) + + @needs_blas + def test_dot_vv(self): + """ + Test vector * vector np.dot() + """ + self.check_dot_vv(dot2, "np.dot()") + + @needs_blas + def test_vdot(self): + """ + Test np.vdot() + """ + 
self.check_dot_vv(vdot, "np.vdot()") + + def check_dot_vm(self, pyfunc2, pyfunc3, func_name): + + def samples(m, n): + for order in 'CF': + a = self.sample_matrix(m, n, np.float64).copy(order=order) + b = self.sample_vector(n, np.float64) + yield a, b + for dtype in self.dtypes: + a = self.sample_matrix(m, n, dtype) + b = self.sample_vector(n, dtype) + yield a, b + # Non-contiguous + yield a[::-1], b[::-1] + + cfunc2 = jit(nopython=True)(pyfunc2) + if pyfunc3 is not None: + cfunc3 = jit(nopython=True)(pyfunc3) + + for m, n in [(2, 3), + (3, 0), + (0, 3) + ]: + for a, b in samples(m, n): + self.check_func(pyfunc2, cfunc2, (a, b)) + self.check_func(pyfunc2, cfunc2, (b, a.T)) + if pyfunc3 is not None: + for a, b in samples(m, n): + out = np.empty(m, dtype=a.dtype) + self.check_func_out(pyfunc3, cfunc3, (a, b), out) + self.check_func_out(pyfunc3, cfunc3, (b, a.T), out) + + # Mismatching sizes + m, n = 2, 3 + a = self.sample_matrix(m, n - 1, np.float64) + b = self.sample_vector(n, np.float64) + self.assert_mismatching_sizes(cfunc2, (a, b)) + self.assert_mismatching_sizes(cfunc2, (b, a.T)) + if pyfunc3 is not None: + out = np.empty(m, np.float64) + self.assert_mismatching_sizes(cfunc3, (a, b, out)) + self.assert_mismatching_sizes(cfunc3, (b, a.T, out)) + a = self.sample_matrix(m, m, np.float64) + b = self.sample_vector(m, np.float64) + out = np.empty(m - 1, np.float64) + self.assert_mismatching_sizes(cfunc3, (a, b, out), is_out=True) + self.assert_mismatching_sizes(cfunc3, (b, a.T, out), is_out=True) + # Mismatching dtypes + a = self.sample_matrix(m, n, np.float32) + b = self.sample_vector(n, np.float64) + self.assert_mismatching_dtypes(cfunc2, (a, b), func_name) + if pyfunc3 is not None: + a = self.sample_matrix(m, n, np.float64) + b = self.sample_vector(n, np.float64) + out = np.empty(m, np.float32) + self.assert_mismatching_dtypes(cfunc3, (a, b, out), func_name) + + @needs_blas + def test_dot_vm(self): + """ + Test vector * matrix and matrix * vector np.dot() + """ + 
self.check_dot_vm(dot2, dot3, "np.dot()") + + def check_dot_mm(self, pyfunc2, pyfunc3, func_name): + + def samples(m, n, k): + for order_a, order_b in product('CF', 'CF'): + a = self.sample_matrix(m, k, np.float64).copy(order=order_a) + b = self.sample_matrix(k, n, np.float64).copy(order=order_b) + yield a, b + for dtype in self.dtypes: + a = self.sample_matrix(m, k, dtype) + b = self.sample_matrix(k, n, dtype) + yield a, b + # Non-contiguous + yield a[::-1], b[::-1] + + cfunc2 = jit(nopython=True)(pyfunc2) + if pyfunc3 is not None: + cfunc3 = jit(nopython=True)(pyfunc3) + + # Test generic matrix * matrix as well as "degenerate" cases + # where one of the outer dimensions is 1 (i.e. really represents + # a vector, which may select a different implementation), + # one of the matrices is empty, or both matrices are empty. + for m, n, k in [(2, 3, 4), # Generic matrix * matrix + (1, 3, 4), # 2d vector * matrix + (1, 1, 4), # 2d vector * 2d vector + (0, 3, 2), # Empty matrix * matrix, empty output + (3, 0, 2), # Matrix * empty matrix, empty output + (0, 0, 3), # Both arguments empty, empty output + (3, 2, 0), # Both arguments empty, nonempty output + ]: + for a, b in samples(m, n, k): + self.check_func(pyfunc2, cfunc2, (a, b)) + self.check_func(pyfunc2, cfunc2, (b.T, a.T)) + if pyfunc3 is not None: + for a, b in samples(m, n, k): + out = np.empty((m, n), dtype=a.dtype) + self.check_func_out(pyfunc3, cfunc3, (a, b), out) + out = np.empty((n, m), dtype=a.dtype) + self.check_func_out(pyfunc3, cfunc3, (b.T, a.T), out) + + # Mismatching sizes + m, n, k = 2, 3, 4 + a = self.sample_matrix(m, k - 1, np.float64) + b = self.sample_matrix(k, n, np.float64) + self.assert_mismatching_sizes(cfunc2, (a, b)) + if pyfunc3 is not None: + out = np.empty((m, n), np.float64) + self.assert_mismatching_sizes(cfunc3, (a, b, out)) + a = self.sample_matrix(m, k, np.float64) + b = self.sample_matrix(k, n, np.float64) + out = np.empty((m, n - 1), np.float64) + 
self.assert_mismatching_sizes(cfunc3, (a, b, out), is_out=True) + # Mismatching dtypes + a = self.sample_matrix(m, k, np.float32) + b = self.sample_matrix(k, n, np.float64) + self.assert_mismatching_dtypes(cfunc2, (a, b), func_name) + if pyfunc3 is not None: + a = self.sample_matrix(m, k, np.float64) + b = self.sample_matrix(k, n, np.float64) + out = np.empty((m, n), np.float32) + self.assert_mismatching_dtypes(cfunc3, (a, b, out), func_name) + + @needs_blas + def test_dot_mm(self): + """ + Test matrix * matrix np.dot() + """ + self.check_dot_mm(dot2, dot3, "np.dot()") + + @needs_blas + def test_matmul_vv(self): + """ + Test vector @ vector + """ + self.check_dot_vv(matmul_usecase, "'@'") + + @needs_blas + def test_matmul_vm(self): + """ + Test vector @ matrix and matrix @ vector + """ + self.check_dot_vm(matmul_usecase, None, "'@'") + + @needs_blas + def test_matmul_mm(self): + """ + Test matrix @ matrix + """ + self.check_dot_mm(matmul_usecase, None, "'@'") + + @needs_blas + def test_contiguity_warnings(self): + m, k, n = 2, 3, 4 + dtype = np.float64 + a = self.sample_matrix(m, k, dtype)[::-1] + b = self.sample_matrix(k, n, dtype)[::-1] + out = np.empty((m, n), dtype) + + cfunc = jit(nopython=True)(dot2) + with self.check_contiguity_warning(cfunc.py_func): + cfunc(a, b) + cfunc = jit(nopython=True)(dot3) + with self.check_contiguity_warning(cfunc.py_func): + cfunc(a, b, out) + + a = self.sample_vector(n, dtype)[::-1] + b = self.sample_vector(n, dtype)[::-1] + + cfunc = jit(nopython=True)(vdot) + with self.check_contiguity_warning(cfunc.py_func): + cfunc(a, b) + + +# Implementation definitions for the purpose of jitting. 
+ +def invert_matrix(a): + return np.linalg.inv(a) + + +def cholesky_matrix(a): + return np.linalg.cholesky(a) + + +def eig_matrix(a): + return np.linalg.eig(a) + + +def eigvals_matrix(a): + return np.linalg.eigvals(a) + + +def eigh_matrix(a): + return np.linalg.eigh(a) + + +def eigvalsh_matrix(a): + return np.linalg.eigvalsh(a) + + +def svd_matrix(a, full_matrices=1): + return np.linalg.svd(a, full_matrices) + + +def qr_matrix(a): + return np.linalg.qr(a) + + +def lstsq_system(A, B, rcond=-1): + return np.linalg.lstsq(A, B, rcond) + + +def solve_system(A, B): + return np.linalg.solve(A, B) + + +def pinv_matrix(A, rcond=1e-15): # 1e-15 from numpy impl + return np.linalg.pinv(A) + + +def slogdet_matrix(a): + return np.linalg.slogdet(a) + + +def det_matrix(a): + return np.linalg.det(a) + + +def norm_matrix(a, ord=None): + return np.linalg.norm(a, ord) + + +def cond_matrix(a, p=None): + return np.linalg.cond(a, p) + + +def matrix_rank_matrix(a, tol=None): + return np.linalg.matrix_rank(a, tol) + + +def matrix_power_matrix(a, n): + return np.linalg.matrix_power(a, n) + + +def trace_matrix(a, offset=0): + return np.trace(a, offset) + + +def trace_matrix_no_offset(a): + return np.trace(a) + + +def outer_matrix(a, b, out=None): + return np.outer(a, b, out=out) + + +def kron_matrix(a, b): + return np.kron(a, b) + + +class TestLinalgBase(EnableNRTStatsMixin, TestCase): + """ + Provides setUp and common data/error modes for testing np.linalg functions. 
+ """ + + # supported dtypes + dtypes = (np.float64, np.float32, np.complex128, np.complex64) + + def setUp(self): + # Collect leftovers from previous test cases before checking for leaks + gc.collect() + super(TestLinalgBase, self).setUp() + + def sample_vector(self, n, dtype): + # Be careful to generate only exactly representable float values, + # to avoid rounding discrepancies between Numpy and Numba + base = np.arange(n) + if issubclass(dtype, np.complexfloating): + return (base * (1 - 0.5j) + 2j).astype(dtype) + else: + return (base * 0.5 + 1).astype(dtype) + + def specific_sample_matrix( + self, size, dtype, order, rank=None, condition=None): + """ + Provides a sample matrix with an optionally specified rank or condition + number. + + size: (rows, columns), the dimensions of the returned matrix. + dtype: the dtype for the returned matrix. + order: the memory layout for the returned matrix, 'F' or 'C'. + rank: the rank of the matrix, an integer value, defaults to full rank. + condition: the condition number of the matrix (defaults to 1.) + + NOTE: Only one of rank or condition may be set. + """ + + # default condition + d_cond = 1. 
+ + if len(size) != 2: + raise ValueError("size must be a length 2 tuple.") + + if order not in ['F', 'C']: + raise ValueError("order must be one of 'F' or 'C'.") + + if dtype not in [np.float32, np.float64, np.complex64, np.complex128]: + raise ValueError("dtype must be a numpy floating point type.") + + if rank is not None and condition is not None: + raise ValueError("Only one of rank or condition can be specified.") + + if condition is None: + condition = d_cond + + if condition < 1: + raise ValueError("Condition number must be >=1.") + + np.random.seed(0) # repeatable seed + m, n = size + + if m < 0 or n < 0: + raise ValueError("Negative dimensions given for matrix shape.") + + minmn = min(m, n) + if rank is None: + rv = minmn + else: + if rank <= 0: + raise ValueError("Rank must be greater than zero.") + if not isinstance(rank, Integral): + raise ValueError("Rank must an integer.") + rv = rank + if rank > minmn: + raise ValueError("Rank given greater than full rank.") + + if m == 1 or n == 1: + # vector, must be rank 1 (enforced above) + # condition of vector is also 1 + if condition != d_cond: + raise ValueError( + "Condition number was specified for a vector (always 1.).") + maxmn = max(m, n) + Q = self.sample_vector(maxmn, dtype).reshape(m, n) + else: + # Build a sample matrix via combining SVD like inputs. + + # Create matrices of left and right singular vectors. + # This could use Modified Gram-Schmidt and perhaps be quicker, + # at present it uses QR decompositions to obtain orthonormal + # matrices. + tmp = self.sample_vector(m * m, dtype).reshape(m, m) + U, _ = np.linalg.qr(tmp) + # flip the second array, else for m==n the identity matrix appears + tmp = self.sample_vector(n * n, dtype)[::-1].reshape(n, n) + V, _ = np.linalg.qr(tmp) + # create singular values. 
+ sv = np.linspace(d_cond, condition, rv) + S = np.zeros((m, n)) + idx = np.nonzero(np.eye(m, n)) + S[idx[0][:rv], idx[1][:rv]] = sv + Q = np.dot(np.dot(U, S), V.T) # construct + Q = np.array(Q, dtype=dtype, order=order) # sort out order/type + + return Q + + def assert_error(self, cfunc, args, msg, err=ValueError): + with self.assertRaises(err) as raises: + cfunc(*args) + self.assertIn(msg, str(raises.exception)) + + def assert_non_square(self, cfunc, args): + msg = "Last 2 dimensions of the array must be square." + self.assert_error(cfunc, args, msg, np.linalg.LinAlgError) + + def assert_wrong_dtype(self, name, cfunc, args): + msg = "np.linalg.%s() only supported on float and complex arrays" % name + self.assert_error(cfunc, args, msg, errors.TypingError) + + def assert_wrong_dimensions(self, name, cfunc, args, la_prefix=True): + prefix = "np.linalg" if la_prefix else "np" + msg = "%s.%s() only supported on 2-D arrays" % (prefix, name) + self.assert_error(cfunc, args, msg, errors.TypingError) + + def assert_no_nan_or_inf(self, cfunc, args): + msg = "Array must not contain infs or NaNs." + self.assert_error(cfunc, args, msg, np.linalg.LinAlgError) + + def assert_contig_sanity(self, got, expected_contig): + """ + This checks that in a computed result from numba (array, possibly tuple + of arrays) all the arrays are contiguous in memory and that they are + all at least one of "C_CONTIGUOUS" or "F_CONTIGUOUS". The computed + result of the contiguousness is then compared against a hardcoded + expected result. + + got: is the computed results from numba + expected_contig: is "C" or "F" and is the expected type of + contiguousness across all input values + (and therefore tests). 
+ """ + + if isinstance(got, tuple): + # tuple present, check all results + for a in got: + self.assert_contig_sanity(a, expected_contig) + else: + if not isinstance(got, Number): + # else a single array is present + c_contig = got.flags.c_contiguous + f_contig = got.flags.f_contiguous + + # check that the result (possible set of) is at least one of + # C or F contiguous. + msg = "Results are not at least one of all C or F contiguous." + self.assertTrue(c_contig | f_contig, msg) + + msg = "Computed contiguousness does not match expected." + if expected_contig == "C": + self.assertTrue(c_contig, msg) + elif expected_contig == "F": + self.assertTrue(f_contig, msg) + else: + raise ValueError("Unknown contig") + + def assert_raise_on_singular(self, cfunc, args): + msg = "Matrix is singular to machine precision." + self.assert_error(cfunc, args, msg, err=np.linalg.LinAlgError) + + def assert_is_identity_matrix(self, got, rtol=None, atol=None): + """ + Checks if a matrix is equal to the identity matrix. + """ + # check it is square + self.assertEqual(got.shape[-1], got.shape[-2]) + # create identity matrix + eye = np.eye(got.shape[-1], dtype=got.dtype) + resolution = 5 * np.finfo(got.dtype).resolution + if rtol is None: + rtol = 10 * resolution + if atol is None: + atol = 100 * resolution # zeros tend to be fuzzy + # check it matches + np.testing.assert_allclose(got, eye, rtol, atol) + + def assert_invalid_norm_kind(self, cfunc, args): + """ + For use in norm() and cond() tests. + """ + msg = "Invalid norm order for matrices." + self.assert_error(cfunc, args, msg, ValueError) + + def assert_raise_on_empty(self, cfunc, args): + msg = 'Arrays cannot be empty' + self.assert_error(cfunc, args, msg, np.linalg.LinAlgError) + + +class TestTestLinalgBase(TestCase): + """ + The sample matrix code TestLinalgBase.specific_sample_matrix() + is a bit involved, this class tests it works as intended. 
+ """ + + def test_specific_sample_matrix(self): + + # add a default test to the ctor, it never runs so doesn't matter + inst = TestLinalgBase('specific_sample_matrix') + + sizes = [(7, 1), (11, 5), (5, 11), (3, 3), (1, 7)] + + # test loop + for size, dtype, order in product(sizes, inst.dtypes, 'FC'): + + m, n = size + minmn = min(m, n) + + # test default full rank + A = inst.specific_sample_matrix(size, dtype, order) + self.assertEqual(A.shape, size) + self.assertEqual(np.linalg.matrix_rank(A), minmn) + + # test reduced rank if a reduction is possible + if minmn > 1: + rank = minmn - 1 + A = inst.specific_sample_matrix(size, dtype, order, rank=rank) + self.assertEqual(A.shape, size) + self.assertEqual(np.linalg.matrix_rank(A), rank) + + resolution = 5 * np.finfo(dtype).resolution + + # test default condition + A = inst.specific_sample_matrix(size, dtype, order) + self.assertEqual(A.shape, size) + np.testing.assert_allclose(np.linalg.cond(A), + 1., + rtol=resolution, + atol=resolution) + + # test specified condition if matrix is > 1D + if minmn > 1: + condition = 10. + A = inst.specific_sample_matrix( + size, dtype, order, condition=condition) + self.assertEqual(A.shape, size) + np.testing.assert_allclose(np.linalg.cond(A), + 10., + rtol=resolution, + atol=resolution) + + # check errors are raised appropriately + def check_error(args, msg, err=ValueError): + with self.assertRaises(err) as raises: + inst.specific_sample_matrix(*args) + self.assertIn(msg, str(raises.exception)) + + # check the checker runs ok + with self.assertRaises(AssertionError) as raises: + msg = "blank" + check_error(((2, 3), np.float64, 'F'), msg, err=ValueError) + + # check invalid inputs... + + # bad size + msg = "size must be a length 2 tuple." + check_error(((1,), np.float64, 'F'), msg, err=ValueError) + + # bad order + msg = "order must be one of 'F' or 'C'." + check_error(((2, 3), np.float64, 'z'), msg, err=ValueError) + + # bad type + msg = "dtype must be a numpy floating point type." 
+ check_error(((2, 3), np.int32, 'F'), msg, err=ValueError) + + # specifying both rank and condition + msg = "Only one of rank or condition can be specified." + check_error(((2, 3), np.float64, 'F', 1, 1), msg, err=ValueError) + + # specifying negative condition + msg = "Condition number must be >=1." + check_error(((2, 3), np.float64, 'F', None, -1), msg, err=ValueError) + + # specifying negative matrix dimension + msg = "Negative dimensions given for matrix shape." + check_error(((2, -3), np.float64, 'F'), msg, err=ValueError) + + # specifying negative rank + msg = "Rank must be greater than zero." + check_error(((2, 3), np.float64, 'F', -1), msg, err=ValueError) + + # specifying a rank greater than maximum rank + msg = "Rank given greater than full rank." + check_error(((2, 3), np.float64, 'F', 4), msg, err=ValueError) + + # specifying a condition number for a vector + msg = "Condition number was specified for a vector (always 1.)." + check_error(((1, 3), np.float64, 'F', None, 10), msg, err=ValueError) + + # specifying a non integer rank + msg = "Rank must an integer." + check_error(((2, 3), np.float64, 'F', 1.5), msg, err=ValueError) + + +class TestLinalgInv(TestLinalgBase): + """ + Tests for np.linalg.inv. 
+ """ + + @needs_lapack + def test_linalg_inv(self): + """ + Test np.linalg.inv + """ + n = 10 + cfunc = jit(nopython=True)(invert_matrix) + + def check(a, **kwargs): + expected = invert_matrix(a) + got = cfunc(a) + self.assert_contig_sanity(got, "F") + + use_reconstruction = False + + # try strict + try: + np.testing.assert_array_almost_equal_nulp(got, expected, + nulp=10) + except AssertionError: + # fall back to reconstruction + use_reconstruction = True + + if use_reconstruction: + rec = np.dot(got, a) + self.assert_is_identity_matrix(rec) + + # Ensure proper resource management + with self.assertNoNRTLeak(): + cfunc(a) + + for dtype, order in product(self.dtypes, 'CF'): + a = self.specific_sample_matrix((n, n), dtype, order) + check(a) + + # 0 dimensioned matrix + check(np.empty((0, 0))) + + # Non square matrix + self.assert_non_square(cfunc, (np.ones((2, 3)),)) + + # Wrong dtype + self.assert_wrong_dtype("inv", cfunc, + (np.ones((2, 2), dtype=np.int32),)) + + # Dimension issue + self.assert_wrong_dimensions("inv", cfunc, (np.ones(10),)) + + # Singular matrix + self.assert_raise_on_singular(cfunc, (np.zeros((2, 2)),)) + + @needs_lapack + def test_no_input_mutation(self): + X = np.array([[1., 3, 2, 7,], + [-5, 4, 2, 3,], + [9, -3, 1, 1,], + [2, -2, 2, 8,]], order='F') + + X_orig = np.copy(X) + + @jit(nopython=True) + def ainv(X, test): + if test: + # not executed, but necessary to trigger A ordering in X + X = X[1:2, :] + return np.linalg.inv(X) + + expected = ainv.py_func(X, False) + np.testing.assert_allclose(X, X_orig) + + got = ainv(X, False) + np.testing.assert_allclose(X, X_orig) + + np.testing.assert_allclose(expected, got) + + +class TestLinalgCholesky(TestLinalgBase): + """ + Tests for np.linalg.cholesky. + """ + + def sample_matrix(self, m, dtype, order): + # pd. 
(positive definite) matrix has eigenvalues in Z+ + np.random.seed(0) # repeatable seed + A = np.random.rand(m, m) + # orthonormal q needed to form up q^{-1}*D*q + # no "orth()" in numpy + q, _ = np.linalg.qr(A) + L = np.arange(1, m + 1) # some positive eigenvalues + Q = np.dot(np.dot(q.T, np.diag(L)), q) # construct + Q = np.array(Q, dtype=dtype, order=order) # sort out order/type + return Q + + def assert_not_pd(self, cfunc, args): + msg = "Matrix is not positive definite." + self.assert_error(cfunc, args, msg, np.linalg.LinAlgError) + + @needs_lapack + def test_linalg_cholesky(self): + """ + Test np.linalg.cholesky + """ + n = 10 + cfunc = jit(nopython=True)(cholesky_matrix) + + def check(a): + expected = cholesky_matrix(a) + got = cfunc(a) + use_reconstruction = False + # check that the computed results are contig and in the same way + self.assert_contig_sanity(got, "C") + + # try strict + try: + np.testing.assert_array_almost_equal_nulp(got, expected, + nulp=10) + except AssertionError: + # fall back to reconstruction + use_reconstruction = True + + # try via reconstruction + if use_reconstruction: + rec = np.dot(got, np.conj(got.T)) + resolution = 5 * np.finfo(a.dtype).resolution + np.testing.assert_allclose( + a, + rec, + rtol=resolution, + atol=resolution + ) + + # Ensure proper resource management + with self.assertNoNRTLeak(): + cfunc(a) + + for dtype, order in product(self.dtypes, 'FC'): + a = self.sample_matrix(n, dtype, order) + check(a) + + # 0 dimensioned matrix + check(np.empty((0, 0))) + + rn = "cholesky" + # Non square matrices + self.assert_non_square(cfunc, (np.ones((2, 3), dtype=np.float64),)) + + # Wrong dtype + self.assert_wrong_dtype(rn, cfunc, + (np.ones((2, 2), dtype=np.int32),)) + + # Dimension issue + self.assert_wrong_dimensions(rn, cfunc, + (np.ones(10, dtype=np.float64),)) + + # not pd + self.assert_not_pd(cfunc, + (np.ones(4, dtype=np.float64).reshape(2, 2),)) + + +class TestLinalgEigenSystems(TestLinalgBase): + """ + Tests for 
np.linalg.eig/eigvals. + """ + + def sample_matrix(self, m, dtype, order): + # This is a tridiag with the same but skewed values on the diagonals + v = self.sample_vector(m, dtype) + Q = np.diag(v) + idx = np.nonzero(np.eye(Q.shape[0], Q.shape[1], 1)) + Q[idx] = v[1:] + idx = np.nonzero(np.eye(Q.shape[0], Q.shape[1], -1)) + Q[idx] = v[:-1] + Q = np.array(Q, dtype=dtype, order=order) + return Q + + def assert_no_domain_change(self, name, cfunc, args): + msg = name + "() argument must not cause a domain change." + self.assert_error(cfunc, args, msg) + + def _check_worker(self, cfunc, name, expected_res_len, + check_for_domain_change): + def check(*args): + expected = cfunc.py_func(*args) + got = cfunc(*args) + a = args[0] + # check that the returned tuple is same length + self.assertEqual(len(expected), len(got)) + # and that dimension is correct + res_is_tuple = False + if isinstance(got, tuple): + res_is_tuple = True + self.assertEqual(len(got), expected_res_len) + else: # its an array + self.assertEqual(got.ndim, expected_res_len) + + # and that the computed results are contig and in the same way + self.assert_contig_sanity(got, "F") + + use_reconstruction = False + # try plain match of each array to np first + for k in range(len(expected)): + try: + np.testing.assert_array_almost_equal_nulp( + got[k], expected[k], nulp=10) + except AssertionError: + # plain match failed, test by reconstruction + use_reconstruction = True + + # If plain match fails then reconstruction is used. + # this checks that A*V ~== V*diag(W) + # i.e. eigensystem ties out + # this is required as numpy uses only double precision lapack + # routines and computation of eigenvectors is numerically + # sensitive, numba uses the type specific routines therefore + # sometimes comes out with a different (but entirely + # valid) answer (eigenvectors are not unique etc.). + # This is only applicable if eigenvectors are computed + # along with eigenvalues i.e. result is a tuple. 
+ resolution = 5 * np.finfo(a.dtype).resolution + if use_reconstruction: + if res_is_tuple: + w, v = got + # modify 'a' if hermitian eigensystem functionality is + # being tested. 'L' for use lower part is default and + # the only thing used at present so we conjugate transpose + # the lower part into the upper for use in the + # reconstruction. By construction the sample matrix is + # tridiag so this is just a question of copying the lower + # diagonal into the upper and conjugating on the way. + if name[-1] == 'h': + idxl = np.nonzero(np.eye(a.shape[0], a.shape[1], -1)) + idxu = np.nonzero(np.eye(a.shape[0], a.shape[1], 1)) + cfunc(*args) + # upper idx must match lower for default uplo="L" + # if complex, conjugate + a[idxu] = np.conj(a[idxl]) + # also, only the real part of the diagonals is + # considered in the calculation so the imag is zeroed + # out for the purposes of use in reconstruction. + a[np.diag_indices(a.shape[0])] = np.real(np.diag(a)) + + lhs = np.dot(a, v) + rhs = np.dot(v, np.diag(w)) + + np.testing.assert_allclose( + lhs.real, + rhs.real, + rtol=resolution, + atol=resolution + ) + if np.iscomplexobj(v): + np.testing.assert_allclose( + lhs.imag, + rhs.imag, + rtol=resolution, + atol=resolution + ) + else: + # This isn't technically reconstruction but is here to + # deal with that the order of the returned eigenvalues + # may differ in the case of routines just returning + # eigenvalues and there's no true reconstruction + # available with which to perform a check. 
+ np.testing.assert_allclose( + np.sort(expected), + np.sort(got), + rtol=resolution, + atol=resolution + ) + + # Ensure proper resource management + with self.assertNoNRTLeak(): + cfunc(*args) + return check + + def checker_for_linalg_eig( + self, name, func, expected_res_len, check_for_domain_change=None): + """ + Test np.linalg.eig + """ + n = 10 + cfunc = jit(nopython=True)(func) + check = self._check_worker(cfunc, name, expected_res_len, + check_for_domain_change) + + + # The main test loop + for dtype, order in product(self.dtypes, 'FC'): + a = self.sample_matrix(n, dtype, order) + check(a) + + # Test both a real and complex type as the impls are different + for ty in [np.float32, np.complex64]: + + # 0 dimensioned matrix + check(np.empty((0, 0), dtype=ty)) + + # Non square matrices + self.assert_non_square(cfunc, (np.ones((2, 3), dtype=ty),)) + + # Wrong dtype + self.assert_wrong_dtype(name, cfunc, + (np.ones((2, 2), dtype=np.int32),)) + + # Dimension issue + self.assert_wrong_dimensions(name, cfunc, (np.ones(10, dtype=ty),)) + + # no nans or infs + self.assert_no_nan_or_inf(cfunc, + (np.array([[1., 2., ], [np.inf, np.nan]], + dtype=ty),)) + + if check_for_domain_change: + # By design numba does not support dynamic return types, numpy does + # and uses this in the case of returning eigenvalues/vectors of + # a real matrix. The return type of np.linalg.eig(), when + # operating on a matrix in real space depends on the values present + # in the matrix itself (recalling that eigenvalues are the roots of the + # characteristic polynomial of the system matrix, which will by + # construction depend on the values present in the system matrix). + # This test asserts that if a domain change is required on the return + # type, i.e. complex eigenvalues from a real input, an error is raised. + # For complex types, regardless of the value of the imaginary part of + # the returned eigenvalues, a complex type will be returned, this + # follows numpy and fits in with numba. 
+ + # First check that the computation is valid (i.e. in complex space) + A = np.array([[1, -2], [2, 1]]) + check(A.astype(np.complex128)) + # and that the imaginary part is nonzero + l, _ = func(A) + self.assertTrue(np.any(l.imag)) + + # Now check that the computation fails in real space + for ty in [np.float32, np.float64]: + self.assert_no_domain_change(name, cfunc, (A.astype(ty),)) + + @needs_lapack + def test_linalg_eig(self): + self.checker_for_linalg_eig("eig", eig_matrix, 2, True) + + @needs_lapack + def test_linalg_eigvals(self): + self.checker_for_linalg_eig("eigvals", eigvals_matrix, 1, True) + + @needs_lapack + def test_linalg_eigh(self): + self.checker_for_linalg_eig("eigh", eigh_matrix, 2, False) + + @needs_lapack + def test_linalg_eigvalsh(self): + self.checker_for_linalg_eig("eigvalsh", eigvalsh_matrix, 1, False) + + @needs_lapack + def test_no_input_mutation(self): + # checks inputs are not mutated + + for c in (('eig', 2, True), + ('eigvals', 1, True), + ('eigh', 2, False), + ('eigvalsh', 1, False)): + + m, nout, domain_change = c + + meth = getattr(np.linalg, m) + + @jit(nopython=True) + def func(X, test): + if test: + # not executed, but necessary to trigger A ordering in X + X = X[1:2, :] + return meth(X) + + check = self._check_worker(func, m, nout, domain_change) + + for dtype in (np.float64, np.complex128): + with self.subTest(meth=meth, dtype=dtype): + # trivial system, doesn't matter, just checking if it gets + # mutated + X = np.array([[10., 1, 0, 1], + [1, 9, 0, 0], + [0, 0, 8, 0], + [1, 0, 0, 7], + ], order='F', dtype=dtype) + + X_orig = np.copy(X) + + expected = func.py_func(X, False) + np.testing.assert_allclose(X, X_orig) + + got = func(X, False) + np.testing.assert_allclose(X, X_orig) + + check(X, False) + + +class TestLinalgSvd(TestLinalgBase): + """ + Tests for np.linalg.svd. + """ + + # This checks that A ~= U*S*V**H, i.e. SV decomposition ties out. 
This is + # required as NumPy uses only double precision LAPACK routines and + # computation of SVD is numerically sensitive. Numba uses type-specific + # routines and therefore sometimes comes out with a different answer to + # NumPy (orthonormal bases are not unique, etc.). + + def check_reconstruction(self, a, got, expected): + u, sv, vt = got + + # Check they are dimensionally correct + for k in range(len(expected)): + self.assertEqual(got[k].shape, expected[k].shape) + + # Columns in u and rows in vt dictates the working size of s + s = np.zeros((u.shape[1], vt.shape[0])) + np.fill_diagonal(s, sv) + + rec = np.dot(np.dot(u, s), vt) + resolution = np.finfo(a.dtype).resolution + np.testing.assert_allclose( + a, + rec, + rtol=10 * resolution, + atol=100 * resolution # zeros tend to be fuzzy + ) + + @needs_lapack + def test_linalg_svd(self): + """ + Test np.linalg.svd + """ + cfunc = jit(nopython=True)(svd_matrix) + + def check(a, **kwargs): + expected = svd_matrix(a, **kwargs) + got = cfunc(a, **kwargs) + # check that the returned tuple is same length + self.assertEqual(len(expected), len(got)) + # and that length is 3 + self.assertEqual(len(got), 3) + # and that the computed results are contig and in the same way + self.assert_contig_sanity(got, "F") + + use_reconstruction = False + # try plain match of each array to np first + for k in range(len(expected)): + + try: + np.testing.assert_array_almost_equal_nulp( + got[k], expected[k], nulp=10) + except AssertionError: + # plain match failed, test by reconstruction + use_reconstruction = True + + if use_reconstruction: + self.check_reconstruction(a, got, expected) + + # Ensure proper resource management + with self.assertNoNRTLeak(): + cfunc(a, **kwargs) + + # test: column vector, tall, wide, square, row vector + # prime sizes + sizes = [(7, 1), (7, 5), (5, 7), (3, 3), (1, 7)] + + # flip on reduced or full matrices + full_matrices = (True, False) + + # test loop + for size, dtype, fmat, order in \ + product(sizes, 
self.dtypes, full_matrices, 'FC'): + + a = self.specific_sample_matrix(size, dtype, order) + check(a, full_matrices=fmat) + + rn = "svd" + + # Wrong dtype + self.assert_wrong_dtype(rn, cfunc, + (np.ones((2, 2), dtype=np.int32),)) + + # Dimension issue + self.assert_wrong_dimensions(rn, cfunc, + (np.ones(10, dtype=np.float64),)) + + # no nans or infs + self.assert_no_nan_or_inf(cfunc, + (np.array([[1., 2., ], [np.inf, np.nan]], + dtype=np.float64),)) + # empty + for sz in [(0, 1), (1, 0), (0, 0)]: + args = (np.empty(sz), True) + self.assert_raise_on_empty(cfunc, args) + + @needs_lapack + def test_no_input_mutation(self): + X = np.array([[1., 3, 2, 7,], + [-5, 4, 2, 3,], + [9, -3, 1, 1,], + [2, -2, 2, 8,]], order='F') + + X_orig = np.copy(X) + + @jit(nopython=True) + def func(X, test): + if test: + # not executed, but necessary to trigger A ordering in X + X = X[1:2, :] + return np.linalg.svd(X) + + expected = func.py_func(X, False) + np.testing.assert_allclose(X, X_orig) + + got = func(X, False) + np.testing.assert_allclose(X, X_orig) + + try: + for e_a, g_a in zip(expected, got): + np.testing.assert_allclose(e_a, g_a) + except AssertionError: + self.check_reconstruction(X, got, expected) + + +class TestLinalgQr(TestLinalgBase): + """ + Tests for np.linalg.qr. 
+ """ + + @needs_lapack + def test_linalg_qr(self): + """ + Test np.linalg.qr + """ + cfunc = jit(nopython=True)(qr_matrix) + + def check(a, **kwargs): + expected = qr_matrix(a, **kwargs) + got = cfunc(a, **kwargs) + + # check that the returned tuple is same length + self.assertEqual(len(expected), len(got)) + # and that length is 2 + self.assertEqual(len(got), 2) + # and that the computed results are contig and in the same way + self.assert_contig_sanity(got, "F") + + use_reconstruction = False + # try plain match of each array to np first + for k in range(len(expected)): + try: + np.testing.assert_array_almost_equal_nulp( + got[k], expected[k], nulp=10) + except AssertionError: + # plain match failed, test by reconstruction + use_reconstruction = True + + # if plain match fails then reconstruction is used. + # this checks that A ~= Q*R and that (Q^H)*Q = I + # i.e. QR decomposition ties out + # this is required as numpy uses only double precision lapack + # routines and computation of qr is numerically + # sensitive, numba using the type specific routines therefore + # sometimes comes out with a different answer (orthonormal bases + # are not unique etc.). 
+ if use_reconstruction: + q, r = got + + # check they are dimensionally correct + for k in range(len(expected)): + self.assertEqual(got[k].shape, expected[k].shape) + + # check A=q*r + rec = np.dot(q, r) + resolution = np.finfo(a.dtype).resolution + np.testing.assert_allclose( + a, + rec, + rtol=10 * resolution, + atol=100 * resolution # zeros tend to be fuzzy + ) + + # check q is orthonormal + self.assert_is_identity_matrix(np.dot(np.conjugate(q.T), q)) + + # Ensure proper resource management + with self.assertNoNRTLeak(): + cfunc(a, **kwargs) + + # test: column vector, tall, wide, square, row vector + # prime sizes + sizes = [(7, 1), (11, 5), (5, 11), (3, 3), (1, 7)] + + # test loop + for size, dtype, order in \ + product(sizes, self.dtypes, 'FC'): + a = self.specific_sample_matrix(size, dtype, order) + check(a) + + rn = "qr" + + # Wrong dtype + self.assert_wrong_dtype(rn, cfunc, + (np.ones((2, 2), dtype=np.int32),)) + + # Dimension issue + self.assert_wrong_dimensions(rn, cfunc, + (np.ones(10, dtype=np.float64),)) + + # no nans or infs + self.assert_no_nan_or_inf(cfunc, + (np.array([[1., 2., ], [np.inf, np.nan]], + dtype=np.float64),)) + + # empty + for sz in [(0, 1), (1, 0), (0, 0)]: + self.assert_raise_on_empty(cfunc, (np.empty(sz),)) + + @needs_lapack + def test_no_input_mutation(self): + X = np.array([[1., 3, 2, 7,], + [-5, 4, 2, 3,], + [9, -3, 1, 1,], + [2, -2, 2, 8,]], order='F') + + X_orig = np.copy(X) + + @jit(nopython=True) + def func(X, test): + if test: + # not executed, but necessary to trigger A ordering in X + X = X[1:2, :] + return np.linalg.qr(X) + + expected = func.py_func(X, False) + np.testing.assert_allclose(X, X_orig) + + got = func(X, False) + np.testing.assert_allclose(X, X_orig) + + for e_a, g_a in zip(expected, got): + np.testing.assert_allclose(e_a, g_a) + + +class TestLinalgSystems(TestLinalgBase): + """ + Base class for testing "system" solvers from np.linalg. + Namely np.linalg.solve() and np.linalg.lstsq(). 
+ """ + + # check for RHS with dimension > 2 raises + def assert_wrong_dimensions_1D(self, name, cfunc, args, la_prefix=True): + prefix = "np.linalg" if la_prefix else "np" + msg = "%s.%s() only supported on 1 and 2-D arrays" % (prefix, name) + self.assert_error(cfunc, args, msg, errors.TypingError) + + # check that a dimensionally invalid system raises + def assert_dimensionally_invalid(self, cfunc, args): + msg = "Incompatible array sizes, system is not dimensionally valid." + self.assert_error(cfunc, args, msg, np.linalg.LinAlgError) + + # check that args with differing dtypes raise + def assert_homogeneous_dtypes(self, name, cfunc, args): + msg = "np.linalg.%s() only supports inputs that have homogeneous dtypes." % name + self.assert_error(cfunc, args, msg, errors.TypingError) + + +class TestLinalgLstsq(TestLinalgSystems): + """ + Tests for np.linalg.lstsq. + """ + + # NOTE: The testing of this routine is hard as it has to handle numpy + # using double precision routines on single precision input, this has + # a knock on effect especially in rank deficient cases and cases where + # conditioning is generally poor. As a result computed ranks can differ + # and consequently the calculated residual can differ. + # The tests try and deal with this as best as they can through the use + # of reconstruction and measures like residual norms. + # Suggestions for improvements are welcomed! 
+ + @needs_lapack + def test_linalg_lstsq(self): + """ + Test np.linalg.lstsq + """ + cfunc = jit(nopython=True)(lstsq_system) + + def check(A, B, **kwargs): + expected = lstsq_system(A, B, **kwargs) + got = cfunc(A, B, **kwargs) + + # check that the returned tuple is same length + self.assertEqual(len(expected), len(got)) + # and that length is 4 + self.assertEqual(len(got), 4) + # and that the computed results are contig and in the same way + self.assert_contig_sanity(got, "C") + + use_reconstruction = False + + # check the ranks are the same and continue to a standard + # match if that is the case (if ranks differ, then output + # in e.g. residual array is of different size!). + try: + self.assertEqual(got[2], expected[2]) + # try plain match of each array to np first + for k in range(len(expected)): + try: + np.testing.assert_array_almost_equal_nulp( + got[k], expected[k], nulp=10) + except AssertionError: + # plain match failed, test by reconstruction + use_reconstruction = True + except AssertionError: + use_reconstruction = True + + if use_reconstruction: + x, res, rank, s = got + + # indicies in the output which are ndarrays + out_array_idx = [0, 1, 3] + + try: + # check the ranks are the same + self.assertEqual(rank, expected[2]) + # check they are dimensionally correct, skip [2] = rank. + for k in out_array_idx: + if isinstance(expected[k], np.ndarray): + self.assertEqual(got[k].shape, expected[k].shape) + except AssertionError: + # check the rank differs by 1. (numerical fuzz) + self.assertTrue(abs(rank - expected[2]) < 2) + + # check if A*X = B + resolution = np.finfo(A.dtype).resolution + try: + # this will work so long as the conditioning is + # ok and the rank is full + rec = np.dot(A, x) + np.testing.assert_allclose( + B, + rec, + rtol=10 * resolution, + atol=10 * resolution + ) + except AssertionError: + # system is probably under/over determined and/or + # poorly conditioned. Check slackened equality + # and that the residual norm is the same. 
+ for k in out_array_idx: + try: + np.testing.assert_allclose( + expected[k], + got[k], + rtol=100 * resolution, + atol=100 * resolution + ) + except AssertionError: + # check the fail is likely due to bad conditioning + c = np.linalg.cond(A) + self.assertGreater(10 * c, (1. / resolution)) + + # make sure the residual 2-norm is ok + # if this fails its probably due to numpy using double + # precision LAPACK routines for singles. + res_expected = np.linalg.norm( + B - np.dot(A, expected[0])) + res_got = np.linalg.norm(B - np.dot(A, x)) + # rtol = 10. as all the systems are products of orthonormals + # and on the small side (rows, cols) < 100. + np.testing.assert_allclose( + res_expected, res_got, rtol=10.) + + # Ensure proper resource management + with self.assertNoNRTLeak(): + cfunc(A, B, **kwargs) + + # test: column vector, tall, wide, square, row vector + # prime sizes, the A's + sizes = [(7, 1), (11, 5), (5, 11), (3, 3), (1, 7)] + # compatible B's for Ax=B must have same number of rows and 1 or more + # columns + + # This test takes ages! So combinations are trimmed via cycling + + # gets a dtype + cycle_dt = cycle(self.dtypes) + + orders = ['F', 'C'] + # gets a memory order flag + cycle_order = cycle(orders) + + # a specific condition number to use in the following tests + # there is nothing special about it other than it is not magic + specific_cond = 10. 
+ + # inner test loop, extracted as there's additional logic etc required + # that'd end up with this being repeated a lot + def inner_test_loop_fn(A, dt, **kwargs): + # test solve Ax=B for (column, matrix) B, same dtype as A + b_sizes = (1, 13) + + for b_size in b_sizes: + + # check 2D B + b_order = next(cycle_order) + B = self.specific_sample_matrix( + (A.shape[0], b_size), dt, b_order) + check(A, B, **kwargs) + + # check 1D B + b_order = next(cycle_order) + tmp = B[:, 0].copy(order=b_order) + check(A, tmp, **kwargs) + + # test loop + for a_size in sizes: + + dt = next(cycle_dt) + a_order = next(cycle_order) + + # A full rank, well conditioned system + A = self.specific_sample_matrix(a_size, dt, a_order) + + # run the test loop + inner_test_loop_fn(A, dt) + + m, n = a_size + minmn = min(m, n) + + # operations that only make sense with a 2D matrix system + if m != 1 and n != 1: + + # Test a rank deficient system + r = minmn - 1 + A = self.specific_sample_matrix( + a_size, dt, a_order, rank=r) + # run the test loop + inner_test_loop_fn(A, dt) + + # Test a system with a given condition number for use in + # testing the rcond parameter. + # This works because the singular values in the + # specific_sample_matrix code are linspace (1, cond, [0... if + # rank deficient]) + A = self.specific_sample_matrix( + a_size, dt, a_order, condition=specific_cond) + # run the test loop + rcond = 1. 
/ specific_cond + approx_half_rank_rcond = minmn * rcond + inner_test_loop_fn(A, dt, + rcond=approx_half_rank_rcond) + + # check empty arrays + empties = [ + [(0, 1), (1,)], # empty A, valid b + [(1, 0), (1,)], # empty A, valid b + [(1, 1), (0,)], # valid A, empty 1D b + [(1, 1), (1, 0)], # valid A, empty 2D b + ] + + for A, b in empties: + args = (np.empty(A), np.empty(b)) + self.assert_raise_on_empty(cfunc, args) + + # Test input validation + ok = np.array([[1., 2.], [3., 4.]], dtype=np.float64) + + # check ok input is ok + cfunc, (ok, ok) + + # check bad inputs + rn = "lstsq" + + # Wrong dtype + bad = np.array([[1, 2], [3, 4]], dtype=np.int32) + self.assert_wrong_dtype(rn, cfunc, (ok, bad)) + self.assert_wrong_dtype(rn, cfunc, (bad, ok)) + + # different dtypes + bad = np.array([[1, 2], [3, 4]], dtype=np.float32) + self.assert_homogeneous_dtypes(rn, cfunc, (ok, bad)) + self.assert_homogeneous_dtypes(rn, cfunc, (bad, ok)) + + # Dimension issue + bad = np.array([1, 2], dtype=np.float64) + self.assert_wrong_dimensions(rn, cfunc, (bad, ok)) + + # no nans or infs + bad = np.array([[1., 2., ], [np.inf, np.nan]], dtype=np.float64) + self.assert_no_nan_or_inf(cfunc, (ok, bad)) + self.assert_no_nan_or_inf(cfunc, (bad, ok)) + + # check 1D is accepted for B (2D is done previously) + # and then that anything of higher dimension raises + oneD = np.array([1., 2.], dtype=np.float64) + cfunc, (ok, oneD) + bad = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.float64) + self.assert_wrong_dimensions_1D(rn, cfunc, (ok, bad)) + + # check a dimensionally invalid system raises (1D and 2D cases + # checked) + bad1D = np.array([1.], dtype=np.float64) + bad2D = np.array([[1.], [2.], [3.]], dtype=np.float64) + self.assert_dimensionally_invalid(cfunc, (ok, bad1D)) + self.assert_dimensionally_invalid(cfunc, (ok, bad2D)) + + @needs_lapack + def test_issue3368(self): + X = np.array([[1., 7.54, 6.52], + [1., 2.70, 4.00], + [1., 2.50, 3.80], + [1., 1.15, 5.64], + [1., 4.22, 3.27], + 
[1., 1.41, 5.70],], order='F') + + X_orig = np.copy(X) + y = np.array([1., 2., 3., 4., 5., 6.]) + + @jit(nopython=True) + def f2(X, y, test): + if test: + # never executed, but necessary to trigger the bug + X = X[1:2, :] + return np.linalg.lstsq(X, y) + + f2(X, y, False) + np.testing.assert_allclose(X, X_orig) + + +class TestLinalgSolve(TestLinalgSystems): + """ + Tests for np.linalg.solve. + """ + + @needs_lapack + def test_linalg_solve(self): + """ + Test np.linalg.solve + """ + cfunc = jit(nopython=True)(solve_system) + + def check(a, b, **kwargs): + expected = solve_system(a, b, **kwargs) + got = cfunc(a, b, **kwargs) + + # check that the computed results are contig and in the same way + self.assert_contig_sanity(got, "F") + + use_reconstruction = False + # try plain match of the result first + try: + np.testing.assert_array_almost_equal_nulp( + got, expected, nulp=10) + except AssertionError: + # plain match failed, test by reconstruction + use_reconstruction = True + + # If plain match fails then reconstruction is used, + # this checks that AX ~= B. + # Plain match can fail due to numerical fuzziness associated + # with system size and conditioning, or more simply from + # numpy using double precision routines for computation that + # could be done in single precision (which is what numba does). + # Therefore minor differences in results can appear due to + # e.g. numerical roundoff being different between two precisions. 
+ if use_reconstruction: + # check they are dimensionally correct + self.assertEqual(got.shape, expected.shape) + + # check AX=B + rec = np.dot(a, got) + resolution = np.finfo(a.dtype).resolution + np.testing.assert_allclose( + b, + rec, + rtol=10 * resolution, + atol=100 * resolution # zeros tend to be fuzzy + ) + + # Ensure proper resource management + with self.assertNoNRTLeak(): + cfunc(a, b, **kwargs) + + # test: prime size squares + sizes = [(1, 1), (3, 3), (7, 7)] + + # test loop + for size, dtype, order in \ + product(sizes, self.dtypes, 'FC'): + A = self.specific_sample_matrix(size, dtype, order) + + b_sizes = (1, 13) + + for b_size, b_order in product(b_sizes, 'FC'): + # check 2D B + B = self.specific_sample_matrix( + (A.shape[0], b_size), dtype, b_order) + check(A, B) + + # check 1D B + tmp = B[:, 0].copy(order=b_order) + check(A, tmp) + + # check empty + cfunc(np.empty((0, 0)), np.empty((0,))) + + # Test input validation + ok = np.array([[1., 0.], [0., 1.]], dtype=np.float64) + + # check ok input is ok + cfunc(ok, ok) + + # check bad inputs + rn = "solve" + + # Wrong dtype + bad = np.array([[1, 0], [0, 1]], dtype=np.int32) + self.assert_wrong_dtype(rn, cfunc, (ok, bad)) + self.assert_wrong_dtype(rn, cfunc, (bad, ok)) + + # different dtypes + bad = np.array([[1, 2], [3, 4]], dtype=np.float32) + self.assert_homogeneous_dtypes(rn, cfunc, (ok, bad)) + self.assert_homogeneous_dtypes(rn, cfunc, (bad, ok)) + + # Dimension issue + bad = np.array([1, 0], dtype=np.float64) + self.assert_wrong_dimensions(rn, cfunc, (bad, ok)) + + # no nans or infs + bad = np.array([[1., 0., ], [np.inf, np.nan]], dtype=np.float64) + self.assert_no_nan_or_inf(cfunc, (ok, bad)) + self.assert_no_nan_or_inf(cfunc, (bad, ok)) + + # check 1D is accepted for B (2D is done previously) + # and then that anything of higher dimension raises + ok_oneD = np.array([1., 2.], dtype=np.float64) + cfunc(ok, ok_oneD) + bad = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.float64) + 
self.assert_wrong_dimensions_1D(rn, cfunc, (ok, bad)) + + # check an invalid system raises (1D and 2D cases checked) + bad1D = np.array([1.], dtype=np.float64) + bad2D = np.array([[1.], [2.], [3.]], dtype=np.float64) + self.assert_dimensionally_invalid(cfunc, (ok, bad1D)) + self.assert_dimensionally_invalid(cfunc, (ok, bad2D)) + + # check that a singular system raises + bad2D = self.specific_sample_matrix((2, 2), np.float64, 'C', rank=1) + self.assert_raise_on_singular(cfunc, (bad2D, ok)) + + @needs_lapack + def test_no_input_mutation(self): + X = np.array([[1., 1, 1, 1], + [0., 1, 1, 1], + [0., 0, 1, 1], + [1., 0, 0, 1],], order='F') + + X_orig = np.copy(X) + y = np.array([1., 2., 3., 4]) + y_orig = np.copy(y) + + @jit(nopython=True) + def func(X, y, test): + if test: + # not executed, triggers A order in X + X = X[1:2, :] + return np.linalg.solve(X, y) + + expected = func.py_func(X, y, False) + np.testing.assert_allclose(X, X_orig) + np.testing.assert_allclose(y, y_orig) + + got = func(X, y, False) + np.testing.assert_allclose(X, X_orig) + np.testing.assert_allclose(y, y_orig) + + np.testing.assert_allclose(expected, got) + + +class TestLinalgPinv(TestLinalgBase): + """ + Tests for np.linalg.pinv. + """ + + @needs_lapack + def test_linalg_pinv(self): + """ + Test np.linalg.pinv + """ + cfunc = jit(nopython=True)(pinv_matrix) + + def check(a, **kwargs): + expected = pinv_matrix(a, **kwargs) + got = cfunc(a, **kwargs) + + # check that the computed results are contig and in the same way + self.assert_contig_sanity(got, "F") + + use_reconstruction = False + # try plain match of each array to np first + + try: + np.testing.assert_array_almost_equal_nulp( + got, expected, nulp=10) + except AssertionError: + # plain match failed, test by reconstruction + use_reconstruction = True + + # If plain match fails then reconstruction is used. + # This can occur due to numpy using double precision + # LAPACK when single can be used, this creates round off + # problems. 
Also, if the matrix has machine precision level + # zeros in its singular values then the singular vectors are + # likely to vary depending on round off. + if use_reconstruction: + + # check they are dimensionally correct + self.assertEqual(got.shape, expected.shape) + + # check pinv(A)*A~=eye + # if the problem is numerical fuzz then this will probably + # work, if the problem is rank deficiency then it won't! + rec = np.dot(got, a) + try: + self.assert_is_identity_matrix(rec) + except AssertionError: + # check A=pinv(pinv(A)) + resolution = 5 * np.finfo(a.dtype).resolution + rec = cfunc(got) + np.testing.assert_allclose( + rec, + a, + rtol=10 * resolution, + atol=100 * resolution # zeros tend to be fuzzy + ) + if a.shape[0] >= a.shape[1]: + # if it is overdetermined or fully determined + # use numba lstsq function (which is type specific) to + # compute the inverse and check against that. + lstsq = jit(nopython=True)(lstsq_system) + lstsq_pinv = lstsq( + a, np.eye( + a.shape[0]).astype( + a.dtype), **kwargs)[0] + np.testing.assert_allclose( + got, + lstsq_pinv, + rtol=10 * resolution, + atol=100 * resolution # zeros tend to be fuzzy + ) + # check the 2 norm of the difference is small + self.assertLess(np.linalg.norm(got - expected), resolution) + + # Ensure proper resource management + with self.assertNoNRTLeak(): + cfunc(a, **kwargs) + + # test: column vector, tall, wide, square, row vector + # prime sizes + sizes = [(7, 1), (11, 5), (5, 11), (3, 3), (1, 7)] + + # When required, a specified condition number + specific_cond = 10. + + # test loop + for size, dtype, order in \ + product(sizes, self.dtypes, 'FC'): + # check a full rank matrix + a = self.specific_sample_matrix(size, dtype, order) + check(a) + + m, n = size + if m != 1 and n != 1: + # check a rank deficient matrix + minmn = min(m, n) + a = self.specific_sample_matrix(size, dtype, order, + condition=specific_cond) + rcond = 1. 
/ specific_cond + approx_half_rank_rcond = minmn * rcond + check(a, rcond=approx_half_rank_rcond) + + # check empty + for sz in [(0, 1), (1, 0)]: + check(np.empty(sz)) + + rn = "pinv" + + # Wrong dtype + self.assert_wrong_dtype(rn, cfunc, + (np.ones((2, 2), dtype=np.int32),)) + + # Dimension issue + self.assert_wrong_dimensions(rn, cfunc, + (np.ones(10, dtype=np.float64),)) + + # no nans or infs + self.assert_no_nan_or_inf(cfunc, + (np.array([[1., 2., ], [np.inf, np.nan]], + dtype=np.float64),)) + + @needs_lapack + def test_issue5870(self): + # testing for mutation of input matrix + @jit(nopython=True) + def some_fn(v): + return np.linalg.pinv(v[0]) + + v_data = np.array([[1., 3, 2, 7,], + [-5, 4, 2, 3,], + [9, -3, 1, 1,], + [2, -2, 2, 8,]], order='F') + + v_orig = np.copy(v_data) + reshaped_v = v_data.reshape((1, 4, 4)) + + expected = some_fn.py_func(reshaped_v) + np.testing.assert_allclose(v_data, v_orig) + + got = some_fn(reshaped_v) + np.testing.assert_allclose(v_data, v_orig) + + np.testing.assert_allclose(expected, got) + + +class TestLinalgDetAndSlogdet(TestLinalgBase): + """ + Tests for np.linalg.det. and np.linalg.slogdet. + Exactly the same inputs are used for both tests as + det() is a trivial function of slogdet(), the tests + are therefore combined. + """ + + def check_det(self, cfunc, a, **kwargs): + expected = det_matrix(a, **kwargs) + got = cfunc(a, **kwargs) + + resolution = 5 * np.finfo(a.dtype).resolution + + # check the determinants are the same + np.testing.assert_allclose(got, expected, rtol=resolution) + + # Ensure proper resource management + with self.assertNoNRTLeak(): + cfunc(a, **kwargs) + + def check_slogdet(self, cfunc, a, **kwargs): + expected = slogdet_matrix(a, **kwargs) + got = cfunc(a, **kwargs) + + # As numba returns python floats types and numpy returns + # numpy float types, some more adjustment and different + # types of comparison than those used with array based + # results is required. 
+ + # check that the returned tuple is same length + self.assertEqual(len(expected), len(got)) + # and that length is 2 + self.assertEqual(len(got), 2) + + # check that the domain of the results match + for k in range(2): + self.assertEqual( + np.iscomplexobj(got[k]), + np.iscomplexobj(expected[k])) + + # turn got[0] into the same dtype as `a` + # this is so checking with nulp will work + got_conv = a.dtype.type(got[0]) + np.testing.assert_array_almost_equal_nulp( + got_conv, expected[0], nulp=10) + # compare log determinant magnitude with a more fuzzy value + # as numpy values come from higher precision lapack routines + resolution = 5 * np.finfo(a.dtype).resolution + np.testing.assert_allclose( + got[1], expected[1], rtol=resolution, atol=resolution) + + # Ensure proper resource management + with self.assertNoNRTLeak(): + cfunc(a, **kwargs) + + def do_test(self, rn, check, cfunc): + + # test: 1x1 as it is unusual, 4x4 as it is even and 7x7 as is it odd! + sizes = [(1, 1), (4, 4), (7, 7)] + + # test loop + for size, dtype, order in \ + product(sizes, self.dtypes, 'FC'): + # check a full rank matrix + a = self.specific_sample_matrix(size, dtype, order) + check(cfunc, a) + + # use a matrix of zeros to trip xgetrf U(i,i)=0 singular test + for dtype, order in product(self.dtypes, 'FC'): + a = np.zeros((3, 3), dtype=dtype) + check(cfunc, a) + + # check empty + check(cfunc, np.empty((0, 0))) + + # Wrong dtype + self.assert_wrong_dtype(rn, cfunc, + (np.ones((2, 2), dtype=np.int32),)) + + # Dimension issue + self.assert_wrong_dimensions(rn, cfunc, + (np.ones(10, dtype=np.float64),)) + + # no nans or infs + self.assert_no_nan_or_inf(cfunc, + (np.array([[1., 2., ], [np.inf, np.nan]], + dtype=np.float64),)) + + @needs_lapack + def test_linalg_det(self): + cfunc = jit(nopython=True)(det_matrix) + self.do_test("det", self.check_det, cfunc) + + @needs_lapack + def test_linalg_slogdet(self): + cfunc = jit(nopython=True)(slogdet_matrix) + self.do_test("slogdet", 
self.check_slogdet, cfunc) + + @needs_lapack + def test_no_input_mutation(self): + X = np.array([[1., 3, 2, 7,], + [-5, 4, 2, 3,], + [9, -3, 1, 1,], + [2, -2, 2, 8,]], order='F') + + X_orig = np.copy(X) + + @jit(nopython=True) + def func(X, test): + if test: + # not executed, but necessary to trigger A ordering in X + X = X[1:2, :] + return np.linalg.slogdet(X) + + expected = func.py_func(X, False) + np.testing.assert_allclose(X, X_orig) + + got = func(X, False) + np.testing.assert_allclose(X, X_orig) + + np.testing.assert_allclose(expected, got) + +# Use TestLinalgSystems as a base to get access to additional +# testing for 1 and 2D inputs. + + +class TestLinalgNorm(TestLinalgSystems): + """ + Tests for np.linalg.norm. + """ + + @needs_lapack + def test_linalg_norm(self): + """ + Test np.linalg.norm + """ + cfunc = jit(nopython=True)(norm_matrix) + + def check(a, **kwargs): + expected = norm_matrix(a, **kwargs) + got = cfunc(a, **kwargs) + + # All results should be in the real domain + self.assertTrue(not np.iscomplexobj(got)) + + resolution = 5 * np.finfo(a.dtype).resolution + + # check the norms are the same to the arg `a` precision + np.testing.assert_allclose(got, expected, rtol=resolution) + + # Ensure proper resource management + with self.assertNoNRTLeak(): + cfunc(a, **kwargs) + + # Check 1D inputs + sizes = [1, 4, 7] + nrm_types = [None, np.inf, -np.inf, 0, 1, -1, 2, -2, 5, 6.7, -4.3] + + # standard 1D input + for size, dtype, nrm_type in \ + product(sizes, self.dtypes, nrm_types): + a = self.sample_vector(size, dtype) + check(a, ord=nrm_type) + + # sliced 1D input + for dtype, nrm_type in \ + product(self.dtypes, nrm_types): + a = self.sample_vector(10, dtype)[::3] + check(a, ord=nrm_type) + + # Check 2D inputs: + # test: column vector, tall, wide, square, row vector + # prime sizes + sizes = [(7, 1), (11, 5), (5, 11), (3, 3), (1, 7)] + nrm_types = [None, np.inf, -np.inf, 1, -1, 2, -2] + + # standard 2D input + for size, dtype, order, nrm_type in \ + 
product(sizes, self.dtypes, 'FC', nrm_types): + # check a full rank matrix + a = self.specific_sample_matrix(size, dtype, order) + check(a, ord=nrm_type) + + # check 2D slices work for the case where xnrm2 is called from + # BLAS (ord=None) to make sure it is working ok. + nrm_types = [None] + for dtype, nrm_type, order in \ + product(self.dtypes, nrm_types, 'FC'): + a = self.specific_sample_matrix((17, 13), dtype, order) + # contig for C order + check(a[:3], ord=nrm_type) + + # contig for Fortran order + check(a[:, 3:], ord=nrm_type) + + # contig for neither order + check(a[1, 4::3], ord=nrm_type) + + # check that numba returns zero for empty arrays. Numpy returns zero + # for most norm types and raises ValueError for +/-np.inf. + # there is not a great deal of consistency in Numpy's response so + # it is not being emulated in Numba + for dtype, nrm_type, order in \ + product(self.dtypes, nrm_types, 'FC'): + a = np.empty((0,), dtype=dtype, order=order) + self.assertEqual(cfunc(a, nrm_type), 0.0) + a = np.empty((0, 0), dtype=dtype, order=order) + self.assertEqual(cfunc(a, nrm_type), 0.0) + + rn = "norm" + + # Wrong dtype + self.assert_wrong_dtype(rn, cfunc, + (np.ones((2, 2), dtype=np.int32),)) + + # Dimension issue, reuse the test from the TestLinalgSystems class + self.assert_wrong_dimensions_1D( + rn, cfunc, (np.ones( + 12, dtype=np.float64).reshape( + 2, 2, 3),)) + + # no nans or infs for 2d case when SVD is used (e.g 2-norm) + self.assert_no_nan_or_inf(cfunc, + (np.array([[1., 2.], [np.inf, np.nan]], + dtype=np.float64), 2)) + + # assert 2D input raises for an invalid norm kind kwarg + self.assert_invalid_norm_kind(cfunc, (np.array([[1., 2.], [3., 4.]], + dtype=np.float64), 6)) + + +class TestLinalgCond(TestLinalgBase): + """ + Tests for np.linalg.cond. 
+ """ + + @needs_lapack + def test_linalg_cond(self): + """ + Test np.linalg.cond + """ + + cfunc = jit(nopython=True)(cond_matrix) + + def check(a, **kwargs): + expected = cond_matrix(a, **kwargs) + got = cfunc(a, **kwargs) + + # All results should be in the real domain + self.assertTrue(not np.iscomplexobj(got)) + + resolution = 5 * np.finfo(a.dtype).resolution + + # check the cond is the same to the arg `a` precision + np.testing.assert_allclose(got, expected, rtol=resolution) + + # Ensure proper resource management + with self.assertNoNRTLeak(): + cfunc(a, **kwargs) + + # valid p values (used to indicate norm type) + ps = [None, np.inf, -np.inf, 1, -1, 2, -2] + sizes = [(3, 3), (7, 7)] + + for size, dtype, order, p in \ + product(sizes, self.dtypes, 'FC', ps): + a = self.specific_sample_matrix(size, dtype, order) + check(a, p=p) + + # When p=None non-square matrices are accepted. + sizes = [(7, 1), (11, 5), (5, 11), (1, 7)] + for size, dtype, order in \ + product(sizes, self.dtypes, 'FC'): + a = self.specific_sample_matrix(size, dtype, order) + check(a) + + # empty + for sz in [(0, 1), (1, 0), (0, 0)]: + self.assert_raise_on_empty(cfunc, (np.empty(sz),)) + + # singular systems to trip divide-by-zero + x = np.array([[1, 0], [0, 0]], dtype=np.float64) + check(x) + check(x, p=2) + x = np.array([[0, 0], [0, 0]], dtype=np.float64) + check(x, p=-2) + + # try an ill-conditioned system with 2-norm, make sure np raises an + # overflow warning as the result is `+inf` and that the result from + # numba matches. + with warnings.catch_warnings(): + a = np.array([[1.e308, 0], [0, 0.1]], dtype=np.float64) + warnings.simplefilter("ignore", RuntimeWarning) + check(a) + + rn = "cond" + + # Wrong dtype + self.assert_wrong_dtype(rn, cfunc, + (np.ones((2, 2), dtype=np.int32),)) + + # Dimension issue + self.assert_wrong_dimensions(rn, cfunc, + (np.ones(10, dtype=np.float64),)) + + # no nans or infs when p="None" (default for kwarg). 
+ self.assert_no_nan_or_inf(cfunc, + (np.array([[1., 2., ], [np.inf, np.nan]], + dtype=np.float64),)) + + # assert raises for an invalid norm kind kwarg + self.assert_invalid_norm_kind(cfunc, (np.array([[1., 2.], [3., 4.]], + dtype=np.float64), 6)) + + +class TestLinalgMatrixRank(TestLinalgSystems): + """ + Tests for np.linalg.matrix_rank. + """ + + @needs_lapack + def test_linalg_matrix_rank(self): + """ + Test np.linalg.matrix_rank + """ + + cfunc = jit(nopython=True)(matrix_rank_matrix) + + def check(a, **kwargs): + expected = matrix_rank_matrix(a, **kwargs) + got = cfunc(a, **kwargs) + + # Ranks are integral so comparison should be trivial. + # check the rank is the same + np.testing.assert_allclose(got, expected) + + # Ensure proper resource management + with self.assertNoNRTLeak(): + cfunc(a, **kwargs) + + sizes = [(7, 1), (11, 5), (5, 11), (3, 3), (1, 7)] + + for size, dtype, order in \ + product(sizes, self.dtypes, 'FC'): + # check full rank system + a = self.specific_sample_matrix(size, dtype, order) + check(a) + + # If the system is a matrix, check rank deficiency is reported + # correctly. Check all ranks from 0 to (full rank - 1). + tol = 1e-13 + # first check 1 to (full rank - 1) + for k in range(1, min(size) - 1): + # check rank k + a = self.specific_sample_matrix(size, dtype, order, rank=k) + self.assertEqual(cfunc(a), k) + check(a) + # check provision of a tolerance works as expected + # create a (m x n) diagonal matrix with a singular value + # guaranteed below the tolerance 1e-13 + m, n = a.shape + a[:, :] = 0. # reuse `a`'s memory + idx = np.nonzero(np.eye(m, n)) + if np.iscomplexobj(a): + b = 1. + np.random.rand(k) + 1.j +\ + 1.j * np.random.rand(k) + # min singular value is sqrt(2)*1e-14 + b[0] = 1e-14 + 1e-14j + else: + b = 1. 
+ np.random.rand(k) + b[0] = 1e-14 # min singular value is 1e-14 + a[idx[0][:k], idx[1][:k]] = b.astype(dtype) + # rank should be k-1 (as tol is present) + self.assertEqual(cfunc(a, tol), k - 1) + check(a, tol=tol) + # then check zero rank + a[:, :] = 0. + self.assertEqual(cfunc(a), 0) + check(a) + # add in a singular value that is small + if np.iscomplexobj(a): + a[-1, -1] = 1e-14 + 1e-14j + else: + a[-1, -1] = 1e-14 + # check the system has zero rank to a given tolerance + self.assertEqual(cfunc(a, tol), 0) + check(a, tol=tol) + + # check the zero vector returns rank 0 and a nonzero vector + # returns rank 1. + for dt in self.dtypes: + a = np.zeros((5), dtype=dt) + self.assertEqual(cfunc(a), 0) + check(a) + # make it a nonzero vector + a[0] = 1. + self.assertEqual(cfunc(a), 1) + check(a) + + # empty + for sz in [(0, 1), (1, 0), (0, 0)]: + for tol in [None, 1e-13]: + self.assert_raise_on_empty(cfunc, (np.empty(sz), tol)) + + rn = "matrix_rank" + + # Wrong dtype + self.assert_wrong_dtype(rn, cfunc, + (np.ones((2, 2), dtype=np.int32),)) + + # Dimension issue + self.assert_wrong_dimensions_1D( + rn, cfunc, (np.ones( + 12, dtype=np.float64).reshape( + 2, 2, 3),)) + + # no nans or infs for 2D case + self.assert_no_nan_or_inf(cfunc, + (np.array([[1., 2., ], [np.inf, np.nan]], + dtype=np.float64),)) + + @needs_lapack + def test_no_input_mutation(self): + # this is here to test no input mutation by + # numba.np.linalg._compute_singular_values + # which is the workhorse for norm with 2d input, rank and cond. 
+ + X = np.array([[1., 3, 2, 7,], + [-5, 4, 2, 3,], + [9, -3, 1, 1,], + [2, -2, 2, 8,]], order='F') + + X_orig = np.copy(X) + + @jit(nopython=True) + def func(X, test): + if test: + # not executed, but necessary to trigger A ordering in X + X = X[1:2, :] + return np.linalg.matrix_rank(X) + + expected = func.py_func(X, False) + np.testing.assert_allclose(X, X_orig) + + got = func(X, False) + np.testing.assert_allclose(X, X_orig) + + np.testing.assert_allclose(expected, got) + + +class TestLinalgMatrixPower(TestLinalgBase): + """ + Tests for np.linalg.matrix_power. + """ + + def assert_int_exponenent(self, cfunc, args): + # validate first arg is ok + cfunc(args[0], 1) + # pass in both args and assert fail + with self.assertRaises(errors.TypingError): + cfunc(*args) + + @needs_lapack + def test_linalg_matrix_power(self): + cfunc = jit(nopython=True)(matrix_power_matrix) + + def check(a, pwr): + expected = matrix_power_matrix(a, pwr) + got = cfunc(a, pwr) + + # check that the computed results are contig and in the same way + self.assert_contig_sanity(got, "C") + + res = 7 * np.finfo(a.dtype).resolution + np.testing.assert_allclose(got, expected, rtol=res, atol=res) + + # Ensure proper resource management + with self.assertNoNRTLeak(): + cfunc(a, pwr) + + sizes = [(1, 1), (5, 5), (7, 7)] + powers = [-33, -17] + list(range(-10, 10)) + [17, 33] + + for size, pwr, dtype, order in \ + product(sizes, powers, self.dtypes, 'FC'): + a = self.specific_sample_matrix(size, dtype, order) + check(a, pwr) + a = np.empty((0, 0), dtype=dtype, order=order) + check(a, pwr) + + rn = "matrix_power" + + # Wrong dtype + self.assert_wrong_dtype(rn, cfunc, + (np.ones((2, 2), dtype=np.int32), 1)) + + # not an int power + self.assert_wrong_dtype(rn, cfunc, + (np.ones((2, 2), dtype=np.int32), 1)) + + # non square system + args = (np.ones((3, 5)), 1) + msg = 'input must be a square array' + self.assert_error(cfunc, args, msg) + + # Dimension issue + self.assert_wrong_dimensions(rn, cfunc, + 
(np.ones(10, dtype=np.float64), 1)) + + # non-integer supplied as exponent + self.assert_int_exponenent(cfunc, (np.ones((2, 2)), 1.2)) + + # singular matrix is not invertible + self.assert_raise_on_singular(cfunc, (np.array([[0., 0], [1, 1]]), -1)) + + +class TestTrace(TestLinalgBase): + """ + Tests for np.trace. + """ + + def setUp(self): + super(TestTrace, self).setUp() + # compile two versions, one with and one without the offset kwarg + self.cfunc_w_offset = jit(nopython=True)(trace_matrix) + self.cfunc_no_offset = jit(nopython=True)(trace_matrix_no_offset) + + def assert_int_offset(self, cfunc, a, **kwargs): + # validate first arg is ok + cfunc(a) + # pass in kwarg and assert fail + with self.assertRaises(errors.TypingError): + cfunc(a, **kwargs) + + def test_trace(self): + + def check(a, **kwargs): + if 'offset' in kwargs: + expected = trace_matrix(a, **kwargs) + cfunc = self.cfunc_w_offset + else: + expected = trace_matrix_no_offset(a, **kwargs) + cfunc = self.cfunc_no_offset + + got = cfunc(a, **kwargs) + + res = 5 * np.finfo(a.dtype).resolution + np.testing.assert_allclose(got, expected, rtol=res, atol=res) + + # Ensure proper resource management + with self.assertNoNRTLeak(): + cfunc(a, **kwargs) + + # test: column vector, tall, wide, square, row vector + # prime sizes + sizes = [(7, 1), (11, 5), (5, 11), (3, 3), (1, 7)] + + # offsets to cover the range of the matrix sizes above + offsets = [-13, -12, -11] + list(range(-10, 10)) + [11, 12, 13] + + for size, offset, dtype, order in \ + product(sizes, offsets, self.dtypes, 'FC'): + a = self.specific_sample_matrix(size, dtype, order) + check(a, offset=offset) + if offset == 0: + check(a) + a = np.empty((0, 0), dtype=dtype, order=order) + check(a, offset=offset) + if offset == 0: + check(a) + + rn = "trace" + + # Dimension issue + self.assert_wrong_dimensions(rn, self.cfunc_w_offset, + (np.ones(10, dtype=np.float64), 1), False) + self.assert_wrong_dimensions(rn, self.cfunc_no_offset, + (np.ones(10, 
dtype=np.float64),), False) + + # non-integer supplied as exponent + self.assert_int_offset( + self.cfunc_w_offset, np.ones( + (2, 2)), offset=1.2) + + def test_trace_w_optional_input(self): + "Issue 2314" + @jit("(optional(float64[:,:]),)", nopython=True) + def tested(a): + return np.trace(a) + + a = np.ones((5, 5), dtype=np.float64) + tested(a) + + with self.assertRaises(TypeError) as raises: + tested(None) + + errmsg = str(raises.exception) + self.assertEqual('expected array(float64, 2d, A), got None', errmsg) + + +class TestBasics(TestLinalgSystems): # TestLinalgSystems for 1d test + + order1 = cycle(['F', 'C', 'C', 'F']) + order2 = cycle(['C', 'F', 'C', 'F']) + + # test: column vector, matrix, row vector, 1d sizes + # (7, 1, 3) and two scalars + sizes = [(7, 1), (3, 3), (1, 7), (7,), (1,), (3,), 3., 5.] + + def _assert_wrong_dim(self, rn, cfunc): + # Dimension issue + self.assert_wrong_dimensions_1D( + rn, cfunc, (np.array([[[1]]], dtype=np.float64), np.ones(1)), False) + self.assert_wrong_dimensions_1D( + rn, cfunc, (np.ones(1), np.array([[[1]]], dtype=np.float64)), False) + + def _gen_input(self, size, dtype, order): + if not isinstance(size, tuple): + return size + else: + if len(size) == 1: + return self.sample_vector(size[0], dtype) + else: + return self.sample_vector( + size[0] * size[1], + dtype).reshape( + size, order=order) + + def _get_input(self, size1, size2, dtype): + a = self._gen_input(size1, dtype, next(self.order1)) + b = self._gen_input(size2, dtype, next(self.order2)) + # force domain consistency as underlying ufuncs require it + if np.iscomplexobj(a): + b = b + 1j + if np.iscomplexobj(b): + a = a + 1j + return (a, b) + + def test_outer(self): + cfunc = jit(nopython=True)(outer_matrix) + + def check(a, b, **kwargs): + + # check without kwargs + expected = outer_matrix(a, b) + got = cfunc(a, b) + + res = 5 * np.finfo(np.asarray(a).dtype).resolution + np.testing.assert_allclose(got, expected, rtol=res, atol=res) + + # if kwargs present check 
with them too + if 'out' in kwargs: + got = cfunc(a, b, **kwargs) + np.testing.assert_allclose(got, expected, rtol=res, + atol=res) + np.testing.assert_allclose(kwargs['out'], expected, + rtol=res, atol=res) + + # Ensure proper resource management + with self.assertNoNRTLeak(): + cfunc(a, b, **kwargs) + + dts = cycle(self.dtypes) + for size1, size2 in product(self.sizes, self.sizes): + dtype = next(dts) + (a, b) = self._get_input(size1, size2, dtype) + check(a, b) + c = np.empty((np.asarray(a).size, np.asarray(b).size), + dtype=np.asarray(a).dtype) + check(a, b, out=c) + + self._assert_wrong_dim("outer", cfunc) + + def test_kron(self): + cfunc = jit(nopython=True)(kron_matrix) + + def check(a, b, **kwargs): + + expected = kron_matrix(a, b) + got = cfunc(a, b) + + res = 5 * np.finfo(np.asarray(a).dtype).resolution + np.testing.assert_allclose(got, expected, rtol=res, atol=res) + + # Ensure proper resource management + with self.assertNoNRTLeak(): + cfunc(a, b) + + for size1, size2, dtype in \ + product(self.sizes, self.sizes, self.dtypes): + (a, b) = self._get_input(size1, size2, dtype) + check(a, b) + + self._assert_wrong_dim("kron", cfunc) + + args = (np.empty(10)[::2], np.empty(10)[::2]) + msg = "only supports 'C' or 'F' layout" + self.assert_error(cfunc, args, msg, err=errors.TypingError) + + +class TestHelpers(TestCase): + def test_copy_to_fortran_order(self): + from numba.np.linalg import _copy_to_fortran_order + + def check(udt, expectfn, shapes, dtypes, orders): + for shape, dtype, order in product(shapes, dtypes, orders): + a = np.arange(np.prod(shape)).reshape(shape, order=order) + + r = udt(a) + # check correct operation + self.assertPreciseEqual(expectfn(a), r) + # check new copy has made + self.assertNotEqual(a.ctypes.data, r.ctypes.data) + + @njit + def direct_call(a): + return _copy_to_fortran_order(a) + + shapes = [(3, 4), (3, 2, 5)] + dtypes = [np.intp] + orders = ['C', 'F'] + check(direct_call, np.asfortranarray, shapes, dtypes, orders) + + + @njit 
+ def slice_to_any(a): + # make a 'any' layout slice + sliced = a[::2][0] + return _copy_to_fortran_order(sliced) + + shapes = [(3, 3, 4), (3, 3, 2, 5)] + dtypes = [np.intp] + orders = ['C', 'F'] + + def expected_slice_to_any(a): + # make a 'any' layout slice + sliced = a[::2][0] + return np.asfortranarray(sliced) + + check(slice_to_any, expected_slice_to_any, shapes, dtypes, orders) + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_listimpl.py b/venv/lib/python3.10/site-packages/numba/tests/test_listimpl.py new file mode 100644 index 0000000000000000000000000000000000000000..f55e2369a4208903951ea1b5d2e910028cd1a2a9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_listimpl.py @@ -0,0 +1,527 @@ +""" +Testing C implementation of the numba typed-list +""" + +import ctypes +import struct + +from numba.tests.support import TestCase +from numba import _helperlib + + +LIST_OK = 0 +LIST_ERR_INDEX = -1 +LIST_ERR_NO_MEMORY = -2 +LIST_ERR_MUTATED = -3 +LIST_ERR_ITER_EXHAUSTED = -4 +LIST_ERR_IMMUTABLE = -5 + + +class List(object): + """A wrapper around the C-API to provide a minimal list object for + testing. 
+ """ + def __init__(self, tc, item_size, allocated): + """ + Parameters + ---------- + tc : TestCase instance + item_size : int + byte size for the items + allocated : int + number of items to allocate for + """ + self.tc = tc + self.item_size = item_size + self.lp = self.list_new(item_size, allocated) + + # The following methods implement part of the list API + + def __del__(self): + self.tc.numba_list_free(self.lp) + + def __len__(self): + return self.list_length() + + def __setitem__(self, i, item): + return self.list_setitem(i, item) + + def __getitem__(self, i): + return self.list_getitem(i) + + def __iter__(self): + return ListIter(self) + + def __delitem__(self, i): + self.list_delitem(i) + + def handle_index(self, i): + # handling negative indices is done at the compiler level, so we only + # support -1 to be last element of the list here + if i < -1 or len(self) == 0: + IndexError("list index out of range") + elif i == -1: + i = len(self) - 1 + return i + + @property + def allocated(self): + return self.list_allocated() + + @property + def is_mutable(self): + return self.list_is_mutable() + + def set_mutable(self): + return self.list_set_is_mutable(1) + + def set_immutable(self): + return self.list_set_is_mutable(0) + + def append(self, item): + self.list_append(item) + + def pop(self, i=-1): + return self.list_pop(i) + + # The methods below are higher-level wrappers for the C-API wrappers + + def list_new(self, item_size, allocated): + lp = ctypes.c_void_p() + status = self.tc.numba_list_new( + ctypes.byref(lp), item_size, allocated, + ) + self.tc.assertEqual(status, LIST_OK) + return lp + + def list_length(self): + return self.tc.numba_list_length(self.lp) + + def list_allocated(self): + return self.tc.numba_list_allocated(self.lp) + + def list_is_mutable(self): + return self.tc.numba_list_is_mutable(self.lp) + + def list_set_is_mutable(self, is_mutable): + return self.tc.numba_list_set_is_mutable(self.lp, is_mutable) + + def list_setitem(self, i, 
item): + status = self.tc.numba_list_setitem(self.lp, i, item) + if status == LIST_ERR_INDEX: + raise IndexError("list index out of range") + elif status == LIST_ERR_IMMUTABLE: + raise ValueError("list is immutable") + else: + self.tc.assertEqual(status, LIST_OK) + + def list_getitem(self, i): + i = self.handle_index(i) + item_out_buffer = ctypes.create_string_buffer(self.item_size) + status = self.tc.numba_list_getitem(self.lp, i, item_out_buffer) + if status == LIST_ERR_INDEX: + raise IndexError("list index out of range") + else: + self.tc.assertEqual(status, LIST_OK) + return item_out_buffer.raw + + def list_append(self, item): + status = self.tc.numba_list_append(self.lp, item) + if status == LIST_ERR_IMMUTABLE: + raise ValueError("list is immutable") + self.tc.assertEqual(status, LIST_OK) + + def list_pop(self, i): + # pop is getitem and delitem + i = self.handle_index(i) + item = self.list_getitem(i) + self.list_delitem(i) + return item + + def list_delitem(self, i): + # special case slice + if isinstance(i, slice): + status = self.tc.numba_list_delete_slice(self.lp, + i.start, + i.stop, + i.step) + if status == LIST_ERR_IMMUTABLE: + raise ValueError("list is immutable") + self.tc.assertEqual(status, LIST_OK) + # must be an integer, defer to delitem + else: + i = self.handle_index(i) + status = self.tc.numba_list_delitem(self.lp, i) + if status == LIST_ERR_INDEX: + raise IndexError("list index out of range") + elif status == LIST_ERR_IMMUTABLE: + raise ValueError("list is immutable") + self.tc.assertEqual(status, LIST_OK) + + def list_iter(self, itptr): + self.tc.numba_list_iter(itptr, self.lp) + + def list_iter_next(self, itptr): + bi = ctypes.c_void_p(0) + status = self.tc.numba_list_iter_next( + itptr, ctypes.byref(bi), + ) + if status == LIST_ERR_MUTATED: + raise ValueError('list mutated') + elif status == LIST_ERR_ITER_EXHAUSTED: + raise StopIteration + else: + self.tc.assertGreaterEqual(status, 0) + item = (ctypes.c_char * 
self.item_size).from_address(bi.value) + return item.value + + +class ListIter(object): + """An iterator for the `List`. + """ + def __init__(self, parent): + self.parent = parent + itsize = self.parent.tc.numba_list_iter_sizeof() + self.it_state_buf = (ctypes.c_char_p * itsize)(0) + self.it = ctypes.cast(self.it_state_buf, ctypes.c_void_p) + self.parent.list_iter(self.it) + + def __iter__(self): + return self + + def __next__(self): + return self.parent.list_iter_next(self.it) + + next = __next__ # needed for py2 only + + +class TestListImpl(TestCase): + def setUp(self): + """Bind to the c_helper library and provide the ctypes wrapper. + """ + list_t = ctypes.c_void_p + iter_t = ctypes.c_void_p + + def wrap(name, restype, argtypes=()): + proto = ctypes.CFUNCTYPE(restype, *argtypes) + return proto(_helperlib.c_helpers[name]) + + # numba_test_list() + self.numba_test_list = wrap( + 'test_list', + ctypes.c_int, + ) + + # numba_list_new(NB_List *l, Py_ssize_t item_size, Py_ssize_t allocated) + self.numba_list_new = wrap( + 'list_new', + ctypes.c_int, + [ctypes.POINTER(list_t), ctypes.c_ssize_t, ctypes.c_ssize_t], + ) + # numba_list_free(NB_List *l) + self.numba_list_free = wrap( + 'list_free', + None, + [list_t], + ) + # numba_list_length(NB_List *l) + self.numba_list_length = wrap( + 'list_length', + ctypes.c_int, + [list_t], + ) + # numba_list_allocated(NB_List *l) + self.numba_list_allocated = wrap( + 'list_allocated', + ctypes.c_int, + [list_t], + ) + # numba_list_is_mutable(NB_List *lp) + self.numba_list_is_mutable = wrap( + 'list_is_mutable', + ctypes.c_int, + [list_t], + ) + # numba_list_set_is_mutable(NB_List *lp, int is_mutable) + self.numba_list_set_is_mutable = wrap( + 'list_set_is_mutable', + None, + [list_t, ctypes.c_int], + ) + # numba_list_setitem(NB_List *l, Py_ssize_t i, const char *item) + self.numba_list_setitem = wrap( + 'list_setitem', + ctypes.c_int, + [list_t, ctypes.c_ssize_t, ctypes.c_char_p], + ) + # numba_list_append(NB_List *l, const char 
*item) + self.numba_list_append = wrap( + 'list_append', + ctypes.c_int, + [list_t, ctypes.c_char_p], + ) + # numba_list_getitem(NB_List *l, Py_ssize_t i, char *out) + self.numba_list_getitem = wrap( + 'list_getitem', + ctypes.c_int, + [list_t, ctypes.c_ssize_t, ctypes.c_char_p], + ) + # numba_list_delitem(NB_List *l, Py_ssize_t i) + self.numba_list_delitem = wrap( + 'list_delitem', + ctypes.c_int, + [list_t, ctypes.c_ssize_t], + ) + # numba_list_delete_slice(NB_List *l, + # Py_ssize_t start, + # Py_ssize_t stop, + # Py_ssize_t step) + self.numba_list_delete_slice = wrap( + 'list_delete_slice', + ctypes.c_int, + [list_t, ctypes.c_ssize_t, ctypes.c_ssize_t, ctypes.c_ssize_t], + ) + # numba_list_iter_sizeof() + self.numba_list_iter_sizeof = wrap( + 'list_iter_sizeof', + ctypes.c_size_t, + ) + # numba_list_iter(NB_ListIter *it, NB_List *l) + self.numba_list_iter = wrap( + 'list_iter', + None, + [ + iter_t, + list_t, + ], + ) + # numba_list_iter_next(NB_ListIter *it, const char **item_ptr) + self.numba_list_iter_next = wrap( + 'list_iter_next', + ctypes.c_int, + [ + iter_t, # it + ctypes.POINTER(ctypes.c_void_p), # item_ptr + ], + ) + + def test_simple_c_test(self): + # Runs the basic test in C. 
+ ret = self.numba_test_list() + self.assertEqual(ret, 0) + + def test_length(self): + l = List(self, 8, 0) + self.assertEqual(len(l), 0) + + def test_allocation(self): + for i in range(16): + l = List(self, 8, i) + self.assertEqual(len(l), 0) + self.assertEqual(l.allocated, i) + + def test_append_get_string(self): + l = List(self, 8, 1) + l.append(b"abcdefgh") + self.assertEqual(len(l), 1) + r = l[0] + self.assertEqual(r, b"abcdefgh") + + def test_append_get_int(self): + l = List(self, 8, 1) + l.append(struct.pack("q", 1)) + self.assertEqual(len(l), 1) + r = struct.unpack("q", l[0])[0] + self.assertEqual(r, 1) + + def test_append_get_string_realloc(self): + l = List(self, 8, 1) + l.append(b"abcdefgh") + self.assertEqual(len(l), 1) + l.append(b"hijklmno") + self.assertEqual(len(l), 2) + r = l[1] + self.assertEqual(r, b"hijklmno") + + def test_set_item_getitem_index_error(self): + l = List(self, 8, 0) + with self.assertRaises(IndexError): + l[0] + with self.assertRaises(IndexError): + l[0] = b"abcdefgh" + + def test_iter(self): + l = List(self, 1, 0) + values = [b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h'] + for i in values: + l.append(i) + received = [] + for j in l: + received.append(j) + self.assertEqual(values, received) + + def test_pop(self): + l = List(self, 1, 0) + values = [b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h'] + for i in values: + l.append(i) + self.assertEqual(len(l), 8) + + received = l.pop() + self.assertEqual(b'h', received) + self.assertEqual(len(l), 7) + received = [j for j in l] + self.assertEqual(received, values[:-1]) + + received = l.pop(0) + self.assertEqual(b'a', received) + self.assertEqual(len(l), 6) + + received = l.pop(2) + self.assertEqual(b'd', received) + self.assertEqual(len(l), 5) + + expected = [b'b', b'c', b'e', b'f', b'g'] + received = [j for j in l] + self.assertEqual(received, expected) + + def test_pop_index_error(self): + l = List(self, 8, 0) + with self.assertRaises(IndexError): + l.pop() + + def 
test_pop_byte(self): + l = List(self, 4, 0) + values = [b'aaaa', b'bbbb', b'cccc', b'dddd', + b'eeee', b'ffff', b'gggg', b'hhhhh'] + for i in values: + l.append(i) + self.assertEqual(len(l), 8) + + received = l.pop() + self.assertEqual(b'hhhh', received) + self.assertEqual(len(l), 7) + received = [j for j in l] + self.assertEqual(received, values[:-1]) + + received = l.pop(0) + self.assertEqual(b'aaaa', received) + self.assertEqual(len(l), 6) + + received = l.pop(2) + self.assertEqual(b'dddd', received) + self.assertEqual(len(l), 5) + + expected = [b'bbbb', b'cccc', b'eeee', b'ffff', b'gggg'] + received = [j for j in l] + self.assertEqual(received, expected) + + def test_delitem(self): + l = List(self, 1, 0) + values = [b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h'] + for i in values: + l.append(i) + self.assertEqual(len(l), 8) + + # delete first item + del l[0] + self.assertEqual(len(l), 7) + self.assertEqual(list(l), values[1:]) + # delete last item + del l[-1] + self.assertEqual(len(l), 6) + self.assertEqual(list(l), values[1:-1]) + # delete item from middle + del l[2] + self.assertEqual(len(l), 5) + self.assertEqual(list(l), [b'b', b'c', b'e', b'f', b'g']) + + def test_delete_slice(self): + l = List(self, 1, 0) + values = [b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h'] + for i in values: + l.append(i) + self.assertEqual(len(l), 8) + + # delete every second item + # no slice default normalization here, be explicit about start anb stop + del l[0:8:2] + self.assertEqual(len(l), 4) + self.assertEqual(list(l), values[1:8:2]) + + # delete first item + del l[0:1:1] + self.assertEqual(len(l), 3) + self.assertEqual(list(l), [b'd', b'f', b'h']) + + # delete last item + del l[2:3:1] + self.assertEqual(len(l), 2) + self.assertEqual(list(l), [b'd', b'f']) + + # delete all left items + del l[0:2:1] + self.assertEqual(len(l), 0) + self.assertEqual(list(l), []) + + def check_sizing(self, item_size, nmax): + # Helper to verify different item_sizes + l = List(self, item_size, 
0) + + def make_item(v): + tmp = "{:0{}}".format(nmax - v - 1, item_size).encode("latin-1") + return tmp[:item_size] + + for i in range(nmax): + l.append(make_item(i)) + + self.assertEqual(len(l), nmax) + + for i in range(nmax): + self.assertEqual(l[i], make_item(i)) + + def test_sizing(self): + # Check different sizes of the key & value. + for i in range(1, 16): + self.check_sizing(item_size=i, nmax=2**i) + + def test_mutability(self): + # setup and populate a singleton + l = List(self, 8, 1) + one = struct.pack("q", 1) + l.append(one) + self.assertTrue(l.is_mutable) + self.assertEqual(len(l), 1) + r = struct.unpack("q", l[0])[0] + self.assertEqual(r, 1) + + # set to immutable and test guards + l.set_immutable() + self.assertFalse(l.is_mutable) + # append + with self.assertRaises(ValueError) as raises: + l.append(one) + self.assertIn("list is immutable", str(raises.exception)) + # setitem + with self.assertRaises(ValueError) as raises: + l[0] = one + self.assertIn("list is immutable", str(raises.exception)) + # pop + with self.assertRaises(ValueError) as raises: + l.pop() + self.assertIn("list is immutable", str(raises.exception)) + # delitem with index + with self.assertRaises(ValueError) as raises: + del l[0] + self.assertIn("list is immutable", str(raises.exception)) + # delitem with slice + with self.assertRaises(ValueError) as raises: + del l[0:1:1] + self.assertIn("list is immutable", str(raises.exception)) + l.set_mutable() + + # check that nothing has changed + self.assertTrue(l.is_mutable) + self.assertEqual(len(l), 1) + r = struct.unpack("q", l[0])[0] + self.assertEqual(r, 1) diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_listobject.py b/venv/lib/python3.10/site-packages/numba/tests/test_listobject.py new file mode 100644 index 0000000000000000000000000000000000000000..1d3d8b3aa6c984f9c234764e0b810e3c1c98b9b0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_listobject.py @@ -0,0 +1,1652 @@ +""" Tests for the 
compiler components of the Numba typed-list. + +The tests here should exercise everything within an `@njit` context. +Importantly, the tests should not return a typed list from within such a +context as this would require code from numba/typed/typedlist.py (this is +tested separately). Tests in this file build on each other in the order of +writing. For example, the first test, tests the creation, append and len of the +list. These are the barebones to do anything useful with a list. The subsequent +test for getitem assumes makes use of these three operations and therefore +assumes that they work. + +""" + +from textwrap import dedent + +from numba import njit +from numba import int32 +from numba.extending import register_jitable +from numba.core import types +from numba.core.errors import TypingError +from numba.tests.support import (TestCase, MemoryLeakMixin, override_config, + forbid_codegen) +from numba.typed import listobject, List + + +class TestCreateAppendLength(MemoryLeakMixin, TestCase): + """Test list creation, append and len. 
""" + + def test_list_create(self): + @njit + def foo(n): + l = listobject.new_list(int32) + for i in range(n): + l.append(i) + return len(l) + + for i in (0, 1, 2, 100): + self.assertEqual(foo(i), i) + + def test_list_create_no_jit(self): + with override_config('DISABLE_JIT', True): + with forbid_codegen(): + l = listobject.new_list(int32) + self.assertEqual(type(l), list) + + def test_nonempty_list_create_no_jit(self): + # See Issue #6001: https://github.com/numba/numba/issues/6001 + with override_config('DISABLE_JIT', True): + with forbid_codegen(): + l = List([1, 2, 3]) + self.assertEqual(type(l), list) + self.assertEqual(l, [1, 2, 3]) + + +class TestBool(MemoryLeakMixin, TestCase): + """Test list bool.""" + + def test_list_bool(self): + @njit + def foo(n): + l = listobject.new_list(int32) + for i in range(n): + l.append(i) + return bool(l) + + for i in (0, 1, 2, 100): + self.assertEqual(foo(i), i > 0) + + +class TestAllocation(MemoryLeakMixin, TestCase): + + def test_list_allocation(self): + @njit + def foo_kwarg(n): + l = listobject.new_list(int32, allocated=n) + return l._allocated() + + for i in range(16): + self.assertEqual(foo_kwarg(i), i) + + @njit + def foo_posarg(n): + l = listobject.new_list(int32, n) + return l._allocated() + for i in range(16): + self.assertEqual(foo_posarg(i), i) + + def test_list_allocation_negative(self): + @njit + def foo(): + l = listobject.new_list(int32, -1) + return l._allocated() + + with self.assertRaises(RuntimeError) as raises: + self.assertEqual(foo(), -1) + self.assertIn( + "expecting *allocated* to be >= 0", + str(raises.exception), + ) + + +class TestToFromMeminfo(MemoryLeakMixin, TestCase): + + def test_list_to_from_meminfo(self): + """ + Exercise listobject.{_as_meminfo, _from_meminfo} + """ + + @njit + def boxer(): + l = listobject.new_list(int32) + for i in range(10, 20): + l.append(i) + return listobject._as_meminfo(l) + + lsttype = types.ListType(int32) + + @njit + def unboxer(mi): + l = 
listobject._from_meminfo(mi, lsttype) + return l[0], l[1], l[2], l[3], l[4], l[5], l[6], l[7], l[8], l[9] + + mi = boxer() + self.assertEqual(mi.refcount, 1) + + received = list(unboxer(mi)) + expected = list(range(10, 20)) + self.assertEqual(received, expected) + + +class TestGetitem(MemoryLeakMixin, TestCase): + """Test list getitem. """ + + def test_list_getitem_singleton(self): + @njit + def foo(n): + l = listobject.new_list(int32) + l.append(n) + return l[0] + + self.assertEqual(foo(0), 0) + + def test_list_getitem_singleton_negtive_index(self): + @njit + def foo(n): + l = listobject.new_list(int32) + l.append(n) + return l[-1] + + self.assertEqual(foo(0), 0) + + def test_list_getitem_multiple(self): + @njit + def foo(i): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + return l[i] + + for i,j in ((0, 10), (9, 19), (4, 14), (-5, 15), (-1, 19), (-10, 10)): + self.assertEqual(foo(i), j) + + def test_list_getitem_empty_index_error(self): + self.disable_leak_check() + + @njit + def foo(i): + l = listobject.new_list(int32) + return l[i] + + for i in (1, 0, -1): + with self.assertRaises(IndexError) as raises: + foo(i) + self.assertIn( + "list index out of range", + str(raises.exception), + ) + + def test_list_getitem_multiple_index_error(self): + self.disable_leak_check() + + @njit + def foo(i): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + return l[i] + + for i in (10, -11): + with self.assertRaises(IndexError) as raises: + foo(i) + self.assertIn( + "list index out of range", + str(raises.exception), + ) + + def test_list_getitem_empty_typing_error(self): + self.disable_leak_check() + + @njit + def foo(i): + l = listobject.new_list(int32) + return l[i] + + for i in "xyz", 1.0, 1j: + with self.assertRaises(TypingError) as raises: + foo(i) + self.assertIn( + "list indices must be integers or slices", + str(raises.exception), + ) + + def test_list_getitem_integer_types_as_index(self): + + @njit + def foo(i): + l = 
listobject.new_list(int32) + l.append(0) + return l[i] + + # try all signed integers and make sure they are cast + for t in (types.signed_domain + ): + self.assertEqual(foo((t(0))), 0) + + def test_list_getitem_different_sized_uint_index(self): + # Checks that the index type cast and ext/trunc to the + # type of the length is correct, both wraparound and + # direct index is tested via -1/0. + + for ty in types.unsigned_domain: + @njit + def foo(): + l = listobject.new_list(int32) + l.append(7) + return l[ty(0)] + + self.assertEqual(foo(), 7) + + def test_list_getitem_different_sized_int_index(self): + # Checks that the index type cast and ext/trunc to the + # type of the length is correct, both wraparound and + # direct index is tested via -1/0. + + for ty in types.signed_domain: + @njit + def foo(): + l = listobject.new_list(int32) + l.append(7) + return l[ty(0)], l[ty(-1)] + + self.assertEqual(foo(), (7, 7)) + + +class TestGetitemSlice(MemoryLeakMixin, TestCase): + """Test list getitem when indexing with slices. 
""" + + def test_list_getitem_empty_slice_defaults(self): + @njit + def foo(): + l = listobject.new_list(int32) + n = l[:] + return len(n) + + self.assertEqual(foo(), 0) + + def test_list_getitem_singleton_slice_defaults(self): + @njit + def foo(): + l = listobject.new_list(int32) + l.append(0) + n = l[:] + return len(n) + + self.assertEqual(foo(), 1) + + def test_list_getitem_multiple_slice_defaults(self): + @njit + def foo(i): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + n = l[:] + return n[i] + + for i,j in ((0, 10), (9, 19), (4, 14), (-5, 15), (-1, 19), (-10, 10)): + self.assertEqual(foo(i), j) + + def test_list_getitem_multiple_slice_pos_start(self): + @njit + def foo(): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + n = l[5:] + return len(n), (n[0], n[1], n[2], n[3], n[4]) + + length, items = foo() + self.assertEqual(length, 5) + self.assertEqual(items, (15, 16, 17, 18, 19)) + + def test_list_getitem_multiple_slice_pos_stop(self): + @njit + def foo(): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + n = l[:5] + return len(n), (n[0], n[1], n[2], n[3], n[4]) + + length, items = foo() + self.assertEqual(length, 5) + self.assertEqual(items, (10, 11, 12, 13, 14)) + + def test_list_getitem_multiple_slice_pos_start_pos_stop(self): + @njit + def foo(): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + n = l[2:7] + return len(n), (n[0], n[1], n[2], n[3], n[4]) + + length, items = foo() + self.assertEqual(length, 5) + self.assertEqual(items, (12, 13, 14, 15, 16)) + + def test_list_getitem_multiple_slice_pos_start_pos_stop_pos_step(self): + @njit + def foo(): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + n = l[1:9:2] + return len(n), (n[0], n[1], n[2], n[3]) + + length, items = foo() + self.assertEqual(length, 4) + self.assertEqual(items, (11, 13, 15, 17)) + + def test_list_getitem_multiple_slice_neg_start(self): + @njit + def foo(): 
+ l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + n = l[-5:] + return len(n), (n[0], n[1], n[2], n[3], n[4]) + + length, items = foo() + self.assertEqual(length, 5) + self.assertEqual(items, (15, 16, 17, 18, 19)) + + def test_list_getitem_multiple_slice_neg_stop(self): + @njit + def foo(): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + n = l[:-5] + return len(n), (n[0], n[1], n[2], n[3], n[4]) + + length, items = foo() + self.assertEqual(length, 5) + self.assertEqual(items, (10, 11, 12, 13, 14)) + + def test_list_getitem_multiple_slice_neg_step(self): + @njit + def foo(): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + n = l[::-2] + return len(n), (n[0], n[1], n[2], n[3], n[4]) + + length, items = foo() + self.assertEqual(length, 5) + self.assertEqual(items, (19, 17, 15, 13, 11)) + + def test_list_getitem_multiple_slice_pos_start_neg_step(self): + @njit + def foo(): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + n = l[4::-1] + return len(n), (n[0], n[1], n[2], n[3], n[4]) + + length, items = foo() + self.assertEqual(length, 5) + self.assertEqual(items, (14, 13, 12, 11, 10)) + + def test_list_getitem_multiple_slice_neg_start_neg_step(self): + @njit + def foo(): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + n = l[-6::-1] + return len(n), (n[0], n[1], n[2], n[3], n[4]) + + length, items = foo() + self.assertEqual(length, 5) + self.assertEqual(items, (14, 13, 12, 11, 10)) + + def test_list_getitem_multiple_slice_pos_stop_neg_step(self): + @njit + def foo(): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + n = l[:4:-1] + return len(n), (n[0], n[1], n[2], n[3], n[4]) + + length, items = foo() + self.assertEqual(length, 5) + self.assertEqual(items, (19, 18, 17, 16, 15)) + + def test_list_getitem_multiple_slice_neg_stop_neg_step(self): + @njit + def foo(): + l = listobject.new_list(int32) + for j in range(10, 
20): + l.append(j) + n = l[:-6:-1] + return len(n), (n[0], n[1], n[2], n[3], n[4]) + + length, items = foo() + self.assertEqual(length, 5) + self.assertEqual(items, (19, 18, 17, 16, 15)) + + def test_list_getitem_multiple_slice_pos_start_pos_stop_neg_step(self): + @njit + def foo(): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + n = l[8:3:-1] + return len(n), (n[0], n[1], n[2], n[3], n[4]) + + length, items = foo() + self.assertEqual(length, 5) + self.assertEqual(items, (18, 17, 16, 15, 14)) + + def test_list_getitem_multiple_slice_neg_start_neg_stop_neg_step(self): + @njit + def foo(): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + n = l[-2:-7:-1] + return len(n), (n[0], n[1], n[2], n[3], n[4]) + + length, items = foo() + self.assertEqual(length, 5) + self.assertEqual(items, (18, 17, 16, 15, 14)) + + def test_list_getitem_multiple_slice_start_out_of_range(self): + @njit + def foo(): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + n = l[10:] + return len(n) + + self.assertEqual(foo(), 0) + + def test_list_getitem_multiple_slice_stop_zero(self): + @njit + def foo(): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + n = l[:0] + return len(n) + + self.assertEqual(foo(), 0) + + def test_list_getitem_multiple_slice_zero_step_index_error(self): + self.disable_leak_check() + + @njit + def foo(): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + l[::0] + + with self.assertRaises(ValueError) as raises: + foo() + self.assertIn( + "slice step cannot be zero", + str(raises.exception), + ) + + +class TestSetitem(MemoryLeakMixin, TestCase): + """Test list setitem. 
""" + + def test_list_setitem_singleton(self): + @njit + def foo(n): + l = listobject.new_list(int32) + l.append(0) + l[0] = n + return l[0] + + for i in (0, 1, 2, 100): + self.assertEqual(foo(i), i) + + def test_list_setitem_singleton_negative_index(self): + @njit + def foo(n): + l = listobject.new_list(int32) + l.append(0) + l[0] = n + return l[-1] + + for i in (0, 1, 2, 100): + self.assertEqual(foo(i), i) + + def test_list_setitem_singleton_index_error(self): + self.disable_leak_check() + + @njit + def foo(i): + l = listobject.new_list(int32) + l.append(0) + l[i] = 1 + + with self.assertRaises(IndexError): + foo(1) + + with self.assertRaises(IndexError): + foo(-2) + + def test_list_setitem_multiple(self): + + @njit + def foo(i, n): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + l[i] = n + return l[i] + + for i,n in zip(range(0,10), range(20,30)): + self.assertEqual(foo(i, n), n) + + def test_list_setitem_multiple_index_error(self): + self.disable_leak_check() + + @njit + def foo(i): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + l[i] = 0 + + with self.assertRaises(IndexError): + foo(10) + + with self.assertRaises(IndexError): + foo(-11) + + def test_list_setitem_singleton_typing_error_on_index(self): + self.disable_leak_check() + + @njit + def foo(i): + l = listobject.new_list(int32) + l.append(0) + # slice with a non-{integer,slice} + l[i] = 1 + + for i in "xyz", 1.0, 1j: + with self.assertRaises(TypingError) as raises: + foo(i) + self.assertIn( + "list indices must be integers or slices", + str(raises.exception), + ) + + def test_list_setitem_singleton_typing_error_on_item(self): + self.disable_leak_check() + + @njit + def foo(): + l = listobject.new_list(int32) + l.append(0) + # assign a non-iterable to a slice + l[:] = 1 + + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + "can only assign an iterable when using a slice " + "with assignment/setitem", + str(raises.exception), + ) 
+ + def test_list_setitem_integer_types_as_index(self): + + @njit + def foo(i): + l = listobject.new_list(int32) + l.append(0) + l[i] = 1 + return l[i] + + # try all signed integers and make sure they are cast + for t in (types.signed_domain + ): + self.assertEqual(foo((t(0))), 1) + + +class TestPop(MemoryLeakMixin, TestCase): + """Test list pop. """ + + def test_list_pop_singleton(self): + @njit + def foo(): + l = listobject.new_list(int32) + l.append(0) + return l.pop(), len(l) + + self.assertEqual(foo(), (0, 0)) + + def test_list_pop_singleton_index(self): + @njit + def foo(i): + l = listobject.new_list(int32) + l.append(0) + return l.pop(i), len(l) + + self.assertEqual(foo(0), (0, 0)) + self.assertEqual(foo(-1), (0, 0)) + + def test_list_pop_multiple(self): + @njit + def foo(): + l = listobject.new_list(int32) + for j in (10, 11, 12): + l.append(j) + return l.pop(), len(l) + + self.assertEqual(foo(), (12, 2)) + + def test_list_pop_multiple_index(self): + @njit + def foo(i): + l = listobject.new_list(int32) + for j in (10, 11, 12): + l.append(j) + return l.pop(i), len(l) + + for i, n in ((0, 10), (1, 11), (2, 12)): + self.assertEqual(foo(i), (n, 2)) + + for i, n in ((-3, 10), (-2, 11), (-1, 12)): + self.assertEqual(foo(i), (n, 2)) + + def test_list_pop_integer_types_as_index(self): + + @njit + def foo(i): + l = listobject.new_list(int32) + l.append(0) + return l.pop(i) + + # try all signed integers and make sure they are cast + for t in (types.signed_domain + ): + self.assertEqual(foo((t(0))), 0) + + def test_list_pop_empty_index_error_no_index(self): + self.disable_leak_check() + + @njit + def foo(): + l = listobject.new_list(int32) + l.pop() + + with self.assertRaises(IndexError) as raises: + foo() + self.assertIn( + "pop from empty list", + str(raises.exception), + ) + + def test_list_pop_empty_index_error_with_index(self): + self.disable_leak_check() + + @njit + def foo(i): + l = listobject.new_list(int32) + l.pop(i) + + with self.assertRaises(IndexError) as 
raises: + foo(-1) + self.assertIn( + "pop from empty list", + str(raises.exception), + ) + + with self.assertRaises(IndexError) as raises: + foo(0) + self.assertIn( + "pop from empty list", + str(raises.exception), + ) + + with self.assertRaises(IndexError) as raises: + foo(1) + self.assertIn( + "pop from empty list", + str(raises.exception), + ) + + def test_list_pop_mutiple_index_error_with_index(self): + self.disable_leak_check() + + @njit + def foo(i): + l = listobject.new_list(int32) + for j in (10, 11, 12): + l.append(j) + l.pop(i) + + with self.assertRaises(IndexError) as raises: + foo(-4) + self.assertIn( + "list index out of range", + str(raises.exception), + ) + + with self.assertRaises(IndexError) as raises: + foo(3) + self.assertIn( + "list index out of range", + str(raises.exception), + ) + + def test_list_pop_singleton_typing_error_on_index(self): + self.disable_leak_check() + + @njit + def foo(i): + l = listobject.new_list(int32) + l.append(0) + # slice with a non-{integer,slice} + return l.pop(i) + + for i in "xyz", 1.0, 1j: + with self.assertRaises(TypingError) as raises: + foo(i) + self.assertIn( + "argument for pop must be an integer", + str(raises.exception), + ) + + +class TestListObjectDelitem(MemoryLeakMixin, TestCase): + """Test list delitem. 
+ """ + + def test_list_singleton_delitem_index(self): + + @njit + def foo(): + l = listobject.new_list(int32) + l.append(0) + del l[0] + return len(l) + self.assertEqual(foo(), 0) + + def test_list_singleton_delitem_slice_defaults(self): + + @njit + def foo(): + l = listobject.new_list(int32) + l.append(0) + del l[:] + return len(l) + self.assertEqual(foo(), 0) + + def test_list_singleton_delitem_slice_start(self): + + @njit + def foo(): + l = listobject.new_list(int32) + l.append(0) + del l[0:] + return len(l) + self.assertEqual(foo(), 0) + + def test_list_singleton_delitem_slice_stop(self): + + @njit + def foo(): + l = listobject.new_list(int32) + l.append(0) + del l[:1] + return len(l) + self.assertEqual(foo(), 0) + + def test_list_singleton_delitem_slice_start_stop(self): + + @njit + def foo(): + l = listobject.new_list(int32) + l.append(0) + del l[0:1] + return len(l) + self.assertEqual(foo(), 0) + + def test_list_singleton_delitem_slice_start_step(self): + + @njit + def foo(): + l = listobject.new_list(int32) + l.append(0) + del l[0::1] + return len(l) + self.assertEqual(foo(), 0) + + def test_list_singleton_delitem_slice_start_stop_step(self): + + @njit + def foo(): + l = listobject.new_list(int32) + l.append(0) + del l[0:1:1] + return len(l) + self.assertEqual(foo(), 0) + + def test_list_multiple_delitem(self): + + @njit + def foo(): + l = listobject.new_list(int32) + for j in (10, 11, 12): + l.append(j) + del l[0] + return len(l), l[0], l[1] + self.assertEqual(foo(), (2, 11, 12)) + + def test_list_multiple_delitem_slice(self): + + @njit + def foo(): + l = listobject.new_list(int32) + for j in (10, 11, 12): + l.append(j) + del l[:] + return len(l) + self.assertEqual(foo(), 0) + + def test_list_multiple_delitem_off_by_one(self): + # this was exposing a nasty off-by-one error, leaving it in to detect + # and regressions + @njit + def foo(): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + k = listobject.new_list(int32) + for j in 
range(10, 20): + k.append(j) + # should be a no-op + del l[-9:-20] + return k == l + self.assertTrue(foo()) + + +class TestContains(MemoryLeakMixin, TestCase): + """Test list contains. """ + + def test_list_contains_empty(self): + @njit + def foo(i): + l = listobject.new_list(int32) + return i in l + + self.assertFalse(foo(0)) + self.assertFalse(foo(1)) + + def test_list_contains_singleton(self): + @njit + def foo(i): + l = listobject.new_list(int32) + l.append(0) + return i in l + + self.assertTrue(foo(0)) + self.assertFalse(foo(1)) + + def test_list_contains_multiple(self): + @njit + def foo(i): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + return i in l + + for i in range(10, 20): + self.assertTrue(foo(i)) + + for i in range(20, 30): + self.assertFalse(foo(i)) + + +class TestCount(MemoryLeakMixin, TestCase): + """Test list count. """ + + def test_list_count_empty(self): + @njit + def foo(i): + l = listobject.new_list(int32) + return l.count(i) + + self.assertEqual(foo(10), 0) + + def test_list_count_singleton(self): + @njit + def foo(i): + l = listobject.new_list(int32) + l.append(10) + return l.count(i) + + self.assertEqual(foo(1), 0) + self.assertEqual(foo(10), 1) + + def test_list_count_mutiple(self): + @njit + def foo(i): + l = listobject.new_list(int32) + for j in [11, 12, 12, 13, 13, 13]: + l.append(j) + return l.count(i) + + self.assertEqual(foo(10), 0) + self.assertEqual(foo(11), 1) + self.assertEqual(foo(12), 2) + self.assertEqual(foo(13), 3) + + +class TestExtend(MemoryLeakMixin, TestCase): + """Test list extend. 
""" + + def test_list_extend_empty(self): + @njit + def foo(items): + l = listobject.new_list(int32) + l.extend(items) + return len(l) + + self.assertEqual(foo((1,)), 1) + self.assertEqual(foo((1,2)), 2) + self.assertEqual(foo((1,2,3)), 3) + + def test_list_extend_typing_error_non_iterable(self): + self.disable_leak_check() + + @njit + def foo(): + l = listobject.new_list(int32) + l.extend(1) + + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + "extend argument must be iterable", + str(raises.exception), + ) + + +class TestInsert(MemoryLeakMixin, TestCase): + """Test list insert. """ + + def test_list_insert_empty(self): + @njit + def foo(i): + l = listobject.new_list(int32) + l.insert(i, 1) + return len(l), l[0] + + for i in (-10, -5, -1, 0, 1, 4, 9): + self.assertEqual(foo(i), (1, 1)) + + def test_list_insert_singleton(self): + @njit + def foo(i): + l = listobject.new_list(int32) + l.append(0) + l.insert(i, 1) + return len(l), l[0], l[1] + + # insert before + for i in (-10, -3, -2, -1, 0): + self.assertEqual(foo(i), (2, 1, 0)) + + # insert after + for i in (1, 2, 3, 10): + self.assertEqual(foo(i), (2, 0, 1)) + + def test_list_insert_multiple(self): + @njit + def foo(i): + l = listobject.new_list(int32) + for j in range(10): + l.append(0) + l.insert(i, 1) + return len(l), l[i] + + for i in (0, 4, 9): + self.assertEqual(foo(i), (11, 1)) + + def test_list_insert_multiple_before(self): + @njit + def foo(i): + l = listobject.new_list(int32) + for j in range(10): + l.append(0) + l.insert(i, 1) + return len(l), l[0] + + for i in (-12, -11, -10, 0): + self.assertEqual(foo(i), (11, 1)) + + def test_list_insert_multiple_after(self): + @njit + def foo(i): + l = listobject.new_list(int32) + for j in range(10): + l.append(0) + l.insert(i, 1) + return len(l), l[10] + + for i in (10, 11, 12): + self.assertEqual(foo(i), (11, 1)) + + def test_list_insert_typing_error(self): + self.disable_leak_check() + + @njit + def foo(): + l = 
listobject.new_list(int32) + l.insert("a", 0) + + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + "list insert indices must be integers", + str(raises.exception), + ) + + +class TestRemove(MemoryLeakMixin, TestCase): + """Test list remove. """ + + def test_list_remove_empty(self): + self.disable_leak_check() + + @njit + def foo(): + l = listobject.new_list(int32) + l.remove(0) + + with self.assertRaises(ValueError): + foo() + + def test_list_remove_singleton(self): + @njit + def foo(): + l = listobject.new_list(int32) + l.append(0) + l.remove(0) + return len(l) + + self.assertEqual(foo(), 0) + + def test_list_remove_singleton_value_error(self): + self.disable_leak_check() + + @njit + def foo(): + l = listobject.new_list(int32) + l.append(1) + l.remove(0) + + with self.assertRaises(ValueError): + foo() + + def test_list_remove_multiple(self): + @njit + def foo(): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + l.remove(13) + l.remove(19) + return len(l) + + self.assertEqual(foo(), 8) + + def test_list_remove_multiple_value_error(self): + self.disable_leak_check() + + @njit + def foo(): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + l.remove(23) + + with self.assertRaises(ValueError): + foo() + + +class TestClear(MemoryLeakMixin, TestCase): + """Test list clear. """ + + def test_list_clear_empty(self): + @njit + def foo(): + l = listobject.new_list(int32) + l.clear() + return len(l) + + self.assertEqual(foo(), 0) + + def test_list_clear_singleton(self): + @njit + def foo(): + l = listobject.new_list(int32) + l.append(0) + l.clear() + return len(l) + + self.assertEqual(foo(), 0) + + def test_list_clear_multiple(self): + @njit + def foo(): + l = listobject.new_list(int32) + for j in range(10): + l.append(0) + l.clear() + return len(l) + self.assertEqual(foo(), 0) + + +class TestReverse(MemoryLeakMixin, TestCase): + """Test list reverse. 
""" + + def test_list_reverse_empty(self): + @njit + def foo(): + l = listobject.new_list(int32) + l.reverse() + return len(l) + + self.assertEqual(foo(), 0) + + def test_list_reverse_singleton(self): + @njit + def foo(): + l = listobject.new_list(int32) + l.append(0) + l.reverse() + return len(l), l[0] + + self.assertEqual(foo(), (1, 0)) + + def test_list_reverse_multiple(self): + @njit + def foo(): + l = listobject.new_list(int32) + for j in range(10, 13): + l.append(j) + l.reverse() + return len(l), l[0], l[1], l[2] + self.assertEqual(foo(), (3, 12, 11, 10)) + + +class TestCopy(MemoryLeakMixin, TestCase): + """Test list copy. """ + + def test_list_copy_empty(self): + @njit + def foo(): + l = listobject.new_list(int32) + n = l.copy() + return len(l), len(n) + + self.assertEqual(foo(), (0, 0)) + + def test_list_copy_singleton(self): + @njit + def foo(): + l = listobject.new_list(int32) + l.append(0) + n = l.copy() + return len(l), len(n), l[0], n[0] + + self.assertEqual(foo(), (1, 1, 0, 0)) + + def test_list_copy_multiple(self): + @njit + def foo(): + l = listobject.new_list(int32) + for j in range(10, 13): + l.append(j) + n = l.copy() + return len(l), len(n), l[0], l[1], l[2], l[0], l[1], l[2] + + self.assertEqual(foo(), (3, 3, 10, 11, 12, 10, 11, 12)) + + +class TestIndex(MemoryLeakMixin, TestCase): + + def test_index_singleton(self): + @njit + def foo(): + l = listobject.new_list(int32) + l.append(1) + return l.index(1) + + self.assertEqual(foo(), 0) + + def test_index_multiple(self): + @njit + def foo(i): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + return l.index(i) + + for i,v in zip(range(10), range(10,20)): + self.assertEqual(foo(v), i) + + def test_index_duplicate(self): + @njit + def foo(): + l = listobject.new_list(int32) + for _ in range(10, 20): + l.append(1) + return l.index(1) + + self.assertEqual(foo(), 0) + + def test_index_duplicate_with_start(self): + @njit + def foo(start): + l = listobject.new_list(int32) + for _ 
in range(10, 20): + l.append(1) + return l.index(1, start) + + for i in range(10): + self.assertEqual(foo(i), i) + + def test_index_singleton_value_error(self): + self.disable_leak_check() + + @njit + def foo(): + l = listobject.new_list(int32) + l.append(0) + return l.index(1) + + with self.assertRaises(ValueError) as raises: + foo() + self.assertIn( + "item not in list", + str(raises.exception), + ) + + def test_index_multiple_value_error(self): + self.disable_leak_check() + + @njit + def foo(): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + return l.index(23) + + with self.assertRaises(ValueError) as raises: + foo() + self.assertIn( + "item not in list", + str(raises.exception), + ) + + def test_index_multiple_value_error_start(self): + self.disable_leak_check() + + @njit + def foo(start): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + return l.index(10, start) + + self.assertEqual(foo(0), 0) + for i in range(1,10): + with self.assertRaises(ValueError) as raises: + foo(i) + self.assertIn( + "item not in list", + str(raises.exception), + ) + + def test_index_multiple_value_error_end(self): + self.disable_leak_check() + + @njit + def foo(end): + l = listobject.new_list(int32) + for j in range(10, 20): + l.append(j) + return l.index(19, 0, end) + + self.assertEqual(foo(10), 9) + for i in range(0,9): + with self.assertRaises(ValueError) as raises: + foo(i) + self.assertIn( + "item not in list", + str(raises.exception), + ) + + def test_index_typing_error_start(self): + self.disable_leak_check() + + @njit + def foo(): + l = listobject.new_list(int32) + l.append(0) + return l.index(0, start="a") + + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + "start argument for index must be an integer", + str(raises.exception), + ) + + def test_index_typing_error_end(self): + self.disable_leak_check() + + @njit + def foo(): + l = listobject.new_list(int32) + l.append(0) + return l.index(0, end="a") 
+ + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + "end argument for index must be an integer", + str(raises.exception), + ) + + +class TestEqualNotEqual(MemoryLeakMixin, TestCase): + """Test list equal and not equal. """ + + def test_list_empty_equal(self): + @njit + def foo(): + t = listobject.new_list(int32) + o = listobject.new_list(int32) + return t == o, t != o + + self.assertEqual(foo(), (True, False)) + + def test_list_singleton_equal(self): + @njit + def foo(): + t = listobject.new_list(int32) + t.append(0) + o = listobject.new_list(int32) + o.append(0) + return t == o, t != o + + self.assertEqual(foo(), (True, False)) + + def test_list_singleton_not_equal(self): + @njit + def foo(): + t = listobject.new_list(int32) + t.append(0) + o = listobject.new_list(int32) + o.append(1) + return t == o, t != o + + self.assertEqual(foo(), (False, True)) + + def test_list_length_mismatch(self): + @njit + def foo(): + t = listobject.new_list(int32) + t.append(0) + o = listobject.new_list(int32) + return t == o, t != o + + self.assertEqual(foo(), (False, True)) + + def test_list_multiple_equal(self): + @njit + def foo(): + t = listobject.new_list(int32) + o = listobject.new_list(int32) + for i in range(10): + t.append(i) + o.append(i) + return t == o, t != o + + self.assertEqual(foo(), (True, False)) + + def test_list_multiple_not_equal(self): + @njit + def foo(): + t = listobject.new_list(int32) + o = listobject.new_list(int32) + for i in range(10): + t.append(i) + o.append(i) + o[-1] = 42 + return t == o, t != o + + self.assertEqual(foo(), (False, True)) + + +class TestIter(MemoryLeakMixin, TestCase): + """Test list iter. 
""" + + def test_list_iter(self): + @njit + def foo(items): + l = listobject.new_list(int32) + l.extend(items) + # use a simple sum to check this w/o having to return a list + r = 0 + for j in l: + r += j + return r + + items = (1, 2, 3, 4) + + self.assertEqual( + foo(items), + sum(items) + ) + + def test_list_iter_self_mutation(self): + self.disable_leak_check() + + @njit + def foo(): + l = listobject.new_list(int32) + l.extend((1, 2, 3, 4)) + for i in l: + l.append(i) + + with self.assertRaises(RuntimeError) as raises: + foo() + self.assertIn( + 'list was mutated during iteration'.format(**locals()), + str(raises.exception), + ) + + +class TestStringItem(MemoryLeakMixin, TestCase): + """Test list can take strings as items. """ + + def test_string_item(self): + @njit + def foo(): + l = listobject.new_list(types.unicode_type) + l.append('a') + l.append('b') + l.append('c') + l.append('d') + return l[0], l[1], l[2], l[3] + + items = foo() + self.assertEqual(['a', 'b', 'c', 'd'], list(items)) + + +class TestItemCasting(TestCase): + + @njit + def foo(fromty, toty): + l = listobject.new_list(toty) + l.append(fromty(0)) + + def check_good(self, fromty, toty): + TestItemCasting.foo(fromty, toty) + + def check_bad(self, fromty, toty): + with self.assertRaises(TypingError) as raises: + TestItemCasting.foo(fromty, toty) + self.assertIn( + 'cannot safely cast {fromty} to {toty}'.format(**locals()), + str(raises.exception), + ) + + def test_cast_int_to(self): + self.check_good(types.int32, types.float32) + self.check_good(types.int32, types.float64) + self.check_good(types.int32, types.complex128) + self.check_good(types.int64, types.complex128) + self.check_bad(types.int32, types.complex64) + self.check_good(types.int8, types.complex64) + + def test_cast_float_to(self): + self.check_good(types.float32, types.float64) + self.check_good(types.float32, types.complex64) + self.check_good(types.float64, types.complex128) + + def test_cast_bool_to(self): + 
self.check_good(types.boolean, types.int32) + self.check_good(types.boolean, types.float64) + self.check_good(types.boolean, types.complex128) + + def test_cast_fail_unicode_int(self): + + @njit + def foo(): + l = listobject.new_list(int32) + l.append("xyz") + + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + 'cannot safely cast unicode_type to int32', + str(raises.exception), + ) + + def test_cast_fail_int_unicode(self): + + @njit + def foo(): + l = listobject.new_list(types.unicode_type) + l.append(int32(0)) + + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + 'Cannot cast int32 to unicode_type', + str(raises.exception), + ) + + +@register_jitable +def make_test_list(): + l = listobject.new_list(int32) + l.append(int32(1)) + return l + + +class TestImmutable(MemoryLeakMixin, TestCase): + + def test_is_immutable(self): + @njit + def foo(): + l = make_test_list() + return l._is_mutable() + self.assertTrue(foo()) + + def test_make_immutable_is_immutable(self): + @njit + def foo(): + l = make_test_list() + l._make_immutable() + return l._is_mutable() + self.assertFalse(foo()) + + def test_length_still_works_when_immutable(self): + @njit + def foo(): + l = make_test_list() + l._make_immutable() + return len(l),l._is_mutable() + length, mutable = foo() + self.assertEqual(length, 1) + self.assertFalse(mutable) + + def test_getitem_still_works_when_immutable(self): + @njit + def foo(): + l = make_test_list() + l._make_immutable() + return l[0], l._is_mutable() + test_item, mutable = foo() + self.assertEqual(test_item, 1) + self.assertFalse(mutable) + + def test_append_fails(self): + self.disable_leak_check() + + @njit + def foo(): + l = make_test_list() + l._make_immutable() + l.append(int32(1)) + with self.assertRaises(ValueError) as raises: + foo() + self.assertIn( + 'list is immutable', + str(raises.exception), + ) + + def test_mutation_fails(self): + """ Test that any attempt to mutate an immutable typed list fails. 
""" + self.disable_leak_check() + + def generate_function(line): + context = {} + exec(dedent(""" + from numba.typed import listobject + from numba import int32 + def bar(): + lst = listobject.new_list(int32) + lst.append(int32(1)) + lst._make_immutable() + zero = int32(0) + {} + """.format(line)), context) + return njit(context["bar"]) + for line in ("lst.append(zero)", + "lst[0] = zero", + "lst.pop()", + "del lst[0]", + "lst.extend((zero,))", + "lst.insert(0, zero)", + "lst.clear()", + "lst.reverse()", + "lst.sort()", + ): + foo = generate_function(line) + with self.assertRaises(ValueError) as raises: + foo() + self.assertIn( + "list is immutable", + str(raises.exception), + ) diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_lists.py b/venv/lib/python3.10/site-packages/numba/tests/test_lists.py new file mode 100644 index 0000000000000000000000000000000000000000..33eade4617cf139bdb6367af36de0206cfb338e8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_lists.py @@ -0,0 +1,1836 @@ +from collections import namedtuple +import contextlib +import itertools +import math +import sys +import ctypes as ct +import numpy as np + +from numba import jit, typeof, njit, literal_unroll, literally +import unittest +from numba.core import types, errors +from numba.tests.support import TestCase, MemoryLeakMixin +from numba.experimental import jitclass +from numba.core.extending import overload + + +Point = namedtuple('Point', ('a', 'b')) + + +def noop(x): + pass + +def unbox_usecase(x): + """ + Expect a list of numbers + """ + res = 0 + for v in x: + res += v + return res + +def unbox_usecase2(x): + """ + Expect a list of tuples + """ + res = 0 + for v in x: + res += len(v) + return res + +def unbox_usecase3(x): + """ + Expect a (number, list of numbers) tuple. + """ + a, b = x + res = a + for v in b: + res += v + return res + +def unbox_usecase4(x): + """ + Expect a (number, list of tuples) tuple. 
+ """ + a, b = x + res = a + for v in b: + res += len(v) + return res + + +def create_list(x, y, z): + return [x, y, z] + +def create_nested_list(x, y, z, a, b, c): + return [[x, y, z], [a, b, c]] + +def list_comprehension1(): + return sum([x**2 for x in range(10)]) + +def list_comprehension2(): + return sum([x for x in range(10) if x % 2 == 0]) + +def list_comprehension3(): + return sum([math.pow(x, 2) for x in range(10)]) + +def list_comprehension4(): + return sum([x * y for x in range(10) for y in range(10)]) + +def list_comprehension5(): + return [x * 2 for x in range(10)] + +def list_comprehension6(): + return [[x for x in range(y)] for y in range(3)] + + +def list_constructor(n): + return list(range(n)) + +def list_constructor_empty(): + # cannot be typed, list is empty and no typing information is present to + # infer a type + return list() + +def list_constructor_empty_but_typeable(n): + # can be typed, list is empty but later append has typing info that allows + # for inference + y = list() + return y.append(n) + +def list_append(n): + l = [] + l.append(42) + for i in range(n): + l.append(i) + return l + +def list_append_heterogeneous(n): + l = [] + l.append(42.0) + for i in range(n): + l.append(i) + return l + +def list_extend(n): + l = [] + # A non-list iterable and a list + l.extend(range(n)) + l.extend(l[:-1]) + l.extend(range(n, 0, -1)) + return l + +def list_extend_heterogeneous(n): + l = [] + # Extend with various iterables, including lists, with different types + l.extend(range(n)) + l.extend(l[:-1]) + l.extend((5, 42)) + l.extend([123.0]) + return l + +def list_pop0(n): + l = list(range(n)) + res = 0 + while len(l) > 0: + res += len(l) * l.pop() + return res + +def list_pop1(n, i): + l = list(range(n)) + x = l.pop(i) + return x, l + +def list_len(n): + l = list(range(n)) + return len(l) + +def list_getitem(n): + l = list(range(n)) + res = 0 + # Positive indices + for i in range(len(l)): + res += i * l[i] + # Negative indices + for i in 
range(-len(l), 0): + res -= i * l[i] + return res + +def list_setitem(n): + l = list(range(n)) + res = 0 + # Positive indices + for i in range(len(l)): + l[i] = i * l[i] + # Negative indices + for i in range(-len(l), 0): + l[i] = i * l[i] + for i in range(len(l)): + res += l[i] + return res + +def list_getslice2(n, start, stop): + l = list(range(n)) + return l[start:stop] + +def list_getslice3(n, start, stop, step): + l = list(range(n)) + return l[start:stop:step] + +def list_setslice2(n, n_source, start, stop): + # Generic setslice with size change + l = list(range(n)) + v = list(range(100, 100 + n_source)) + l[start:stop] = v + return l + +def list_setslice3(n, start, stop, step): + l = list(range(n)) + v = l[start:stop:step] + for i in range(len(v)): + v[i] += 100 + l[start:stop:step] = v + return l + +def list_setslice3_arbitrary(n, n_src, start, stop, step): + l = list(range(n)) + l[start:stop:step] = list(range(100, 100 + n_src)) + return l + +def list_delslice0(n): + l = list(range(n)) + del l[:] + return l + +def list_delslice1(n, start, stop): + l = list(range(n)) + del l[start:] + del l[:stop] + return l + +def list_delslice2(n, start, stop): + l = list(range(n)) + del l[start:stop] + return l + +def list_clear(n): + l = list(range(n)) + l.clear() + return l + +def list_copy(n): + l = list(range(n)) + ll = l.copy() + l.append(42) + return l, ll + +def list_iteration(n): + l = list(range(n)) + res = 0 + for i, v in enumerate(l): + res += i * v + return res + +def list_contains(n): + l = list(range(n)) + return (0 in l, 1 in l, n - 1 in l, n in l, + 0 not in l, 1 not in l, n - 1 not in l, n not in l, + ) + +def list_index1(n, v): + l = list(range(n, 0, -1)) + return l.index(v) + +def list_index2(n, v, start): + l = list(range(n, 0, -1)) + return l.index(v, start) + +def list_index3(n, v, start, stop): + l = list(range(n, 0, -1)) + return l.index(v, start, stop) + +def list_remove(n, v): + l = list(range(n - 1, -1, -1)) + l.remove(v) + return l + +def 
list_insert(n, pos, v): + l = list(range(0, n)) + l.insert(pos, v) + return l + +def list_count(n, v): + l = [] + for x in range(n): + l.append(x & 3) + return l.count(v) + +def list_reverse(n): + l = list(range(n)) + l.reverse() + return l + +def list_add(m, n): + a = list(range(0, m)) + b = list(range(100, 100 + n)) + res = a + b + res.append(42) # check result is a copy + return a, b, res + +def list_add_heterogeneous(): + a = [1] + b = [2.0] + c = a + b + d = b + a + # check result is a copy + a.append(3) + b.append(4.0) + return a, b, c, d + +def list_add_inplace(m, n): + a = list(range(0, m)) + b = list(range(100, 100 + n)) + a += b + return a, b + +def list_add_inplace_heterogeneous(): + a = [1] + b = [2.0] + a += b + b += a + return a, b + +def list_mul(n, v): + a = list(range(n)) + return a * v + +def list_mul2(n, v): + a = list(range(n)) + return v * a + +def list_mul_inplace(n, v): + a = list(range(n)) + a *= v + return a + +def list_bool(n): + a = list(range(n)) + return bool(a), (True if a else False) + +def eq_usecase(a, b): + return list(a) == list(b) + +def ne_usecase(a, b): + return list(a) != list(b) + +def gt_usecase(a, b): + return list(a) > list(b) + +def ge_usecase(a, b): + return list(a) >= list(b) + +def lt_usecase(a, b): + return list(a) < list(b) + +def le_usecase(a, b): + return list(a) <= list(b) + +def identity_usecase(n): + a = list(range(n)) + b = a + c = a[:] + return (a is b), (a is not b), (a is c), (a is not c) + +def bool_list_usecase(): + # Exercise getitem, setitem, iteration with bool values (issue #1373) + l = [False] + l[0] = True + x = False + for v in l: + x = x ^ v + return l, x + +def reflect_simple(l, ll): + x = l.pop() + y = l.pop() + l[0] = 42. + l.extend(ll) + return l, x, y + +def reflect_conditional(l, ll): + # `l` may or may not actually reflect a Python list + if ll[0]: + l = [11., 22., 33., 44.] + x = l.pop() + y = l.pop() + l[0] = 42. 
+ l.extend(ll) + return l, x, y + +def reflect_exception(l): + l.append(42) + raise ZeroDivisionError + +def reflect_dual(l, ll): + l.append(ll.pop()) + return l is ll + + +class TestLists(MemoryLeakMixin, TestCase): + + def test_create_list(self): + pyfunc = create_list + cfunc = njit((types.int32, types.int32, types.int32))(pyfunc) + self.assertEqual(cfunc(1, 2, 3), pyfunc(1, 2, 3)) + + def test_create_nested_list(self): + pyfunc = create_nested_list + cfunc = njit((types.int32, types.int32, types.int32, + types.int32, types.int32, types.int32))(pyfunc) + self.assertEqual(cfunc(1, 2, 3, 4, 5, 6), pyfunc(1, 2, 3, 4, 5, 6)) + + def check_unary_with_size(self, pyfunc, precise=True): + cfunc = jit(nopython=True)(pyfunc) + # Use various sizes, to stress the allocation algorithm + for n in [0, 3, 16, 70, 400]: + eq = self.assertPreciseEqual if precise else self.assertEqual + eq(cfunc(n), pyfunc(n)) + + def test_constructor(self): + self.check_unary_with_size(list_constructor) + + def test_constructor_empty(self): + self.disable_leak_check() + cfunc = jit(nopython=True)(list_constructor_empty) + with self.assertRaises(errors.TypingError) as raises: + cfunc() + errmsg = str(raises.exception) + self.assertIn("Cannot infer the type of variable", errmsg) + self.assertIn("list(undefined)", errmsg) + # check error message went in + self.assertIn("For Numba to be able to compile a list", errmsg) + + def test_constructor_empty_but_typeable(self): + args = [np.int32(1), 10., 1 + 3j, [7], [17., 14.], np.array([10])] + pyfunc = list_constructor_empty_but_typeable + for arg in args: + cfunc = jit(nopython=True)(pyfunc) + expected = pyfunc(arg) + got = cfunc(arg) + self.assertPreciseEqual(got, expected) + + def test_append(self): + self.check_unary_with_size(list_append) + + def test_append_heterogeneous(self): + self.check_unary_with_size(list_append_heterogeneous, precise=False) + + def test_extend(self): + self.check_unary_with_size(list_extend) + + def 
test_extend_heterogeneous(self): + self.check_unary_with_size(list_extend_heterogeneous, precise=False) + + def test_pop0(self): + self.check_unary_with_size(list_pop0) + + def test_pop1(self): + pyfunc = list_pop1 + cfunc = jit(nopython=True)(pyfunc) + for n in [5, 40]: + for i in [0, 1, n - 2, n - 1, -1, -2, -n + 3, -n + 1]: + expected = pyfunc(n, i) + self.assertPreciseEqual(cfunc(n, i), expected) + + def test_pop_errors(self): + # XXX References are leaked when an exception is raised + self.disable_leak_check() + cfunc = jit(nopython=True)(list_pop1) + with self.assertRaises(IndexError) as cm: + cfunc(0, 5) + self.assertEqual(str(cm.exception), "pop from empty list") + with self.assertRaises(IndexError) as cm: + cfunc(1, 5) + self.assertEqual(str(cm.exception), "pop index out of range") + + def test_insert(self): + pyfunc = list_insert + cfunc = jit(nopython=True)(pyfunc) + for n in [5, 40]: + indices = [0, 1, n - 2, n - 1, n + 1, -1, -2, -n + 3, -n - 1] + for i in indices: + expected = pyfunc(n, i, 42) + self.assertPreciseEqual(cfunc(n, i, 42), expected) + + def test_len(self): + self.check_unary_with_size(list_len) + + def test_getitem(self): + self.check_unary_with_size(list_getitem) + + def test_setitem(self): + self.check_unary_with_size(list_setitem) + + def check_slicing2(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + sizes = [5, 40] + for n in sizes: + indices = [0, 1, n - 2, -1, -2, -n + 3, -n - 1, -n] + for start, stop in itertools.product(indices, indices): + expected = pyfunc(n, start, stop) + self.assertPreciseEqual(cfunc(n, start, stop), expected) + + def test_getslice2(self): + self.check_slicing2(list_getslice2) + + def test_setslice2(self): + pyfunc = list_setslice2 + cfunc = jit(nopython=True)(pyfunc) + sizes = [5, 40] + for n, n_src in itertools.product(sizes, sizes): + indices = [0, 1, n - 2, -1, -2, -n + 3, -n - 1, -n] + for start, stop in itertools.product(indices, indices): + expected = pyfunc(n, n_src, start, stop) + 
self.assertPreciseEqual(cfunc(n, n_src, start, stop), expected) + + def test_getslice3(self): + pyfunc = list_getslice3 + cfunc = jit(nopython=True)(pyfunc) + for n in [10]: + indices = [0, 1, n - 2, -1, -2, -n + 3, -n - 1, -n] + steps = [4, 1, -1, 2, -3] + for start, stop, step in itertools.product(indices, indices, steps): + expected = pyfunc(n, start, stop, step) + self.assertPreciseEqual(cfunc(n, start, stop, step), expected) + + def test_setslice3(self): + pyfunc = list_setslice3 + cfunc = jit(nopython=True)(pyfunc) + for n in [10]: + indices = [0, 1, n - 2, -1, -2, -n + 3, -n - 1, -n] + steps = [4, 1, -1, 2, -3] + for start, stop, step in itertools.product(indices, indices, steps): + expected = pyfunc(n, start, stop, step) + self.assertPreciseEqual(cfunc(n, start, stop, step), expected) + + def test_setslice3_resize(self): + # XXX References are leaked when an exception is raised + self.disable_leak_check() + pyfunc = list_setslice3_arbitrary + cfunc = jit(nopython=True)(pyfunc) + # step == 1 => can resize + cfunc(5, 10, 0, 2, 1) + # step != 1 => cannot resize + with self.assertRaises(ValueError) as cm: + cfunc(5, 100, 0, 3, 2) + self.assertIn("cannot resize", str(cm.exception)) + + def test_delslice0(self): + self.check_unary_with_size(list_delslice0) + + def test_delslice1(self): + self.check_slicing2(list_delslice1) + + def test_delslice2(self): + self.check_slicing2(list_delslice2) + + def test_invalid_slice(self): + self.disable_leak_check() + pyfunc = list_getslice3 + cfunc = jit(nopython=True)(pyfunc) + with self.assertRaises(ValueError) as cm: + cfunc(10, 1, 2, 0) + self.assertEqual(str(cm.exception), "slice step cannot be zero") + + def test_iteration(self): + self.check_unary_with_size(list_iteration) + + def test_reverse(self): + self.check_unary_with_size(list_reverse) + + def test_contains(self): + self.check_unary_with_size(list_contains) + + def check_index_result(self, pyfunc, cfunc, args): + try: + expected = pyfunc(*args) + except 
ValueError: + with self.assertRaises(ValueError): + cfunc(*args) + else: + self.assertPreciseEqual(cfunc(*args), expected) + + def test_index1(self): + self.disable_leak_check() + pyfunc = list_index1 + cfunc = jit(nopython=True)(pyfunc) + for v in (0, 1, 5, 10, 99999999): + self.check_index_result(pyfunc, cfunc, (16, v)) + + def test_index2(self): + self.disable_leak_check() + pyfunc = list_index2 + cfunc = jit(nopython=True)(pyfunc) + n = 16 + for v in (0, 1, 5, 10, 99999999): + indices = [0, 1, n - 2, n - 1, n + 1, -1, -2, -n + 3, -n - 1] + for start in indices: + self.check_index_result(pyfunc, cfunc, (16, v, start)) + + def test_index3(self): + self.disable_leak_check() + pyfunc = list_index3 + cfunc = jit(nopython=True)(pyfunc) + n = 16 + for v in (0, 1, 5, 10, 99999999): + indices = [0, 1, n - 2, n - 1, n + 1, -1, -2, -n + 3, -n - 1] + for start, stop in itertools.product(indices, indices): + self.check_index_result(pyfunc, cfunc, (16, v, start, stop)) + + def test_index_exception1(self): + pyfunc = list_index3 + cfunc = jit(nopython=True)(pyfunc) + msg = 'arg "start" must be an Integer.' + with self.assertRaisesRegex(errors.TypingError, msg): + cfunc(10, 0, 'invalid', 5) + + def test_index_exception2(self): + pyfunc = list_index3 + cfunc = jit(nopython=True)(pyfunc) + msg = 'arg "stop" must be an Integer.' 
+ with self.assertRaisesRegex(errors.TypingError, msg): + cfunc(10, 0, 0, 'invalid') + + def test_remove(self): + pyfunc = list_remove + cfunc = jit(nopython=True)(pyfunc) + n = 16 + for v in (0, 1, 5, 15): + expected = pyfunc(n, v) + self.assertPreciseEqual(cfunc(n, v), expected) + + def test_remove_error(self): + self.disable_leak_check() + pyfunc = list_remove + cfunc = jit(nopython=True)(pyfunc) + with self.assertRaises(ValueError) as cm: + cfunc(10, 42) + self.assertEqual(str(cm.exception), "list.remove(x): x not in list") + + def test_count(self): + pyfunc = list_count + cfunc = jit(nopython=True)(pyfunc) + for v in range(5): + self.assertPreciseEqual(cfunc(18, v), pyfunc(18, v)) + + def test_clear(self): + self.check_unary_with_size(list_clear) + + def test_copy(self): + self.check_unary_with_size(list_copy) + + def check_add(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + sizes = [0, 3, 50, 300] + for m, n in itertools.product(sizes, sizes): + expected = pyfunc(m, n) + self.assertPreciseEqual(cfunc(m, n), expected) + + def test_add(self): + self.check_add(list_add) + + def test_add_heterogeneous(self): + pyfunc = list_add_heterogeneous + cfunc = jit(nopython=True)(pyfunc) + expected = pyfunc() + self.assertEqual(cfunc(), expected) + + def test_add_inplace(self): + self.check_add(list_add_inplace) + + def test_add_inplace_heterogeneous(self): + pyfunc = list_add_inplace_heterogeneous + cfunc = jit(nopython=True)(pyfunc) + expected = pyfunc() + self.assertEqual(cfunc(), expected) + + def check_mul(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + for n in [0, 3, 50, 300]: + for v in [1, 2, 3, 0, -1, -42]: + expected = pyfunc(n, v) + self.assertPreciseEqual(cfunc(n, v), expected) + + def test_mul(self): + self.check_mul(list_mul) + + def test_mul2(self): + self.check_mul(list_mul2) + + def test_mul_inplace(self): + self.check_mul(list_mul_inplace) + + @unittest.skipUnless(sys.maxsize >= 2**32, + "need a 64-bit system to test for MemoryError") + def 
test_mul_error(self): + self.disable_leak_check() + pyfunc = list_mul + cfunc = jit(nopython=True)(pyfunc) + # Fail in malloc() + with self.assertRaises(MemoryError): + cfunc(1, 2**58) + if sys.platform.startswith('darwin'): + libc = ct.CDLL('libc.dylib') + libc.printf("###Please ignore the above error message i.e. \ +can't allocate region. It is in fact the purpose of this test to \ +request more memory than can be provided###\n".encode("UTF-8")) + # Overflow size computation when multiplying by item size + with self.assertRaises(MemoryError): + cfunc(1, 2**62) + + def test_bool(self): + pyfunc = list_bool + cfunc = jit(nopython=True)(pyfunc) + for n in [0, 1, 3]: + expected = pyfunc(n) + self.assertPreciseEqual(cfunc(n), expected) + + def test_list_passing(self): + # Check one can pass a list from a Numba function to another + @jit(nopython=True) + def inner(lst): + return len(lst), lst[-1] + + @jit(nopython=True) + def outer(n): + l = list(range(n)) + return inner(l) + + self.assertPreciseEqual(outer(5), (5, 4)) + + def _test_compare(self, pyfunc): + def eq(args): + self.assertIs(cfunc(*args), pyfunc(*args), + "mismatch for arguments %s" % (args,)) + + cfunc = jit(nopython=True)(pyfunc) + eq(((1, 2), (1, 2))) + eq(((1, 2, 3), (1, 2))) + eq(((1, 2), (1, 2, 3))) + eq(((1, 2, 4), (1, 2, 3))) + eq(((1.0, 2.0, 3.0), (1, 2, 3))) + eq(((1.0, 2.0, 3.5), (1, 2, 3))) + + def test_eq(self): + self._test_compare(eq_usecase) + + def test_ne(self): + self._test_compare(ne_usecase) + + def test_le(self): + self._test_compare(le_usecase) + + def test_lt(self): + self._test_compare(lt_usecase) + + def test_ge(self): + self._test_compare(ge_usecase) + + def test_gt(self): + self._test_compare(gt_usecase) + + def test_identity(self): + pyfunc = identity_usecase + cfunc = jit(nopython=True)(pyfunc) + self.assertPreciseEqual(cfunc(3), pyfunc(3)) + + def test_bool_list(self): + # Check lists of bools compile and run successfully + pyfunc = bool_list_usecase + cfunc = 
jit(nopython=True)(pyfunc) + self.assertPreciseEqual(cfunc(), pyfunc()) + + +class TestUnboxing(MemoryLeakMixin, TestCase): + """ + Test unboxing of Python lists into native Numba lists. + """ + + @contextlib.contextmanager + def assert_type_error(self, msg): + with self.assertRaises(TypeError) as raises: + yield + if msg is not None: + self.assertRegex(str(raises.exception), msg) + + def check_unary(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + def check(arg): + expected = pyfunc(arg) + got = cfunc(arg) + self.assertPreciseEqual(got, expected) + return check + + def test_numbers(self): + check = self.check_unary(unbox_usecase) + check([1, 2]) + check([1j, 2.5j]) + + def test_tuples(self): + check = self.check_unary(unbox_usecase2) + check([(1, 2), (3, 4)]) + check([(1, 2j), (3, 4j)]) + check([(), (), ()]) + + def test_list_inside_tuple(self): + check = self.check_unary(unbox_usecase3) + check((1, [2, 3, 4])) + + def test_list_of_tuples_inside_tuple(self): + check = self.check_unary(unbox_usecase4) + check((1, [(2,), (3,)])) + + def test_errors(self): + # See #1545 and #1594: error checking should ensure the list is + # homogeneous + msg = "can't unbox heterogeneous list" + pyfunc = noop + cfunc = jit(nopython=True)(pyfunc) + lst = [1, 2.5] + with self.assert_type_error(msg): + cfunc(lst) + # The list hasn't been changed (bogus reflecting) + self.assertEqual(lst, [1, 2.5]) + with self.assert_type_error(msg): + cfunc([1, 2j]) + # Same when the list is nested in a tuple or namedtuple + with self.assert_type_error(msg): + cfunc((1, [1, 2j])) + with self.assert_type_error(msg): + cfunc(Point(1, [1, 2j])) + # Issue #1638: tuples of different size. + # Note the check is really on the tuple side. 
+ lst = [(1,), (2, 3)] + with self.assertRaises(TypeError) as raises: + cfunc(lst) + msg = ("can't unbox heterogeneous list: " + "UniTuple({0} x 1) != UniTuple({0} x 2)") + self.assertEqual(str(raises.exception), msg.format(types.intp)) + + + +class TestListReflection(MemoryLeakMixin, TestCase): + """ + Test reflection of native Numba lists on Python list objects. + """ + + def check_reflection(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + samples = [([1., 2., 3., 4.], [0.]), + ([1., 2., 3., 4.], [5., 6., 7., 8., 9.]), + ] + for dest, src in samples: + expected = list(dest) + got = list(dest) + pyres = pyfunc(expected, src) + with self.assertRefCount(got, src): + cres = cfunc(got, src) + self.assertPreciseEqual(cres, pyres) + self.assertPreciseEqual(expected, got) + self.assertEqual(pyres[0] is expected, cres[0] is got) + del pyres, cres + + def test_reflect_simple(self): + self.check_reflection(reflect_simple) + + def test_reflect_conditional(self): + self.check_reflection(reflect_conditional) + + def test_reflect_exception(self): + """ + When the function exits with an exception, lists should still be + reflected. + """ + pyfunc = reflect_exception + cfunc = jit(nopython=True)(pyfunc) + l = [1, 2, 3] + with self.assertRefCount(l): + with self.assertRaises(ZeroDivisionError): + cfunc(l) + self.assertPreciseEqual(l, [1, 2, 3, 42]) + + def test_reflect_same_list(self): + """ + When the same list object is reflected twice, behaviour should + be consistent. + """ + pyfunc = reflect_dual + cfunc = jit(nopython=True)(pyfunc) + pylist = [1, 2, 3] + clist = pylist[:] + expected = pyfunc(pylist, pylist) + got = cfunc(clist, clist) + self.assertPreciseEqual(expected, got) + self.assertPreciseEqual(pylist, clist) + self.assertRefCountEqual(pylist, clist) + + def test_reflect_clean(self): + """ + When the list wasn't mutated, no reflection should take place. 
+ """ + cfunc = jit(nopython=True)(noop) + # Use a complex, as Python integers can be cached + l = [12.5j] + ids = [id(x) for x in l] + cfunc(l) + self.assertEqual([id(x) for x in l], ids) + + +class ManagedListTestCase(MemoryLeakMixin, TestCase): + + def assert_list_element_precise_equal(self, expect, got): + self.assertEqual(len(expect), len(got)) + for a, b in zip(expect, got): + self.assertPreciseEqual(a, b) + + +class TestListManagedElements(ManagedListTestCase): + "Test list containing objects that need refct" + + def _check_element_equal(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + con = [np.arange(3).astype(np.intp), np.arange(5).astype(np.intp)] + expect = list(con) + pyfunc(expect) + got = list(con) + cfunc(got) + self.assert_list_element_precise_equal( + expect=expect, got=got + ) + + def test_reflect_passthru(self): + def pyfunc(con): + pass + self._check_element_equal(pyfunc) + + def test_reflect_appended(self): + def pyfunc(con): + con.append(np.arange(10).astype(np.intp)) + + self._check_element_equal(pyfunc) + + def test_reflect_setitem(self): + def pyfunc(con): + con[1] = np.arange(10) + + self._check_element_equal(pyfunc) + + def test_reflect_popped(self): + def pyfunc(con): + con.pop() + + self._check_element_equal(pyfunc) + + def test_reflect_insert(self): + """make sure list.insert() doesn't crash for refcounted objects (see #7553) + """ + + def pyfunc(con): + con.insert(1, np.arange(4).astype(np.intp)) + + self._check_element_equal(pyfunc) + + def test_append(self): + def pyfunc(): + con = [] + for i in range(300): + con.append(np.arange(i, ).astype(np.intp)) + return con + + cfunc = jit(nopython=True)(pyfunc) + expect = pyfunc() + got = cfunc() + + self.assert_list_element_precise_equal( + expect=expect, got=got + ) + + def test_append_noret(self): + # This test make sure local dtor works + def pyfunc(): + con = [] + for i in range(300): + con.append(np.arange(i)) + c = 0.0 + for arr in con: + c += arr.sum() / (1 + arr.size) + return 
c + + cfunc = jit(nopython=True)(pyfunc) + expect = pyfunc() + got = cfunc() + + self.assertEqual(expect, got) + + def test_reassign_refct(self): + def pyfunc(): + con = [] + for i in range(5): + con.append(np.arange(2)) + con[0] = np.arange(4) + return con + + cfunc = jit(nopython=True)(pyfunc) + expect = pyfunc() + got = cfunc() + + self.assert_list_element_precise_equal( + expect=expect, got=got + ) + + def test_get_slice(self): + def pyfunc(): + con = [] + for i in range(5): + con.append(np.arange(2)) + return con[2:4] + + cfunc = jit(nopython=True)(pyfunc) + expect = pyfunc() + got = cfunc() + + self.assert_list_element_precise_equal( + expect=expect, got=got + ) + + def test_set_slice(self): + def pyfunc(): + con = [] + for i in range(5): + con.append(np.arange(2)) + con[1:3] = con[2:4] + return con + + cfunc = jit(nopython=True)(pyfunc) + expect = pyfunc() + got = cfunc() + + self.assert_list_element_precise_equal( + expect=expect, got=got + ) + + def test_pop(self): + def pyfunc(): + con = [] + for i in range(20): + con.append(np.arange(i + 1)) + while len(con) > 2: + con.pop() + return con + + cfunc = jit(nopython=True)(pyfunc) + expect = pyfunc() + got = cfunc() + + self.assert_list_element_precise_equal( + expect=expect, got=got + ) + + def test_pop_loc(self): + def pyfunc(): + con = [] + for i in range(1000): + con.append(np.arange(i + 1)) + while len(con) > 2: + con.pop(1) + return con + + cfunc = jit(nopython=True)(pyfunc) + expect = pyfunc() + got = cfunc() + + self.assert_list_element_precise_equal( + expect=expect, got=got + ) + + def test_del_range(self): + def pyfunc(): + con = [] + for i in range(20): + con.append(np.arange(i + 1)) + del con[3:10] + return con + + cfunc = jit(nopython=True)(pyfunc) + expect = pyfunc() + got = cfunc() + + self.assert_list_element_precise_equal( + expect=expect, got=got + ) + + def test_list_of_list(self): + def pyfunc(): + con = [] + for i in range(10): + con.append([0] * i) + return con + + cfunc = 
jit(nopython=True)(pyfunc) + expect = pyfunc() + got = cfunc() + + self.assertEqual(expect, got) + + + +def expect_reflection_failure(fn): + def wrapped(self, *args, **kwargs): + self.disable_leak_check() + with self.assertRaises(TypeError) as raises: + fn(self, *args, **kwargs) + expect_msg = 'cannot reflect element of reflected container' + self.assertIn(expect_msg, str(raises.exception)) + + return wrapped + + +class TestListOfList(ManagedListTestCase): + + def compile_and_test(self, pyfunc, *args): + from copy import deepcopy + expect_args = deepcopy(args) + expect = pyfunc(*expect_args) + + njit_args = deepcopy(args) + cfunc = jit(nopython=True)(pyfunc) + got = cfunc(*njit_args) + + self.assert_list_element_precise_equal( + expect=expect, got=got + ) + # Check reflection + self.assert_list_element_precise_equal( + expect=expect_args, got=njit_args + ) + + def test_returning_list_of_list(self): + def pyfunc(): + a = [[np.arange(i)] for i in range(4)] + return a + + self.compile_and_test(pyfunc) + + @expect_reflection_failure + def test_heterogeneous_list_error(self): + def pyfunc(x): + return x[1] + + cfunc = jit(nopython=True)(pyfunc) + l2 = [[np.zeros(i) for i in range(5)], + [np.ones(i)+1j for i in range(5)]] + l3 = [[np.zeros(i) for i in range(5)], [(1,)]] + l4 = [[1], [{1}]] + l5 = [[1], [{'a': 1}]] + + # TODO: this triggers a reflection error. 
+ # Remove this line when nested reflection is supported + cfunc(l2) + + # error_cases + with self.assertRaises(TypeError) as raises: + cfunc(l2) + + self.assertIn( + ("reflected list(array(float64, 1d, C)) != " + "reflected list(array(complex128, 1d, C))"), + str(raises.exception) + ) + + with self.assertRaises(TypeError) as raises: + cfunc(l3) + + self.assertIn( + ("reflected list(array(float64, 1d, C)) != " + "reflected list((int64 x 1))"), + str(raises.exception) + ) + + with self.assertRaises(TypeError) as raises: + cfunc(l4) + self.assertIn( + "reflected list(int64) != reflected list(reflected set(int64))", + str(raises.exception) + ) + + with self.assertRaises(ValueError) as raises: + cfunc(l5) + self.assertIn( + "Cannot type list element of ", + str(raises.exception) + ) + + @expect_reflection_failure + def test_list_of_list_reflected(self): + def pyfunc(l1, l2): + l1.append(l2) + l1[-1].append(123) + + cfunc = jit(nopython=True)(pyfunc) + l1 = [[0, 1], [2, 3]] + l2 = [4, 5] + expect = list(l1), list(l2) + got = list(l1), list(l2) + pyfunc(*expect) + cfunc(*got) + self.assertEqual(expect, got) + + @expect_reflection_failure + def test_heterogeneous_list(self): + def pyfunc(x): + return x[1] + + l1 = [[np.zeros(i) for i in range(5)], [np.ones(i) for i in range(5)]] + + cfunc = jit(nopython=True)(pyfunc) + l1_got = cfunc(l1) + self.assertPreciseEqual(pyfunc(l1), l1_got) + + @expect_reflection_failure + def test_c01(self): + def bar(x): + return x.pop() + + r = [[np.zeros(0)], [np.zeros(10)*1j]] + # TODO: this triggers a reflection error. 
+ # Remove this line when nested reflection is supported + self.compile_and_test(bar, r) + + with self.assertRaises(TypeError) as raises: + self.compile_and_test(bar, r) + self.assertIn( + ("reflected list(array(float64, 1d, C)) != " + "reflected list(array(complex128, 1d, C))"), + str(raises.exception), + ) + + def test_c02(self): + def bar(x): + x.append(x) + return x + + r = [[np.zeros(0)]] + + with self.assertRaises(errors.TypingError) as raises: + self.compile_and_test(bar, r) + self.assertIn( + "Invalid use of BoundFunction(list.append", + str(raises.exception), + ) + + def test_c03(self): + def bar(x): + f = x + f[0] = 1 + return f + + r = [[np.arange(3)]] + + with self.assertRaises(errors.TypingError) as raises: + self.compile_and_test(bar, r) + self.assertIn( + "invalid setitem with value of {} to element of {}".format( + typeof(1), + typeof(r[0]), + ), + str(raises.exception), + ) + + def test_c04(self): + def bar(x): + f = x + f[0][0] = 10 + return f + + r = [[np.arange(3)]] + with self.assertRaises(errors.TypingError) as raises: + self.compile_and_test(bar, r) + self.assertIn( + "invalid setitem with value of {} to element of {}".format( + typeof(10), + typeof(r[0][0]), + ), + str(raises.exception), + ) + + @expect_reflection_failure + def test_c05(self): + def bar(x): + f = x + f[0][0] = np.array([x for x in np.arange(10).astype(np.intp)]) + return f + + r = [[np.arange(3).astype(np.intp)]] + self.compile_and_test(bar, r) + + def test_c06(self): + def bar(x): + f = x + f[0][0] = np.array([x + 1j for x in np.arange(10)]) + return f + + r = [[np.arange(3)]] + with self.assertRaises(errors.TypingError) as raises: + self.compile_and_test(bar, r) + self.assertIn("invalid setitem with value", str(raises.exception)) + + @expect_reflection_failure + def test_c07(self): + self.disable_leak_check() + + def bar(x): + return x[-7] + + r = [[np.arange(3)]] + cfunc = jit(nopython=True)(bar) + with self.assertRaises(IndexError) as raises: + cfunc(r) + 
self.assertIn("getitem out of range", str(raises.exception)) + + def test_c08(self): + self.disable_leak_check() + + def bar(x): + x[5] = 7 + return x + + r = [1, 2, 3] + cfunc = jit(nopython=True)(bar) + with self.assertRaises(IndexError) as raises: + cfunc(r) + self.assertIn("setitem out of range", str(raises.exception)) + + def test_c09(self): + def bar(x): + x[-2] = 7j + return x + + r = [1, 2, 3] + with self.assertRaises(errors.TypingError) as raises: + self.compile_and_test(bar, r) + self.assertIn("invalid setitem with value", str(raises.exception)) + + @expect_reflection_failure + def test_c10(self): + def bar(x): + x[0], x[1] = x[1], x[0] + return x + + r = [[1, 2, 3], [4, 5, 6]] + self.compile_and_test(bar, r) + + @expect_reflection_failure + def test_c11(self): + def bar(x): + x[:] = x[::-1] + return x + + r = [[1, 2, 3], [4, 5, 6]] + self.compile_and_test(bar, r) + + def test_c12(self): + def bar(x): + del x[-1] + return x + + r = [x for x in range(10)] + self.compile_and_test(bar, r) + + +class Item(object): + def __init__(self, many, scalar): + self.many = many + self.scalar = scalar + + +class Container(object): + def __init__(self, n): + self.data = [[np.arange(i).astype(np.float64)] for i in range(n)] + + def more(self, n): + for i in range(n): + self.data.append([np.arange(i).astype(np.float64)]) + + +class TestListAndJitClasses(ManagedListTestCase): + def make_jitclass_element(self): + spec = [ + ('many', types.float64[:]), + ('scalar', types.float64), + ] + JCItem = jitclass(spec)(Item) + return JCItem + + def make_jitclass_container(self): + spec = { + 'data': types.List(dtype=types.List(types.float64[::1])), + } + JCContainer = jitclass(spec)(Container) + return JCContainer + + def assert_list_element_with_tester(self, tester, expect, got): + for x, y in zip(expect, got): + tester(x, y) + + def test_jitclass_instance_elements(self): + JCItem = self.make_jitclass_element() + + def pyfunc(xs): + xs[1], xs[0] = xs[0], xs[1] + return xs + + def 
eq(x, y): + self.assertPreciseEqual(x.many, y.many) + self.assertPreciseEqual(x.scalar, y.scalar) + + cfunc = jit(nopython=True)(pyfunc) + + arg = [JCItem(many=np.random.random(n + 1), scalar=n * 1.2) + for n in range(5)] + + expect_arg = list(arg) + got_arg = list(arg) + + expect_res = pyfunc(expect_arg) + got_res = cfunc(got_arg) + + self.assert_list_element_with_tester(eq, expect_arg, got_arg) + self.assert_list_element_with_tester(eq, expect_res, got_res) + + def test_jitclass_containing_list(self): + JCContainer = self.make_jitclass_container() + + expect = Container(n=4) + got = JCContainer(n=4) + self.assert_list_element_precise_equal(got.data, expect.data) + expect.more(3) + got.more(3) + self.assert_list_element_precise_equal(got.data, expect.data) + + +class TestListInitialValues(MemoryLeakMixin, TestCase): + """Tests that lists carry their initial value if present""" + + def test_homogeneous_and_literal(self): + def bar(l): + ... + + @overload(bar) + def ol_bar(l): + if l.initial_value is None: + return lambda l: literally(l) + self.assertTrue(isinstance(l, types.List)) + self.assertEqual(l.initial_value, [1, 2, 3]) + self.assertEqual(hasattr(l, 'literal_value'), False) + return lambda l: l + + @njit + def foo(): + # keys and values all have literal representation + x = [1, 2, 3] + bar(x) + + foo() + + def test_heterogeneous_but_castable_to_homogeneous(self): + def bar(l): + ... + + @overload(bar) + def ol_bar(l): + self.assertTrue(isinstance(l, types.List)) + self.assertEqual(l.initial_value, None) + self.assertEqual(hasattr(l, 'literal_value'), False) + return lambda l: l + + @njit + def foo(): + # This list will be typed based on 1j, i.e. complex128 + # as the values are not all literals, there's no "initial_value" + # available irrespective of whether it's possible to rip this + # information out of the bytecode. + x = [1j, 2, 3] + bar(x) + + foo() + + def test_mutation_not_carried(self): + def bar(d): + ... 
+ + @overload(bar) + def ol_bar(d): + if d.initial_value is None: + return lambda d: literally(d) + self.assertTrue(isinstance(d, types.List)) + self.assertEqual(d.initial_value, [1, 2, 3]) + return lambda d: d + + @njit + def foo(): + # This list is mutated, check the initial_value carries + # correctly and is not mutated + x = [1, 2, 3] + x.append(4) + bar(x) + + foo() + + def test_mutation_not_carried_single_function(self): + # this is another pattern for using literally + + @njit + def nop(*args): + pass + + for fn, iv in (nop, None), (literally, [1, 2, 3]): + @njit + def baz(x): + pass + + def bar(z): + pass + + @overload(bar) + def ol_bar(z): + def impl(z): + fn(z) + baz(z) + return impl + + @njit + def foo(): + x = [1, 2, 3] + bar(x) + x.append(2) + return x + + foo() + # baz should be specialised based on literally being invoked and + # the literal/unliteral arriving at the call site + larg = baz.signatures[0][0] + self.assertEqual(larg.initial_value, iv) + + def test_list_of_list_ctor(self): + # see issue 6082 + @njit + def bar(x): + pass + + @njit + def foo(): + x = [[1, 2, 3, 4, 5], [1, 2, 3, 4, 6]] + bar(x) + + foo() + larg = bar.signatures[0][0] + self.assertEqual(larg.initial_value, None) + self.assertEqual(larg.dtype.initial_value, None) + + +class TestLiteralLists(MemoryLeakMixin, TestCase): + + def test_basic_compile(self): + @njit + def foo(): + l = [1, 'a'] + + foo() + + def test_literal_value_passthrough(self): + + def bar(x): + pass + + @overload(bar) + def ol_bar(x): + self.assertTrue(isinstance(x, types.LiteralList)) + lv = x.literal_value + self.assertTrue(isinstance(lv, list)) + self.assertEqual(lv[0], types.literal(1)) + self.assertEqual(lv[1], types.literal('a')) + self.assertEqual(lv[2], types.Array(types.float64, 1, 'C')) + self.assertEqual(lv[3], types.List(types.intp, reflected=False, + initial_value=[1, 2, 3])) + self.assertTrue(isinstance(lv[4], types.LiteralList)) + self.assertEqual(lv[4].literal_value[0], types.literal('cat')) + 
self.assertEqual(lv[4].literal_value[1], types.literal(10)) + return lambda x: x + + @njit + def foo(): + otherhomogeneouslist = [1, 2, 3] + otherheterogeneouslist = ['cat', 10] + zeros = np.zeros(5) + l = [1, 'a', zeros, otherhomogeneouslist, otherheterogeneouslist] + bar(l) + + foo() + + def test_literal_value_involved_passthrough(self): + + def bar(x): + pass + + @overload(bar) + def ol_bar(x): + self.assertTrue(isinstance(x, types.LiteralStrKeyDict)) + dlv = x.literal_value + inner_literal = {types.literal('g'): types.literal('h'), + types.literal('i'): + types.Array(types.float64, 1, 'C')} + inner_dict = types.LiteralStrKeyDict(inner_literal) + outer_literal = {types.literal('a'): + types.LiteralList([types.literal(1), + types.literal('a'), + types.DictType( + types.unicode_type, + types.intp, + initial_value={'f': 1}), + inner_dict]), + types.literal('b'): types.literal(2), + types.literal('c'): types.List(types.complex128, + reflected=False)} + def check_same(a, b): + if (isinstance(a, types.LiteralList) and + isinstance(b, types.LiteralList)): + for i, j in zip(a.literal_value, b.literal_value): + check_same(a.literal_value, b.literal_value) + elif (isinstance(a, list) and + isinstance(b, list)): + for i, j in zip(a, b): + check_same(i, j) + elif (isinstance(a, types.LiteralStrKeyDict) and + isinstance(b, types.LiteralStrKeyDict)): + for (ki, vi), (kj, vj) in zip(a.literal_value.items(), + b.literal_value.items()): + check_same(ki, kj) + check_same(vi, vj) + elif (isinstance(a, dict) and + isinstance(b, dict)): + for (ki, vi), (kj, vj) in zip(a.items(), b.items()): + check_same(ki, kj) + check_same(vi, vj) + else: + self.assertEqual(a, b) + check_same(dlv, outer_literal) + return lambda x: x + + @njit + def foo(): + # this stretches what's possible with LiteralStrKeyDict and + # LiteralList, it's nested and contains typed.Dict and np arrays + # as well as more constant type things. 
+ l = {'a': [1, 'a', {'f': 1}, {'g': 'h', 'i': np.zeros(5)}], 'b': 2, + 'c': [1j, 2j, 3j]} + bar(l) + + foo() + + def test_mutation_failure(self): + + def staticsetitem(): + l = ['a', 1] + l[0] = 'b' + + def delitem(): + l = ['a', 1] + del l[0] + + def append(): + l = ['a', 1] + l.append(2j) + + def extend(): + l = ['a', 1] + l.extend([2j, 3j]) + + def insert(): + l = ['a', 1] + l.insert(0, 2j) + + def remove(): + l = ['a', 1] + l.remove('a') + + def pop(): + l = ['a', 1] + l.pop() + + def clear(): + l = ['a', 1] + l.clear() + + def sort(): + l = ['a', 1] + l.sort() + + def reverse(): + l = ['a', 1] + l.reverse() + + illegals = (staticsetitem, delitem, append, extend, insert, remove, pop, + clear, sort, reverse) + + for test in illegals: + with self.subTest(test.__name__): + with self.assertRaises(errors.TypingError) as raises: + njit(test)() + expect = "Cannot mutate a literal list" + self.assertIn(expect, str(raises.exception)) + + def test_count(self): + @njit + def foo(): + l = ['a', 1, 'a', 2, 'a', 3, 'b', 4, 'b', 5, 'c'] + r = [] + for x in 'abc': + r.append(l.count(x)) + return r + + self.assertEqual(foo.py_func(), foo()) + + def test_len(self): + @njit + def foo(): + l = ['a', 1, 'a', 2, 'a', 3, 'b', 4, 'b', 5, 'c'] + return len(l) + + self.assertEqual(foo.py_func(), foo()) + + def test_contains(self): + @njit + def foo(): + l = ['a', 1, 'a', 2, 'a', 3, 'b', 4, 'b', 5, 'c'] + r = [] + for x in literal_unroll(('a', 'd', 2, 6)): + r.append(x in l) + return r + + self.assertEqual(foo.py_func(), foo()) + + def test_getitem(self): + + @njit + def foo(x): + l = ['a', 1] + return l[x] + + with self.assertRaises(errors.TypingError) as raises: + foo(0) + expect = "Cannot __getitem__ on a literal list" + self.assertIn(expect, str(raises.exception)) + + def test_staticgetitem(self): + + @njit + def foo(): + l = ['a', 1] + return l[0], l[1] + + self.assertEqual(foo.py_func(), foo()) + + def test_staticgetitem_slice(self): + # this is forbidden by typing as there's no 
way to serialize a list of + # any kind as required by returning a (static) slice of a LiteralList + @njit + def foo(): + l = ['a', 'b', 1] + return l[:2] + + with self.assertRaises(errors.TypingError) as raises: + foo() + expect = "Cannot __getitem__ on a literal list" + self.assertIn(expect, str(raises.exception)) + + def test_setitem(self): + + @njit + def foo(x): + l = ['a', 1] + l[x] = 'b' + + with self.assertRaises(errors.TypingError) as raises: + foo(0) + expect = "Cannot mutate a literal list" + self.assertIn(expect, str(raises.exception)) + + def test_unify(self): + + @njit + def foo(x): + if x + 1 > 3: + l = ['a', 1] + else: + l = ['b', 2] + return l[0] + + for x in (-100, 100): + self.assertEqual(foo.py_func(x), foo(x)) + + def test_not_unify(self): + + @njit + def foo(x): + if x + 1 > 3: + l = ['a', 1, 2j] + else: + l = ['b', 2] + return l[0], l[1], l[0], l[1] # defeat py310 inliner + + with self.assertRaises(errors.TypingError) as raises: + foo(100) + expect = "Cannot unify LiteralList" + self.assertIn(expect, str(raises.exception)) + + def test_index(self): + @njit + def foo(): + l = ['a', 1] + l.index('a') + + with self.assertRaises(errors.TypingError) as raises: + foo() + expect = "list.index is unsupported for literal lists" + self.assertIn(expect, str(raises.exception)) + + def test_copy(self): + @njit + def foo(): + l = ['a', 1].copy() + return l[0], l[1] + + self.assertEqual(foo(), foo.py_func()) + + def test_tuple_not_in_mro(self): + # Related to #6094, make sure that LiteralList does not inherit from + # types.BaseTuple as this breaks isinstance checks. + def bar(x): + pass + + @overload(bar) + def ol_bar(x): + self.assertFalse(isinstance(x, types.BaseTuple)) + self.assertTrue(isinstance(x, types.LiteralList)) + return lambda x: ... 
+ + @njit + def foo(): + l = ['a', 1] + bar(l) + + foo() + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_literal_dispatch.py b/venv/lib/python3.10/site-packages/numba/tests/test_literal_dispatch.py new file mode 100644 index 0000000000000000000000000000000000000000..6d759f06e70c66ac50b3e5359dc9c9d7013f75c5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_literal_dispatch.py @@ -0,0 +1,400 @@ +import numpy as np + +import numba +import unittest +from numba.tests.support import TestCase +from numba import njit +from numba.core import types, errors, cgutils +from numba.core.typing import signature +from numba.core.datamodel import models +from numba.core.extending import ( + overload, SentryLiteralArgs, overload_method, register_model, intrinsic, +) +from numba.misc.special import literally + + +class TestLiteralDispatch(TestCase): + def check_literal_basic(self, literal_args): + @njit + def foo(x): + return literally(x) + + # Test with int + for lit in literal_args: + self.assertEqual(foo(lit), lit) + + for lit, sig in zip(literal_args, foo.signatures): + self.assertEqual(sig[0].literal_value, lit) + + def test_literal_basic(self): + self.check_literal_basic([123, 321]) + self.check_literal_basic(["abc", "cb123"]) + + def test_literal_nested(self): + @njit + def foo(x): + return literally(x) * 2 + + @njit + def bar(y, x): + return foo(y) + x + + y, x = 3, 7 + self.assertEqual(bar(y, x), y * 2 + x) + [foo_sig] = foo.signatures + self.assertEqual(foo_sig[0], types.literal(y)) + [bar_sig] = bar.signatures + self.assertEqual(bar_sig[0], types.literal(y)) + self.assertNotIsInstance(bar_sig[1], types.Literal) + + def test_literally_freevar(self): + # Try referring to numba.literally not in the globals + import numba + + @njit + def foo(x): + return numba.literally(x) + + self.assertEqual(foo(123), 123) + self.assertEqual(foo.signatures[0][0], types.literal(123)) + + def 
test_mutual_recursion_literal(self): + def get_functions(decor): + @decor + def outer_fac(n, value): + if n < 1: + return value + return n * inner_fac(n - 1, value) + + @decor + def inner_fac(n, value): + if n < 1: + return literally(value) + return n * outer_fac(n - 1, value) + + return outer_fac, inner_fac + + ref_outer_fac, ref_inner_fac = get_functions(lambda x: x) + outer_fac, inner_fac = get_functions(njit) + + self.assertEqual(outer_fac(10, 12), ref_outer_fac(10, 12)) + self.assertEqual(outer_fac.signatures[0][1].literal_value, 12) + self.assertEqual(inner_fac.signatures[0][1].literal_value, 12) + + self.assertEqual(inner_fac(11, 13), ref_inner_fac(11, 13)) + self.assertEqual(outer_fac.signatures[1][1].literal_value, 13) + self.assertEqual(inner_fac.signatures[1][1].literal_value, 13) + + def test_literal_nested_multi_arg(self): + @njit + def foo(a, b, c): + return inner(a, c) + + @njit + def inner(x, y): + return x + literally(y) + + kwargs = dict(a=1, b=2, c=3) + got = foo(**kwargs) + expect = (lambda a, b, c: a + c)(**kwargs) + self.assertEqual(got, expect) + [foo_sig] = foo.signatures + self.assertEqual(foo_sig[2], types.literal(3)) + + def test_unsupported_literal_type(self): + @njit + def foo(a, b, c): + return inner(a, c) + + @njit + def inner(x, y): + return x + literally(y) + + arr = np.arange(10) + with self.assertRaises(errors.LiteralTypingError) as raises: + foo(a=1, b=2, c=arr) + self.assertIn("numpy.ndarray", str(raises.exception)) + + def test_biliteral(self): + # Test usecase with more than one literal call + @njit + def foo(a, b, c): + return inner(a, b) + inner(b, c) + + @njit + def inner(x, y): + return x + literally(y) + + kwargs = dict(a=1, b=2, c=3) + got = foo(**kwargs) + expect = (lambda a, b, c: a + b + b + c)(**kwargs) + self.assertEqual(got, expect) + [(type_a, type_b, type_c)] = foo.signatures + self.assertNotIsInstance(type_a, types.Literal) + self.assertIsInstance(type_b, types.Literal) + self.assertEqual(type_b.literal_value, 
2) + self.assertIsInstance(type_c, types.Literal) + self.assertEqual(type_c.literal_value, 3) + + def test_literally_varargs(self): + @njit + def foo(a, *args): + return literally(args) + + with self.assertRaises(errors.LiteralTypingError): + foo(1, 2, 3) + + @njit + def bar(a, b): + foo(a, b) + + with self.assertRaises(errors.TypingError) as raises: + bar(1, 2) + self.assertIn( + "Cannot request literal type", + str(raises.exception), + ) + + @unittest.expectedFailure + def test_literally_defaults(self): + # Problem with OmittedArg + @njit + def foo(a, b=1): + return (a, literally(b)) + foo(a=1) + + @unittest.expectedFailure + def test_literally_defaults_inner(self): + # Problem with Omitted + @njit + def foo(a, b=1): + return (a, literally(b)) + + @njit + def bar(a): + return foo(a) + 1 + + bar(1) + + def test_literally_from_module(self): + # Problem with Omitted + @njit + def foo(x): + return numba.literally(x) + + got = foo(123) + self.assertEqual(got, foo.py_func(123)) + self.assertIsInstance(foo.signatures[0][0], types.Literal) + + def test_non_literal(self): + @njit + def foo(a, b): + return literally(1 + a) + + with self.assertRaises(errors.TypingError) as raises: + foo(1, 2) + self.assertIn( + "Invalid use of non-Literal type", + str(raises.exception), + ) + + def test_inlined_literal(self): + # Check that literally accepts inlined literal + @njit + def foo(a, b): + v = 1000 + return a + literally(v) + literally(b) + + got = foo(1, 2) + self.assertEqual(got, foo.py_func(1, 2)) + + @njit + def bar(): + a = 100 + b = 9 + return foo(a=b, b=a) + + got = bar() + self.assertEqual(got, bar.py_func()) + + def test_aliased_variable(self): + @njit + def foo(a, b, c): + def closure(d): + return literally(d) + 10 * inner(a, b) + # The inlining of the closure will create an alias to c + return closure(c) + + @njit + def inner(x, y): + return x + literally(y) + + kwargs = dict(a=1, b=2, c=3) + got = foo(**kwargs) + expect = (lambda a, b, c: c + 10 * (a + b))(**kwargs) + 
self.assertEqual(got, expect) + [(type_a, type_b, type_c)] = foo.signatures + self.assertNotIsInstance(type_a, types.Literal) + self.assertIsInstance(type_b, types.Literal) + self.assertEqual(type_b.literal_value, 2) + self.assertIsInstance(type_c, types.Literal) + self.assertEqual(type_c.literal_value, 3) + + def test_overload_explicit(self): + # This test represents a more controlled usage with ensuring literal + # typing for an argument. + def do_this(x, y): + return x + y + + @overload(do_this) + def ov_do_this(x, y): + SentryLiteralArgs(['x']).for_function(ov_do_this).bind(x, y) + return lambda x, y: x + y + + @njit + def foo(a, b): + return do_this(a, b) + + a = 123 + b = 321 + r = foo(a, b) + self.assertEqual(r, a + b) + [type_a, type_b] = foo.signatures[0] + self.assertIsInstance(type_a, types.Literal) + self.assertEqual(type_a.literal_value, a) + self.assertNotIsInstance(type_b, types.Literal) + + def test_overload_implicit(self): + # This test represents the preferred usage style for using literally + # in overload. Here, literally() is used inside the "implementation" + # function of the overload. + def do_this(x, y): + return x + y + + @njit + def hidden(x, y): + return literally(x) + y + + @overload(do_this) + def ov_do_this(x, y): + if isinstance(x, types.Integer): + # At this point, `x` can be a literal or not + return lambda x, y: hidden(x, y) + + @njit + def foo(a, b): + return do_this(a, b) + + a = 123 + b = 321 + r = foo(a, b) + self.assertEqual(r, a + b) + [type_a, type_b] = foo.signatures[0] + self.assertIsInstance(type_a, types.Literal) + self.assertEqual(type_a.literal_value, a) + self.assertNotIsInstance(type_b, types.Literal) + + def test_overload_error_loop(self): + # Test a case where a infinite compiling loop is caused because a + # literal type is requested but an error would raise for the + # literal-ized code path. This causes the overload resolution to + # retry by "de-literal-izing" the values. 
+ def do_this(x, y): + return x + y + + @njit + def hidden(x, y): + return literally(x) + y + + @overload(do_this) + def ov_do_this(x, y): + if isinstance(y, types.IntegerLiteral): + # This error is however suppressed because a non-literal + # version is valid. + raise errors.NumbaValueError("oops") + else: + def impl(x, y): + return hidden(x, y) + return impl + + @njit + def foo(a, b): + return do_this(a, literally(b)) + + # Expect raising CompilerError to stop re-compiling with duplicated + # literal typing request. + with self.assertRaises(errors.CompilerError) as raises: + foo(a=123, b=321) + self.assertIn("Repeated literal typing request", + str(raises.exception)) + + +class TestLiteralDispatchWithCustomType(TestCase): + def make_dummy_type(self): + class Dummy(object): + def lit(self, a): + return a + + class DummyType(types.Type): + def __init__(self): + super(DummyType, self).__init__(name="dummy") + + @register_model(DummyType) + class DummyTypeModel(models.StructModel): + def __init__(self, dmm, fe_type): + members = [] + super(DummyTypeModel, self).__init__(dmm, fe_type, members) + + @intrinsic + def init_dummy(typingctx): + def codegen(context, builder, signature, args): + dummy = cgutils.create_struct_proxy( + signature.return_type)(context, builder) + + return dummy._getvalue() + + sig = signature(DummyType()) + return sig, codegen + + @overload(Dummy) + def dummy_overload(): + def ctor(): + return init_dummy() + + return ctor + + return (DummyType, Dummy) + + def test_overload_method(self): + # from issue #5011 + DummyType, Dummy = self.make_dummy_type() + + @overload_method(DummyType, 'lit') + def lit_overload(self, a): + def impl(self, a): + return literally(a) # <-- using literally here + + return impl + + @njit + def test_impl(a): + d = Dummy() + + return d.lit(a) + + # Successful case + self.assertEqual(test_impl(5), 5) + + # Failing case + @njit + def inside(a): + return test_impl(a + 1) + + with self.assertRaises(errors.TypingError) as raises: 
+ inside(4) + + self.assertIn("Cannot request literal type.", str(raises.exception)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_llvm_pass_timings.py b/venv/lib/python3.10/site-packages/numba/tests/test_llvm_pass_timings.py new file mode 100644 index 0000000000000000000000000000000000000000..cdb128fb303ee70ff83fb835e5c135b62b4cb4fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_llvm_pass_timings.py @@ -0,0 +1,124 @@ +import unittest + +from numba import njit +from numba.tests.support import TestCase, override_config +from numba.misc import llvm_pass_timings as lpt + + +timings_raw1 = """ +===-------------------------------------------------------------------------=== + ... Pass execution timing report ... +===-------------------------------------------------------------------------=== + Total Execution Time: 0.0001 seconds (0.0001 wall clock) + + ---User Time--- --System Time-- --User+System-- ---Wall Time--- --- Name --- + 0.0001 ( 90.1%) 0.0001 ( 90.1%) 0.0001 ( 90.1%) 0.0001 ( 90.1%) A1 + 0.0000 ( 9.9%) 0.0000 ( 9.9%) 0.0000 ( 9.9%) 0.0000 ( 9.9%) A2 + 0.0001 (100.0%) 0.0001 (100.0%) 0.0001 (100.0%) 0.0001 (100.0%) Total + +""" # noqa: E501 + +timings_raw2 = """ +===-------------------------------------------------------------------------=== + ... Pass execution timing report ... 
+===-------------------------------------------------------------------------=== + Total Execution Time: 0.0001 seconds (0.0001 wall clock) + + ---User Time--- --System Time-- --User+System-- ---Wall Time--- --- Name --- + 0.0001 ( 90.1%) ----- 0.0001 ( 90.1%) 0.0001 ( 90.1%) A1 + 0.0000 ( 9.9%) ----- 0.0000 ( 9.9%) 0.0000 ( 9.9%) A2 + 0.0001 (100.0%) ----- 0.0001 (100.0%) 0.0001 (100.0%) Total + +""" # noqa: E501 + + +class TestLLVMPassTimings(TestCase): + + def test_usage(self): + @njit + def foo(n): + c = 0 + for i in range(n): + c += i + return c + + with override_config('LLVM_PASS_TIMINGS', True): + foo(10) + + md = foo.get_metadata(foo.signatures[0]) + timings = md['llvm_pass_timings'] + # Check: timing is of correct type + self.assertIsInstance(timings, lpt.PassTimingsCollection) + # Check: basic for __str__ + text = str(timings) + self.assertIn("Module passes (full optimization)", text) + # Check: there must be more than one record + self.assertGreater(len(timings), 0) + # Check: __getitem__ + last = timings[-1] + self.assertIsInstance(last, lpt.NamedTimings) + # Check: NamedTimings + self.assertIsInstance(last.name, str) + self.assertIsInstance(last.timings, lpt.ProcessedPassTimings) + + def test_analyze(self): + @njit + def foo(n): + c = 0 + for i in range(n): + for j in range(i): + c += j + return c + + with override_config('LLVM_PASS_TIMINGS', True): + foo(10) + + md = foo.get_metadata(foo.signatures[0]) + timings_collection = md['llvm_pass_timings'] + # Check: get_total_time() + self.assertIsInstance(timings_collection.get_total_time(), float) + # Check: summary() + self.assertIsInstance(timings_collection.summary(), str) + # Check: list_longest_first() ordering + longest_first = timings_collection.list_longest_first() + self.assertEqual(len(longest_first), len(timings_collection)) + last = longest_first[0].timings.get_total_time() + for rec in longest_first[1:]: + cur = rec.timings.get_total_time() + self.assertGreaterEqual(last, cur) + cur = last + + 
def test_parse_raw(self): + timings1 = lpt.ProcessedPassTimings(timings_raw1) + self.assertAlmostEqual(timings1.get_total_time(), 0.0001) + self.assertIsInstance(timings1.summary(), str) + + timings2 = lpt.ProcessedPassTimings(timings_raw2) + self.assertAlmostEqual(timings2.get_total_time(), 0.0001) + self.assertIsInstance(timings2.summary(), str) + + +class TestLLVMPassTimingsDisabled(TestCase): + def test_disabled_behavior(self): + @njit + def foo(n): + c = 0 + for i in range(n): + c += i + return c + + with override_config('LLVM_PASS_TIMINGS', False): + foo(10) + + md = foo.get_metadata(foo.signatures[0]) + timings = md['llvm_pass_timings'] + # Check that the right message is returned + self.assertEqual(timings.summary(), "No pass timings were recorded") + # Check that None is returned + self.assertIsNone(timings.get_total_time()) + # Check that empty list is returned + self.assertEqual(timings.list_longest_first(), []) + + +if __name__ == "__main__": + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_llvm_version_check.py b/venv/lib/python3.10/site-packages/numba/tests/test_llvm_version_check.py new file mode 100644 index 0000000000000000000000000000000000000000..bea99fed9550a250075811cd8531f6c0b048dd66 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_llvm_version_check.py @@ -0,0 +1,42 @@ +import importlib +import sys + +import unittest + + +class TestLlvmVersion(unittest.TestCase): + + def test_llvmlite_version(self): + # test the system it's running on + import llvmlite + import numba + self.assertTrue(numba.__version__) + + llvmlite_version = llvmlite.__version__ + def cleanup(): + llvmlite.__version__ = llvmlite_version + self.addCleanup(cleanup) + + # explicitly test all 3 cases of version string + ver = numba._min_llvmlite_version + version_pass = '%d.%d.%d' % ver + git_version_pass = '%d.%d.%d-10-g92584ed' % ver + rc_version_pass = '%d.%d.%drc1' % (ver[0], ver[1], ver[2] + 1) + version_fail = 
'%d.%d.0' % (ver[0], ver[1] - 1) + git_version_fail = '%d.%d.9-10-g92584ed' % (ver[0], ver[1] - 1) + + ver_pass = (version_pass, git_version_pass, rc_version_pass) + ver_fail = (version_fail, git_version_fail) + for v in ver_pass: + llvmlite.__version__ = v + importlib.reload(numba) + self.assertTrue(numba.__version__) + + for v in ver_fail: + with self.assertRaises(ImportError): + llvmlite.__version__ = v + importlib.reload(numba) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_locals.py b/venv/lib/python3.10/site-packages/numba/tests/test_locals.py new file mode 100644 index 0000000000000000000000000000000000000000..9546f58c1e6dfea5138996e7debcdb0b5a3828eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_locals.py @@ -0,0 +1,18 @@ +from numba import float32, njit +import unittest + + +def foo(): + x = 123 + return x + + +class TestLocals(unittest.TestCase): + + def test_seed_types(self): + cfunc = njit((), locals={'x': float32})(foo) + self.assertEqual(cfunc.nopython_signatures[0].return_type, float32) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_looplifting.py b/venv/lib/python3.10/site-packages/numba/tests/test_looplifting.py new file mode 100644 index 0000000000000000000000000000000000000000..cd04b74874bfad2b3f252a17a218d74cc7ca5739 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_looplifting.py @@ -0,0 +1,560 @@ +from io import StringIO +import numpy as np + +from numba.core import types +from numba.core.compiler import compile_extra, Flags +from numba.tests.support import TestCase, tag, MemoryLeakMixin +import unittest + + +looplift_flags = Flags() +looplift_flags.force_pyobject = True +looplift_flags.enable_looplift = True + +pyobject_looplift_flags = looplift_flags.copy() +pyobject_looplift_flags.enable_pyobject_looplift = True + + +def compile_isolated(pyfunc, argtypes, 
**kwargs): + from numba.core.registry import cpu_target + + kwargs.setdefault('return_type', None) + kwargs.setdefault('locals', {}) + return compile_extra( + cpu_target.typing_context, + cpu_target.target_context, + pyfunc, + argtypes, + **kwargs, + ) + + +def lift1(x): + # Outer needs object mode because of np.empty() + a = np.empty(3) + for i in range(a.size): + # Inner is nopython-compliant + a[i] = x + return a + + +def lift2(x): + # Outer needs object mode because of np.empty() + a = np.empty((3, 4)) + for i in range(a.shape[0]): + for j in range(a.shape[1]): + # Inner is nopython-compliant + a[i, j] = x + return a + + +def lift3(x): + # Output variable from the loop + _ = object() + a = np.arange(5, dtype=np.int64) + c = 0 + for i in range(a.shape[0]): + c += a[i] * x + return c + +def lift4(x): + # Output two variables from the loop + _ = object() + a = np.arange(5, dtype=np.int64) + c = 0 + d = 0 + for i in range(a.shape[0]): + c += a[i] * x + d += c + return c + d + +def lift5(x): + _ = object() + a = np.arange(4) + for i in range(a.shape[0]): + # Inner has a break statement + if i > 2: + break + return a + +def lift_gen1(x): + # Outer needs object mode because of np.empty() + a = np.empty(3) + yield 0 + for i in range(a.size): + # Inner is nopython-compliant + a[i] = x + yield np.sum(a) + +def lift_issue2561(): + np.empty(1) # This forces objectmode because no nrt + for i in range(10): + for j in range(10): + return 1 + return 2 + +def reject1(x): + a = np.arange(4) + for i in range(a.shape[0]): + # Inner returns a variable from outer "scope" => cannot loop-lift + return a + return a + + +def reject_gen1(x): + _ = object() + a = np.arange(4) + for i in range(a.shape[0]): + # Inner is a generator => cannot loop-lift + yield a[i] + +def reject_gen2(x): + _ = object() + a = np.arange(3) + for i in range(a.size): + # Middle has a yield => cannot loop-lift + res = a[i] + x + for j in range(i): + # Inner is nopython-compliant, but the current algorithm isn't + 
# able to separate it. + res = res ** 2 + yield res + +def reject_npm1(x): + a = np.empty(3, dtype=np.int32) + for i in range(a.size): + # Inner uses object() => cannot loop-lift + _ = object() + a[i] = np.arange(i + 1)[i] + + return a + + +class TestLoopLifting(MemoryLeakMixin, TestCase): + + def try_lift(self, pyfunc, argtypes): + from numba.core.registry import cpu_target + + cres = compile_extra( + cpu_target.typing_context, + cpu_target.target_context, + pyfunc, argtypes, + return_type=None, flags=looplift_flags, locals={}, + ) + # One lifted loop + self.assertEqual(len(cres.lifted), 1) + return cres + + def assert_lifted_native(self, cres): + # Check if we have lifted in nopython mode + jitloop = cres.lifted[0] + [loopcres] = jitloop.overloads.values() + self.assertTrue(loopcres.fndesc.native) # Lifted function is native + + def check_lift_ok(self, pyfunc, argtypes, args): + """ + Check that pyfunc can loop-lift even in nopython mode. + """ + cres = self.try_lift(pyfunc, argtypes) + expected = pyfunc(*args) + got = cres.entry_point(*args) + self.assert_lifted_native(cres) + # Check return values + self.assertPreciseEqual(expected, got) + + def check_lift_generator_ok(self, pyfunc, argtypes, args): + """ + Check that pyfunc (a generator function) can loop-lift even in + nopython mode. + """ + cres = self.try_lift(pyfunc, argtypes) + expected = list(pyfunc(*args)) + got = list(cres.entry_point(*args)) + self.assert_lifted_native(cres) + # Check return values + self.assertPreciseEqual(expected, got) + + def check_no_lift(self, pyfunc, argtypes, args): + """ + Check that pyfunc can't loop-lift. + """ + cres = compile_isolated(pyfunc, argtypes, + flags=looplift_flags) + self.assertFalse(cres.lifted) + expected = pyfunc(*args) + got = cres.entry_point(*args) + # Check return values + self.assertPreciseEqual(expected, got) + + def check_no_lift_generator(self, pyfunc, argtypes, args): + """ + Check that pyfunc (a generator function) can't loop-lift. 
+ """ + cres = compile_isolated(pyfunc, argtypes, + flags=looplift_flags) + self.assertFalse(cres.lifted) + expected = list(pyfunc(*args)) + got = list(cres.entry_point(*args)) + self.assertPreciseEqual(expected, got) + + def test_lift1(self): + self.check_lift_ok(lift1, (types.intp,), (123,)) + + def test_lift2(self): + self.check_lift_ok(lift2, (types.intp,), (123,)) + + def test_lift3(self): + self.check_lift_ok(lift3, (types.intp,), (123,)) + + def test_lift4(self): + self.check_lift_ok(lift4, (types.intp,), (123,)) + + def test_lift5(self): + self.check_lift_ok(lift5, (types.intp,), (123,)) + + def test_lift_issue2561(self): + self.check_lift_ok(lift_issue2561, (), ()) + + def test_lift_gen1(self): + self.check_lift_generator_ok(lift_gen1, (types.intp,), (123,)) + + def test_reject1(self): + self.check_no_lift(reject1, (types.intp,), (123,)) + + def test_reject_gen1(self): + self.check_no_lift_generator(reject_gen1, (types.intp,), (123,)) + + def test_reject_gen2(self): + self.check_no_lift_generator(reject_gen2, (types.intp,), (123,)) + + +class TestLoopLiftingAnnotate(TestCase): + def test_annotate_1(self): + """ + Verify that annotation works as expected with one lifted loop + """ + from numba import jit + + # dummy function to force objmode + def bar(): + pass + + def foo(x): + bar() # force obj + for i in range(x.size): + x[i] += 1 + + return x + + cfoo = jit(forceobj=True)(foo) + + x = np.arange(10) + xcopy = x.copy() + r = cfoo(x) + np.testing.assert_equal(r, xcopy + 1) + + buf = StringIO() + cfoo.inspect_types(file=buf) + annotation = buf.getvalue() + buf.close() + + self.assertIn("The function contains lifted loops", annotation) + line = foo.__code__.co_firstlineno + 2 # 2 lines down from func head + self.assertIn("Loop at line {line}".format(line=line), annotation) + self.assertIn("Has 1 overloads", annotation) + + def test_annotate_2(self): + """ + Verify that annotation works as expected with two lifted loops + """ + from numba import jit + + # 
dummy function to force objmode + def bar(): + pass + + def foo(x): + bar() # force obj + # first lifted loop + for i in range(x.size): + x[i] += 1 + # second lifted loop + for j in range(x.size): + x[j] *= 2 + return x + + cfoo = jit(forceobj=True)(foo) + + x = np.arange(10) + xcopy = x.copy() + r = cfoo(x) + np.testing.assert_equal(r, (xcopy + 1) * 2) + + buf = StringIO() + cfoo.inspect_types(file=buf) + annotation = buf.getvalue() + buf.close() + + self.assertIn("The function contains lifted loops", annotation) + line1 = foo.__code__.co_firstlineno + 3 # 3 lines down from func head + line2 = foo.__code__.co_firstlineno + 6 # 6 lines down from func head + self.assertIn("Loop at line {line}".format(line=line1), annotation) + self.assertIn("Loop at line {line}".format(line=line2), annotation) + + +class TestLoopLiftingInAction(MemoryLeakMixin, TestCase): + def assert_has_lifted(self, jitted, loopcount): + lifted = jitted.overloads[jitted.signatures[0]].lifted + self.assertEqual(len(lifted), loopcount) + + def test_issue_734(self): + from numba import jit, void, int32, double + + @jit(void(int32, double[:]), forceobj=True) + def forloop_with_if(u, a): + if u == 0: + for i in range(a.shape[0]): + a[i] = a[i] * 2.0 + else: + for i in range(a.shape[0]): + a[i] = a[i] + 1.0 + + for u in (0, 1): + nb_a = np.arange(10, dtype='int32') + np_a = np.arange(10, dtype='int32') + forloop_with_if(u, nb_a) + forloop_with_if.py_func(u, np_a) + self.assertPreciseEqual(nb_a, np_a) + + def test_issue_812(self): + from numba import jit + + @jit('f8[:](f8[:])', forceobj=True) + def test(x): + res = np.zeros(len(x)) + ind = 0 + for ii in range(len(x)): + ind += 1 + res[ind] = x[ind] + if x[ind] >= 10: + break + + # Invalid loopjitting will miss the usage of `ind` in the + # following loop. 
+ for ii in range(ind + 1, len(x)): + res[ii] = 0 + return res + + x = np.array([1., 4, 2, -3, 5, 2, 10, 5, 2, 6]) + np.testing.assert_equal(test.py_func(x), test(x)) + + def test_issue_2368(self): + from numba import jit + + def lift_issue2368(a, b): + s = 0 + for e in a: + s += e + h = b.__hash__() + return s, h + + a = np.ones(10) + b = object() + jitted = jit(forceobj=True)(lift_issue2368) + + expected = lift_issue2368(a, b) + got = jitted(a, b) + + self.assertEqual(expected[0], got[0]) + self.assertEqual(expected[1], got[1]) + + jitloop = jitted.overloads[jitted.signatures[0]].lifted[0] + [loopcres] = jitloop.overloads.values() + # assert lifted function is native + self.assertTrue(loopcres.fndesc.native) + + def test_no_iteration_w_redef(self): + # redefinition of res in the loop with no use of res should not + # prevent lifting + from numba import jit + + @jit(forceobj=True) + def test(n): + res = 0 + for i in range(n): + res = i + return res + + # loop count = 1, loop lift but loop body not execute + self.assertEqual(test.py_func(-1), test(-1)) + self.assert_has_lifted(test, loopcount=1) + # loop count = 1, loop will lift and will execute + self.assertEqual(test.py_func(1), test(1)) + self.assert_has_lifted(test, loopcount=1) + + def test_no_iteration(self): + from numba import jit + + @jit(forceobj=True) + def test(n): + res = 0 + for i in range(n): + res += i + return res + + # loop count = 1 + self.assertEqual(test.py_func(-1), test(-1)) + self.assert_has_lifted(test, loopcount=1) + # loop count = 1 + self.assertEqual(test.py_func(1), test(1)) + self.assert_has_lifted(test, loopcount=1) + + def test_define_in_loop_body(self): + # tests a definition in a loop that leaves the loop is liftable + from numba import jit + + @jit(forceobj=True) + def test(n): + for i in range(n): + res = i + return res + + # loop count = 1 + self.assertEqual(test.py_func(1), test(1)) + self.assert_has_lifted(test, loopcount=1) + + def test_invalid_argument(self): + """Test a 
problem caused by invalid discovery of loop argument + when a variable is used afterwards but not before. + + Before the fix, this will result in:: + + numba.ir.NotDefinedError: 'i' is not defined + """ + from numba import jit + + @jit(forceobj=True) + def test(arg): + if type(arg) == np.ndarray: # force object mode + if arg.ndim == 1: + result = 0.0 + j = 0 + for i in range(arg.shape[0]): + pass + else: + raise Exception + else: + result = 0.0 + i, j = 0, 0 + return result + + arg = np.arange(10) + self.assertEqual(test.py_func(arg), test(arg)) + + def test_conditionally_defined_in_loop(self): + from numba import jit + @jit(forceobj=True) + def test(): + x = 5 + y = 0 + for i in range(2): + if i > 0: + x = 6 + y += x + return y, x + + self.assertEqual(test.py_func(), test()) + self.assert_has_lifted(test, loopcount=1) + + def test_stack_offset_error_when_has_no_return(self): + from numba import jit + import warnings + + def pyfunc(a): + if a: + for i in range(10): + pass + + with warnings.catch_warnings(): + warnings.simplefilter("error") + + cfunc = jit(forceobj=True)(pyfunc) + self.assertEqual(pyfunc(True), cfunc(True)) + + def test_variable_scope_bug(self): + """ + https://github.com/numba/numba/issues/2179 + + Looplifting transformation is using the wrong version of variable `h`. + """ + from numba import jit + + def bar(x): + return x + + def foo(x): + h = 0. 
+ for k in range(x): + h = h + k + h = h - bar(x) + return h + + cfoo = jit(forceobj=True)(foo) + self.assertEqual(foo(10), cfoo(10)) + + def test_recompilation_loop(self): + """ + https://github.com/numba/numba/issues/2481 + """ + from numba import jit + + def foo(x, y): + # slicing to make array `x` into different layout + # to cause a new compilation of the lifted loop + A = x[::y] + c = 1 + for k in range(A.size): + object() # to force objectmode and looplifting + c = c * A[::-1][k] # the slice that is failing in static_getitem + return c + + cfoo = jit(forceobj=True)(foo) + # First run just works + args = np.arange(10), 1 + self.assertEqual(foo(*args), cfoo(*args)) + # Exactly 1 lifted loop so far + self.assertEqual(len(cfoo.overloads[cfoo.signatures[0]].lifted), 1) + lifted = cfoo.overloads[cfoo.signatures[0]].lifted[0] + # The lifted loop has 1 signature + self.assertEqual(len(lifted.signatures), 1) + # Use different argument to trigger a new compilation of the lifted loop + args = np.arange(10), -1 + self.assertEqual(foo(*args), cfoo(*args)) + # Ensure that is really a new overload for the lifted loop + self.assertEqual(len(lifted.signatures), 2) + + + def test_lift_objectmode_issue_4223(self): + from numba import jit + + @jit(forceobj=True) + def foo(a, b, c, d, x0, y0, n): + xs, ys = np.zeros(n), np.zeros(n) + xs[0], ys[0] = x0, y0 + for i in np.arange(n-1): + xs[i+1] = np.sin(a * ys[i]) + c * np.cos(a * xs[i]) + ys[i+1] = np.sin(b * xs[i]) + d * np.cos(b * ys[i]) + object() # ensure object mode + return xs, ys + + kwargs = dict(a=1.7, b=1.7, c=0.6, d=1.2, x0=0, y0=0, n=200) + got = foo(**kwargs) + expected = foo.py_func(**kwargs) + self.assertPreciseEqual(got[0], expected[0]) + self .assertPreciseEqual(got[1], expected[1]) + [lifted] = foo.overloads[foo.signatures[0]].lifted + self.assertEqual(len(lifted.nopython_signatures), 1) + + +if __name__ == '__main__': + unittest.main() diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/test_make_function_to_jit_function.py b/venv/lib/python3.10/site-packages/numba/tests/test_make_function_to_jit_function.py new file mode 100644 index 0000000000000000000000000000000000000000..29161fff594545fffceee89dbe9390cb349f1dc4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_make_function_to_jit_function.py @@ -0,0 +1,284 @@ +from numba import njit +from numba.core import errors +from numba.core.extending import overload +import numpy as np + +import unittest + + +@njit +def consumer(func, *args): + return func(*args) + + +@njit +def consumer2arg(func1, func2): + return func2(func1) + + +_global = 123 + + +class TestMakeFunctionToJITFunction(unittest.TestCase): + """ + This tests the pass that converts ir.Expr.op == make_function (i.e. closure) + into a JIT function. + """ + # NOTE: testing this is a bit tricky. The function receiving a JIT'd closure + # must also be under JIT control so as to handle the JIT'd closure + # correctly, however, in the case of running the test implementations in the + # interpreter, the receiving function cannot be JIT'd else it will receive + # the Python closure and then complain about pyobjects as arguments. + # The way around this is to use a factory function to close over either the + # jitted or standard python function as the consumer depending on context. 
+ + def test_escape(self): + + def impl_factory(consumer_func): + def impl(): + def inner(): + return 10 + return consumer_func(inner) + return impl + + cfunc = njit(impl_factory(consumer)) + impl = impl_factory(consumer.py_func) + + self.assertEqual(impl(), cfunc()) + + def test_nested_escape(self): + + def impl_factory(consumer_func): + def impl(): + def inner(): + return 10 + + def innerinner(x): + return x() + return consumer_func(inner, innerinner) + return impl + + cfunc = njit(impl_factory(consumer2arg)) + impl = impl_factory(consumer2arg.py_func) + + self.assertEqual(impl(), cfunc()) + + def test_closure_in_escaper(self): + + def impl_factory(consumer_func): + def impl(): + def callinner(): + def inner(): + return 10 + return inner() + return consumer_func(callinner) + return impl + + cfunc = njit(impl_factory(consumer)) + impl = impl_factory(consumer.py_func) + + self.assertEqual(impl(), cfunc()) + + def test_close_over_consts(self): + + def impl_factory(consumer_func): + def impl(): + y = 10 + + def callinner(z): + return y + z + _global + return consumer_func(callinner, 6) + return impl + + cfunc = njit(impl_factory(consumer)) + impl = impl_factory(consumer.py_func) + + self.assertEqual(impl(), cfunc()) + + def test_close_over_consts_w_args(self): + + def impl_factory(consumer_func): + def impl(x): + y = 10 + + def callinner(z): + return y + z + _global + return consumer_func(callinner, x) + return impl + + cfunc = njit(impl_factory(consumer)) + impl = impl_factory(consumer.py_func) + + a = 5 + self.assertEqual(impl(a), cfunc(a)) + + def test_with_overload(self): + + def foo(func, *args): + nargs = len(args) + if nargs == 1: + return func(*args) + elif nargs == 2: + return func(func(*args)) + + @overload(foo) + def foo_ol(func, *args): + # specialise on the number of args, as per `foo` + nargs = len(args) + if nargs == 1: + def impl(func, *args): + return func(*args) + return impl + elif nargs == 2: + def impl(func, *args): + return func(func(*args)) + 
return impl + + def impl_factory(consumer_func): + def impl(x): + y = 10 + + def callinner(*z): + return y + np.sum(np.asarray(z)) + _global + # run both specialisations, 1 arg, and 2 arg. + return foo(callinner, x), foo(callinner, x, x) + return impl + + cfunc = njit(impl_factory(consumer)) + impl = impl_factory(consumer.py_func) + + a = 5 + self.assertEqual(impl(a), cfunc(a)) + + def test_basic_apply_like_case(self): + def apply(array, func): + return func(array) + + @overload(apply) + def ov_apply(array, func): + return lambda array, func: func(array) + + def impl(array): + def mul10(x): + return x * 10 + return apply(array, mul10) + + cfunc = njit(impl) + + a = np.arange(10) + np.testing.assert_allclose(impl(a), cfunc(a)) + + @unittest.skip("Needs option/flag inheritance to work") + def test_jit_option_inheritance(self): + + def impl_factory(consumer_func): + def impl(x): + def inner(val): + return 1 / val + return consumer_func(inner, x) + return impl + + cfunc = njit(error_model='numpy')(impl_factory(consumer)) + impl = impl_factory(consumer.py_func) + + a = 0 + self.assertEqual(impl(a), cfunc(a)) + + # this needs true SSA to be able to work correctly, check error for now + def test_multiply_defined_freevar(self): + + @njit + def impl(c): + if c: + x = 3 + + def inner(y): + return y + x + + r = consumer(inner, 1) + else: + x = 6 + + def inner(y): + return y + x + + r = consumer(inner, 2) + return r + + with self.assertRaises(errors.TypingError) as e: + impl(1) + + self.assertIn("Cannot capture a constant value for variable", + str(e.exception)) + + def test_non_const_in_escapee(self): + + @njit + def impl(x): + z = np.arange(x) + + def inner(val): + return 1 + z + val # z is non-const freevar + return consumer(inner, x) + + with self.assertRaises(errors.TypingError) as e: + impl(1) + + self.assertIn("Cannot capture the non-constant value associated", + str(e.exception)) + + def test_escape_with_kwargs(self): + + def impl_factory(consumer_func): + def impl(): 
+ t = 12 + + def inner(a, b, c, mydefault1=123, mydefault2=456): + z = 4 + return mydefault1 + mydefault2 + z + t + a + b + c + # this is awkward, top and tail closure inlining with a escapees + # in the middle that do/don't have defaults. + return (inner(1, 2, 5, 91, 53), + consumer_func(inner, 1, 2, 3, 73), + consumer_func(inner, 1, 2, 3,), + inner(1, 2, 4)) + return impl + + cfunc = njit(impl_factory(consumer)) + impl = impl_factory(consumer.py_func) + + np.testing.assert_allclose(impl(), cfunc()) + + def test_escape_with_kwargs_override_kwargs(self): + + @njit + def specialised_consumer(func, *args): + x, y, z = args # unpack to avoid `CALL_FUNCTION_EX` + a = func(x, y, z, mydefault1=1000) + b = func(x, y, z, mydefault2=1000) + c = func(x, y, z, mydefault1=1000, mydefault2=1000) + return a + b + c + + def impl_factory(consumer_func): + def impl(): + t = 12 + + def inner(a, b, c, mydefault1=123, mydefault2=456): + z = 4 + return mydefault1 + mydefault2 + z + t + a + b + c + # this is awkward, top and tail closure inlining with a escapees + # in the middle that get defaults specified in the consumer + return (inner(1, 2, 5, 91, 53), + consumer_func(inner, 1, 2, 11), + consumer_func(inner, 1, 2, 3,), + inner(1, 2, 4)) + return impl + + cfunc = njit(impl_factory(specialised_consumer)) + impl = impl_factory(specialised_consumer.py_func) + + np.testing.assert_allclose(impl(), cfunc()) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_mandelbrot.py b/venv/lib/python3.10/site-packages/numba/tests/test_mandelbrot.py new file mode 100644 index 0000000000000000000000000000000000000000..7a9fc2dd1411e6f52c77654e9dd51e87a612b0a0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_mandelbrot.py @@ -0,0 +1,28 @@ +import unittest +from numba import njit +from numba.core import types + + +def is_in_mandelbrot(c): + i = 0 + z = 0.0j + for i in range(100): + z = z ** 2 + c + if (z.real * z.real + 
z.imag * z.imag) >= 4: + return False + return True + + +class TestMandelbrot(unittest.TestCase): + + def test_mandelbrot(self): + pyfunc = is_in_mandelbrot + cfunc = njit((types.complex64,))(pyfunc) + + points = [0+0j, 1+0j, 0+1j, 1+1j, 0.1+0.1j] + for p in points: + self.assertEqual(cfunc(p), pyfunc(p)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_mangling.py b/venv/lib/python3.10/site-packages/numba/tests/test_mangling.py new file mode 100644 index 0000000000000000000000000000000000000000..ad1ff95c8f4115cb53fa7cfc111d9dead2bb2bc3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_mangling.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +""" +Test function name mangling. +The mangling affects the ABI of numba compiled binaries. +""" +from numba.core import types +from numba.core.funcdesc import default_mangler +from numba.tests.support import unittest, TestCase + + +class TestMangling(TestCase): + def test_one_args(self): + fname = 'foo' + argtypes = types.int32, + name = default_mangler(fname, argtypes) + self.assertEqual(name, '_Z3fooi') + + def test_two_args(self): + fname = 'foo' + argtypes = types.int32, types.float32 + name = default_mangler(fname, argtypes) + self.assertEqual(name, '_Z3fooif') + + def test_unicode_fname(self): + fname = u'foಠ' + argtypes = types.int32, types.float32 + name = default_mangler(fname, argtypes) + self.assertIsInstance(name, str) + # manually encode it + unichar = fname[2] + enc = ''.join('_{:02x}'.format(c) + for c in unichar.encode('utf8')) + text = 'fo' + enc + expect = '_Z{}{}if'.format(len(text), text) + self.assertEqual(name, expect) + # ensure result chars are in the right charset + self.assertRegex(name, r'^_Z[a-zA-Z0-9_\$]+$') + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_map_filter_reduce.py b/venv/lib/python3.10/site-packages/numba/tests/test_map_filter_reduce.py 
new file mode 100644 index 0000000000000000000000000000000000000000..c6f46a156f04f804030e2c391fc022b9b1d8aefb --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_map_filter_reduce.py @@ -0,0 +1,91 @@ +from numba import njit + +from functools import reduce +import unittest + + +class TestMap(unittest.TestCase): + + def test_basic_map_external_func(self): + func = njit(lambda x: x + 10) + + def impl(): + return [y for y in map(func, range(10))] + + cfunc = njit(impl) + + self.assertEqual(impl(), cfunc()) + + def test_basic_map_closure(self): + def impl(): + return [y for y in map(lambda x: x + 10, range(10))] + + cfunc = njit(impl) + + self.assertEqual(impl(), cfunc()) + + def test_basic_map_closure_multiple_iterator(self): + def impl(): + args = range(10), range(10, 20) + return [y for y in map(lambda a, b: (a + 10, b + 5), *args)] + + cfunc = njit(impl) + + self.assertEqual(impl(), cfunc()) + + +class TestFilter(unittest.TestCase): + + def test_basic_filter_external_func(self): + func = njit(lambda x: x > 0) + + def impl(): + return [y for y in filter(func, range(-10, 10))] + + cfunc = njit(impl) + + self.assertEqual(impl(), cfunc()) + + def test_basic_filter_closure(self): + def impl(): + return [y for y in filter(lambda x: x > 0, range(-10, 10))] + + cfunc = njit(impl) + + self.assertEqual(impl(), cfunc()) + + def test_basic_filter_none_func(self): + def impl(): + return [y for y in filter(None, range(-10, 10))] + + cfunc = njit(impl) + + self.assertEqual(impl(), cfunc()) + + +class TestReduce(unittest.TestCase): + + def test_basic_reduce_external_func(self): + func = njit(lambda x, y: x + y) + + def impl(): + return reduce(func, range(-10, 10)) + + cfunc = njit(impl) + + self.assertEqual(impl(), cfunc()) + + def test_basic_reduce_closure(self): + + def impl(): + def func(x, y): + return x + y + return reduce(func, range(-10, 10), 100) + + cfunc = njit(impl) + + self.assertEqual(impl(), cfunc()) + + +if __name__ == '__main__': + unittest.main() 
diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_mathlib.py b/venv/lib/python3.10/site-packages/numba/tests/test_mathlib.py new file mode 100644 index 0000000000000000000000000000000000000000..82863574ef40d4ef75decb7dfe3e4a814e29e568 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_mathlib.py @@ -0,0 +1,559 @@ +import itertools +import math +import sys +import unittest +import warnings + +import numpy as np + +from numba import njit, types +from numba.tests.support import TestCase +from numba.np import numpy_support + + +def sin(x): + return math.sin(x) + + +def cos(x): + return math.cos(x) + + +def tan(x): + return math.tan(x) + + +def sinh(x): + return math.sinh(x) + + +def cosh(x): + return math.cosh(x) + + +def tanh(x): + return math.tanh(x) + + +def asin(x): + return math.asin(x) + + +def acos(x): + return math.acos(x) + + +def atan(x): + return math.atan(x) + + +def atan2(y, x): + return math.atan2(y, x) + + +def asinh(x): + return math.asinh(x) + + +def acosh(x): + return math.acosh(x) + + +def atanh(x): + return math.atanh(x) + + +def sqrt(x): + return math.sqrt(x) + + +def npy_sqrt(x): + return np.sqrt(x) + + +def exp(x): + return math.exp(x) + + +def expm1(x): + return math.expm1(x) + + +def log(x): + return math.log(x) + + +def log1p(x): + return math.log1p(x) + + +def log10(x): + return math.log10(x) + + +def log2(x): + return math.log2(x) + + +def floor(x): + return math.floor(x) + + +def ceil(x): + return math.ceil(x) + + +def trunc(x): + return math.trunc(x) + + +def isnan(x): + return math.isnan(x) + + +def isinf(x): + return math.isinf(x) + + +def isfinite(x): + return math.isfinite(x) + + +def hypot(x, y): + return math.hypot(x, y) + + +def nextafter(x, y): + return math.nextafter(x, y) + + +def degrees(x): + return math.degrees(x) + + +def radians(x): + return math.radians(x) + + +def erf(x): + return math.erf(x) + + +def erfc(x): + return math.erfc(x) + + +def gamma(x): + return math.gamma(x) + + +def 
lgamma(x): + return math.lgamma(x) + + +def pow(x, y): + return math.pow(x, y) + +def gcd(x, y): + return math.gcd(x, y) + +def copysign(x, y): + return math.copysign(x, y) + + +def frexp(x): + return math.frexp(x) + + +def ldexp(x, e): + return math.ldexp(x, e) + + +def get_constants(): + return math.pi, math.e + + +class TestMathLib(TestCase): + + def test_constants(self): + cfunc = njit(get_constants) + self.assertPreciseEqual(cfunc(), cfunc.py_func()) + + def run_unary(self, pyfunc, x_types, x_values, prec='exact', **kwargs): + cfunc = njit(pyfunc) + for tx, vx in zip(x_types, x_values): + got = cfunc(vx) + expected = pyfunc(vx) + actual_prec = 'single' if tx is types.float32 else prec + msg = 'for input %r' % (vx,) + self.assertPreciseEqual(got, expected, prec=actual_prec, msg=msg, + **kwargs) + + def run_binary(self, pyfunc, x_types, x_values, y_values, prec='exact'): + cfunc = njit(pyfunc) + for ty, x, y in zip(x_types, x_values, y_values): + got = cfunc(x, y) + expected = pyfunc(x, y) + actual_prec = 'single' if ty is types.float32 else prec + msg = 'for inputs (%r, %r)' % (x, y) + self.assertPreciseEqual(got, expected, prec=actual_prec, msg=msg) + + def check_predicate_func(self, pyfunc): + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float32, types.float32, + types.float64, types.float64, types.float64] + x_values = [0, 0, 0, 0, 0, 0, + float('inf'), 0.0, float('nan'), + float('inf'), 0.0, float('nan')] + self.run_unary(pyfunc, x_types, x_values) + + def test_sin(self): + pyfunc = sin + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [-2, -1, -2, 2, 1, 2, .1, .2] + self.run_unary(pyfunc, x_types, x_values) + + @unittest.skipIf(sys.platform == 'win32', + "not exactly equal on win32 (issue #597)") + def test_cos(self): + pyfunc = cos + x_types = [types.int16, types.int32, types.int64, + 
types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [-2, -1, -2, 2, 1, 2, .1, .2] + self.run_unary(pyfunc, x_types, x_values) + + def test_tan(self): + pyfunc = tan + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [-2, -1, -2, 2, 1, 2, .1, .2] + self.run_unary(pyfunc, x_types, x_values) + + def test_sqrt(self): + pyfunc = sqrt + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [2, 1, 2, 2, 1, 2, .1, .2] + self.run_unary(pyfunc, x_types, x_values) + + def test_npy_sqrt(self): + pyfunc = npy_sqrt + x_values = [2, 1, 2, 2, 1, 2, .1, .2] + # XXX poor precision for int16 inputs + x_types = [types.int16, types.uint16] + self.run_unary(pyfunc, x_types, x_values, prec='single') + x_types = [types.int32, types.int64, + types.uint32, types.uint64, + types.float32, types.float64] + self.run_unary(pyfunc, x_types, x_values) + + def test_exp(self): + pyfunc = exp + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [-2, -1, -2, 2, 1, 2, .1, .2] + self.run_unary(pyfunc, x_types, x_values) + + def test_expm1(self): + pyfunc = expm1 + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [-2, -1, -2, 2, 1, 2, .1, .2] + self.run_unary(pyfunc, x_types, x_values) + + def test_log(self): + pyfunc = log + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [1, 10, 100, 1000, 100000, 1000000, 0.1, 1.1] + self.run_unary(pyfunc, x_types, x_values) + + def test_log1p(self): + pyfunc = log1p + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + 
x_values = [1, 10, 100, 1000, 100000, 1000000, 0.1, 1.1] + self.run_unary(pyfunc, x_types, x_values) + + def test_log10(self): + pyfunc = log10 + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [1, 10, 100, 1000, 100000, 1000000, 0.1, 1.1] + self.run_unary(pyfunc, x_types, x_values) + + def test_log2(self): + pyfunc = log2 + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [1, 10, 100, 1000, 100000, 1000000, 0.1, 1.1] + self.run_unary(pyfunc, x_types, x_values) + + def test_asin(self): + pyfunc = asin + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [1, 1, 1, 1, 1, 1, 1., 1.] + self.run_unary(pyfunc, x_types, x_values) + + def test_acos(self): + pyfunc = acos + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [1, 1, 1, 1, 1, 1, 1., 1.] + self.run_unary(pyfunc, x_types, x_values) + + def test_atan(self): + pyfunc = atan + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [-2, -1, -2, 2, 1, 2, .1, .2] + self.run_unary(pyfunc, x_types, x_values) + + def test_atan2(self): + pyfunc = atan2 + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [-2, -1, -2, 2, 1, 2, .1, .2] + y_values = [x * 2 for x in x_values] + self.run_binary(pyfunc, x_types, x_values, y_values) + + def test_asinh(self): + pyfunc = asinh + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [1, 1, 1, 1, 1, 1, 1., 1.] 
+ self.run_unary(pyfunc, x_types, x_values, prec='double') + + def test_acosh(self): + pyfunc = acosh + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [1, 1, 1, 1, 1, 1, 1., 1.] + self.run_unary(pyfunc, x_types, x_values) + + def test_atanh(self): + pyfunc = atanh + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [0, 0, 0, 0, 0, 0, 0.1, 0.1] + self.run_unary(pyfunc, x_types, x_values, prec='double') + + def test_sinh(self): + pyfunc = sinh + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [1, 1, 1, 1, 1, 1, 1., 1.] + self.run_unary(pyfunc, x_types, x_values) + + def test_cosh(self): + pyfunc = cosh + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [1, 1, 1, 1, 1, 1, 1., 1.] 
+ self.run_unary(pyfunc, x_types, x_values) + + def test_tanh(self): + pyfunc = tanh + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [0, 0, 0, 0, 0, 0, 0.1, 0.1] + self.run_unary(pyfunc, x_types, x_values) + + def test_floor(self): + pyfunc = floor + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [0, 0, 0, 0, 0, 0, 0.1, 1.9] + self.run_unary(pyfunc, x_types, x_values) + + def test_ceil(self): + pyfunc = ceil + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [0, 0, 0, 0, 0, 0, 0.1, 1.9] + self.run_unary(pyfunc, x_types, x_values) + + def test_trunc(self): + pyfunc = trunc + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [0, 0, 0, 0, 0, 0, 0.1, 1.9] + self.run_unary(pyfunc, x_types, x_values) + + def test_isnan(self): + self.check_predicate_func(isnan) + + def test_isinf(self): + self.check_predicate_func(isinf) + + def test_isfinite(self): + self.check_predicate_func(isfinite) + + def test_hypot(self): + pyfunc = hypot + x_types = [types.int64, types.uint64, + types.float32, types.float64] + x_values = [1, 2, 3, 4, 5, 6, .21, .34] + y_values = [x + 2 for x in x_values] + # Issue #563: precision issues with math.hypot() under Windows. + prec = 'single' + self.run_binary(pyfunc, x_types, x_values, y_values, prec) + # Check that values that overflow in naive implementations do not + # in the numba impl + + def naive_hypot(x, y): + return math.sqrt(x * x + y * y) + + cfunc = njit(pyfunc) + for fltty in (types.float32, types.float64): + dt = numpy_support.as_dtype(fltty).type + val = dt(np.finfo(dt).max / 30.) 
+ nb_ans = cfunc(val, val) + self.assertPreciseEqual(nb_ans, pyfunc(val, val), prec='single') + self.assertTrue(np.isfinite(nb_ans)) + + with warnings.catch_warnings(): + warnings.simplefilter("error", RuntimeWarning) + self.assertRaisesRegex(RuntimeWarning, + 'overflow encountered in .*scalar', + naive_hypot, val, val) + + def test_nextafter(self): + pyfunc = nextafter + x_types = [types.float32, types.float64, + types.int32, types.int64, + types.uint32, types.uint64] + x_values = [0.0, .21, .34, 1005382.042, -25.328] + y1_values = [x + 2 for x in x_values] + y2_values = [x - 2 for x in x_values] + + self.run_binary(pyfunc, x_types, x_values, y1_values) + self.run_binary(pyfunc, x_types, x_values, y2_values) + + # Test using pos/neg inf + self.run_binary(pyfunc, x_types, [0.0, -.5, .5], [math.inf]*3) + self.run_binary(pyfunc, x_types, [0.0, -.5, .5], [-math.inf]*3) + + # if both args to nextafter are equal, then it is returned unchanged. + self.run_binary(pyfunc, x_types, x_values, x_values) + + def test_degrees(self): + pyfunc = degrees + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [1, 1, 1, 1, 1, 1, 1., 1.] + self.run_unary(pyfunc, x_types, x_values) + + def test_radians(self): + pyfunc = radians + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [1, 1, 1, 1, 1, 1, 1., 1.] 
+ self.run_unary(pyfunc, x_types, x_values) + + def test_erf(self): + pyfunc = erf + x_values = [1., 1., -1., -0.0, 0.0, 0.5, 5, float('inf')] + x_types = [types.float32, types.float64] * (len(x_values) // 2) + self.run_unary(pyfunc, x_types, x_values, prec='double', ulps=2) + + def test_erfc(self): + pyfunc = erfc + x_values = [1., 1., -1., -0.0, 0.0, 0.5, 5, float('inf')] + x_types = [types.float32, types.float64] * (len(x_values) // 2) + self.run_unary(pyfunc, x_types, x_values, prec='double', ulps=4) + + def test_gamma(self): + pyfunc = gamma + x_values = [1., -0.9, -0.5, 0.5] + x_types = [types.float32, types.float64] * (len(x_values) // 2) + self.run_unary(pyfunc, x_types, x_values, prec='double', ulps=3) + x_values = [-0.1, 0.1, 2.5, 10.1, 50., float('inf')] + x_types = [types.float64] * len(x_values) + self.run_unary(pyfunc, x_types, x_values, prec='double', ulps=8) + + def test_lgamma(self): + pyfunc = lgamma + x_values = [1., -0.9, -0.1, 0.1, 200., 1e10, 1e30, float('inf')] + x_types = [types.float32, types.float64] * (len(x_values) // 2) + self.run_unary(pyfunc, x_types, x_values, prec='double') + + def test_pow(self): + pyfunc = pow + x_types = [types.int16, types.int32, types.int64, + types.uint16, types.uint32, types.uint64, + types.float32, types.float64] + x_values = [-2, -1, -2, 2, 1, 2, .1, .2] + y_values = [x * 2 for x in x_values] + self.run_binary(pyfunc, x_types, x_values, y_values) + + def test_gcd(self): + from itertools import product, repeat, chain + pyfunc = gcd + signed_args = product( + sorted(types.signed_domain), *repeat((-2, -1, 0, 1, 2, 7, 10), 2) + ) + unsigned_args = product( + sorted(types.unsigned_domain), *repeat((0, 1, 2, 7, 9, 16), 2) + ) + x_types, x_values, y_values = zip(*chain(signed_args, unsigned_args)) + self.run_binary(pyfunc, x_types, x_values, y_values) + + def test_copysign(self): + pyfunc = copysign + value_types = [types.float32, types.float64] + values = [-2, -1, -0.0, 0.0, 1, 2, float('-inf'), float('inf'), + 
float('nan')] + x_types, x_values, y_values = list(zip( + *itertools.product(value_types, values, values))) + self.run_binary(pyfunc, x_types, x_values, y_values) + + def test_frexp(self): + pyfunc = frexp + x_types = [types.float32, types.float64] + x_values = [-2.5, -0.0, 0.0, 3.5, + float('-inf'), float('inf'), float('nan')] + self.run_unary(pyfunc, x_types, x_values, prec='exact') + + def test_ldexp(self): + pyfunc = ldexp + cfunc = njit(pyfunc) + for fltty in (types.float32, types.float64): + for args in [(2.5, -2), (2.5, 1), (0.0, 0), (0.0, 1), + (-0.0, 0), (-0.0, 1), + (float('inf'), 0), (float('-inf'), 0), + (float('nan'), 0)]: + msg = 'for input %r' % (args,) + self.assertPreciseEqual(cfunc(*args), pyfunc(*args)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_maxmin.py b/venv/lib/python3.10/site-packages/numba/tests/test_maxmin.py new file mode 100644 index 0000000000000000000000000000000000000000..ae62681328e0de5a6e69c9ea38ad79bea7de299d --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_maxmin.py @@ -0,0 +1,39 @@ +from numba import njit +from numba.core import types +import unittest + + +def domax3(a, b, c): + return max(a, b, c) + + +def domin3(a, b, c): + return min(a, b, c) + + +class TestMaxMin(unittest.TestCase): + def test_max3(self): + pyfunc = domax3 + argtys = (types.int32, types.float32, types.double) + cfunc = njit(argtys)(pyfunc) + + a = 1 + b = 2 + c = 3 + + self.assertEqual(pyfunc(a, b, c), cfunc(a, b, c)) + + def test_min3(self): + pyfunc = domin3 + argtys = (types.int32, types.float32, types.double) + cfunc = njit(argtys)(pyfunc) + + a = 1 + b = 2 + c = 3 + + self.assertEqual(pyfunc(a, b, c), cfunc(a, b, c)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_misc_coverage_support.py b/venv/lib/python3.10/site-packages/numba/tests/test_misc_coverage_support.py new file mode 100644 index 
0000000000000000000000000000000000000000..7dee5cdd5054dddbefffee8775fda191e46ab01b --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_misc_coverage_support.py @@ -0,0 +1,75 @@ +import unittest +from unittest.mock import patch + +from numba.tests.support import TestCase + +from numba import njit +from numba.core import ir +from numba.misc.coverage_support import NotifyLocBase, _the_registry + + +class TestMiscCoverageSupport(TestCase): + @TestCase.run_test_in_subprocess(envvars={"NUMBA_JIT_COVERAGE": "1"}) + def test_custom_loc_notifier(self): + class MyNotify(NotifyLocBase): + records = [] + + def notify(self, loc): + self.records.append(("NOTIFY", loc)) + + def close(self): + self.records.append(("CLOSE", None)) + + # Patch to install registry for testing + new_the_registry = _the_registry + [MyNotify] + gv = "numba.misc.coverage_support._the_registry" + with patch(gv, new_the_registry): + + @njit + def foo(): + return 123 + + res = foo() + + self.assertEqual(res, 123) + + # offset by +2 because: + # +1 for the decorator + # +1 for the `def` line + first_offset = 2 + offset = foo.__code__.co_firstlineno + first_offset + loc = ir.Loc(__file__, 1) + self.assertIn(("NOTIFY", loc.with_lineno(offset)), MyNotify.records) + self.assertIn(("CLOSE", None), MyNotify.records) + + # Test dead branch pruned + with patch(gv, new_the_registry): + cond = False + + @njit + def foo(): + if cond: + return 321 + return 123 + + res = foo() + + self.assertEqual(res, 123) + + # `if cond` line is compiled + offset = foo.__code__.co_firstlineno + first_offset + self.assertIn(("NOTIFY", loc.with_lineno(offset)), MyNotify.records) + + # ` return 321` line is not compiled + self.assertNotIn( + ("NOTIFY", loc.with_lineno(offset + 1)), MyNotify.records + ) + + # ` return 123` line is compiled + self.assertIn(("NOTIFY", loc.with_lineno(offset + 2)), MyNotify.records) + + self.assertIn(("CLOSE", None), MyNotify.records) + + +if __name__ == "__main__": + unittest.main() diff 
--git a/venv/lib/python3.10/site-packages/numba/tests/test_mixed_tuple_unroller.py b/venv/lib/python3.10/site-packages/numba/tests/test_mixed_tuple_unroller.py new file mode 100644 index 0000000000000000000000000000000000000000..455169def76ad17b350af73e445c847fbbe37708 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_mixed_tuple_unroller.py @@ -0,0 +1,2047 @@ +from collections import namedtuple +import numpy as np + +from numba.tests.support import (TestCase, MemoryLeakMixin, + skip_parfors_unsupported, captured_stdout) +from numba import njit, typed, literal_unroll, prange +from numba.core import types, errors, ir +from numba.testing import unittest +from numba.core.extending import overload +from numba.core.compiler_machinery import (PassManager, register_pass, + FunctionPass, AnalysisPass) +from numba.core.compiler import CompilerBase +from numba.core.untyped_passes import (FixupArgs, TranslateByteCode, + IRProcessing, InlineClosureLikes, + SimplifyCFG, IterLoopCanonicalization, + LiteralUnroll, PreserveIR) +from numba.core.typed_passes import (NopythonTypeInference, IRLegalization, + NoPythonBackend, PartialTypeInference, + NativeLowering) +from numba.core.ir_utils import (compute_cfg_from_blocks, flatten_labels) +from numba.core.types.functions import _header_lead + +_X_GLOBAL = (10, 11) + + +class TestLiteralTupleInterpretation(MemoryLeakMixin, TestCase): + + def check(self, func, var): + cres = func.overloads[func.signatures[0]] + ty = cres.fndesc.typemap[var] + self.assertTrue(isinstance(ty, types.Tuple)) + for subty in ty: + self.assertTrue(isinstance(subty, types.Literal), "non literal") + + def test_homogeneous_literal(self): + @njit + def foo(): + x = (1, 2, 3) + return x[1] + + self.assertEqual(foo(), foo.py_func()) + self.check(foo, 'x') + + def test_heterogeneous_literal(self): + @njit + def foo(): + x = (1, 2, 3, 'a') + return x[3] + + self.assertEqual(foo(), foo.py_func()) + self.check(foo, 'x') + + def test_non_literal(self): 
+ @njit + def foo(): + x = (1, 2, 3, 'a', 1j) + return x[4] + + self.assertEqual(foo(), foo.py_func()) + with self.assertRaises(AssertionError) as e: + self.check(foo, 'x') + + self.assertIn("non literal", str(e.exception)) + + +@register_pass(mutates_CFG=False, analysis_only=False) +class ResetTypeInfo(FunctionPass): + _name = "reset_the_type_information" + + def __init__(self): + FunctionPass.__init__(self) + + def run_pass(self, state): + state.typemap = None + state.return_type = None + state.calltypes = None + return True + + +class TestLoopCanonicalisation(MemoryLeakMixin, TestCase): + + def get_pipeline(use_canonicaliser, use_partial_typing=False): + class NewCompiler(CompilerBase): + + def define_pipelines(self): + pm = PassManager("custom_pipeline") + + # untyped + pm.add_pass(TranslateByteCode, "analyzing bytecode") + pm.add_pass(IRProcessing, "processing IR") + pm.add_pass(InlineClosureLikes, + "inline calls to locally defined closures") + if use_partial_typing: + pm.add_pass(PartialTypeInference, "do partial typing") + if use_canonicaliser: + pm.add_pass(IterLoopCanonicalization, "Canonicalise loops") + pm.add_pass(SimplifyCFG, "Simplify the CFG") + + # typed + if use_partial_typing: + pm.add_pass(ResetTypeInfo, "resets the type info state") + + pm.add_pass(NopythonTypeInference, "nopython frontend") + + # legalise + pm.add_pass(IRLegalization, "ensure IR is legal") + + # preserve + pm.add_pass(PreserveIR, "save IR for later inspection") + + # lower + pm.add_pass(NativeLowering, "native lowering") + pm.add_pass(NoPythonBackend, "nopython mode backend") + + # finalise the contents + pm.finalize() + + return [pm] + return NewCompiler + + # generate variants + LoopIgnoringCompiler = get_pipeline(False) + LoopCanonicalisingCompiler = get_pipeline(True) + TypedLoopCanonicalisingCompiler = get_pipeline(True, True) + + def test_simple_loop_in_depth(self): + """ This heavily checks a simple loop transform """ + + def get_info(pipeline): + 
@njit(pipeline_class=pipeline) + def foo(tup): + acc = 0 + for i in tup: + acc += i + return acc + + x = (1, 2, 3) + self.assertEqual(foo(x), foo.py_func(x)) + cres = foo.overloads[foo.signatures[0]] + func_ir = cres.metadata['preserved_ir'] + return func_ir, cres.fndesc + + ignore_loops_ir, ignore_loops_fndesc = \ + get_info(self.LoopIgnoringCompiler) + canonicalise_loops_ir, canonicalise_loops_fndesc = \ + get_info(self.LoopCanonicalisingCompiler) + + # check CFG is the same + def compare_cfg(a, b): + a_cfg = compute_cfg_from_blocks(flatten_labels(a.blocks)) + b_cfg = compute_cfg_from_blocks(flatten_labels(b.blocks)) + self.assertEqual(a_cfg, b_cfg) + + compare_cfg(ignore_loops_ir, canonicalise_loops_ir) + + # check there's three more call types in the canonicalised one: + # len(tuple arg) + # range(of the len() above) + # getitem(tuple arg, index) + self.assertEqual(len(ignore_loops_fndesc.calltypes) + 3, + len(canonicalise_loops_fndesc.calltypes)) + + def find_getX(fd, op): + return [x for x in fd.calltypes.keys() + if isinstance(x, ir.Expr) and x.op == op] + + il_getiters = find_getX(ignore_loops_fndesc, "getiter") + self.assertEqual(len(il_getiters), 1) # tuple iterator + + cl_getiters = find_getX(canonicalise_loops_fndesc, "getiter") + self.assertEqual(len(cl_getiters), 1) # loop range iterator + + cl_getitems = find_getX(canonicalise_loops_fndesc, "getitem") + self.assertEqual(len(cl_getitems), 1) # tuple getitem induced by loop + + # check the value of the untransformed IR getiter is now the value of + # the transformed getitem + self.assertEqual(il_getiters[0].value.name, cl_getitems[0].value.name) + + # check the type of the transformed IR getiter is a range iter + range_inst = canonicalise_loops_fndesc.calltypes[cl_getiters[0]].args[0] + self.assertTrue(isinstance(range_inst, types.RangeType)) + + def test_transform_scope(self): + """ This checks the transform, when there's no typemap, will happily + transform a loop on something that's not tuple-like + 
""" + def get_info(pipeline): + @njit(pipeline_class=pipeline) + def foo(): + acc = 0 + for i in [1, 2, 3]: + acc += i + return acc + + self.assertEqual(foo(), foo.py_func()) + cres = foo.overloads[foo.signatures[0]] + func_ir = cres.metadata['preserved_ir'] + return func_ir, cres.fndesc + + ignore_loops_ir, ignore_loops_fndesc = \ + get_info(self.LoopIgnoringCompiler) + canonicalise_loops_ir, canonicalise_loops_fndesc = \ + get_info(self.LoopCanonicalisingCompiler) + + # check CFG is the same + def compare_cfg(a, b): + a_cfg = compute_cfg_from_blocks(flatten_labels(a.blocks)) + b_cfg = compute_cfg_from_blocks(flatten_labels(b.blocks)) + self.assertEqual(a_cfg, b_cfg) + + compare_cfg(ignore_loops_ir, canonicalise_loops_ir) + + # check there's three more call types in the canonicalised one: + # len(literal list) + # range(of the len() above) + # getitem(literal list arg, index) + self.assertEqual(len(ignore_loops_fndesc.calltypes) + 3, + len(canonicalise_loops_fndesc.calltypes)) + + def find_getX(fd, op): + return [x for x in fd.calltypes.keys() + if isinstance(x, ir.Expr) and x.op == op] + + il_getiters = find_getX(ignore_loops_fndesc, "getiter") + self.assertEqual(len(il_getiters), 1) # list iterator + + cl_getiters = find_getX(canonicalise_loops_fndesc, "getiter") + self.assertEqual(len(cl_getiters), 1) # loop range iterator + + cl_getitems = find_getX(canonicalise_loops_fndesc, "getitem") + self.assertEqual(len(cl_getitems), 1) # list getitem induced by loop + + # check the value of the untransformed IR getiter is now the value of + # the transformed getitem + self.assertEqual(il_getiters[0].value.name, cl_getitems[0].value.name) + + # check the type of the transformed IR getiter is a range iter + range_inst = canonicalise_loops_fndesc.calltypes[cl_getiters[0]].args[0] + self.assertTrue(isinstance(range_inst, types.RangeType)) + + @unittest.skip("Waiting for pass to be enabled for all tuples") + def test_influence_of_typed_transform(self): + """ This heavily 
checks a typed transformation only impacts tuple + induced loops""" + + def get_info(pipeline): + @njit(pipeline_class=pipeline) + def foo(tup): + acc = 0 + for i in range(4): + for y in tup: + for j in range(3): + acc += 1 + return acc + + x = (1, 2, 3) + self.assertEqual(foo(x), foo.py_func(x)) + cres = foo.overloads[foo.signatures[0]] + func_ir = cres.metadata['func_ir'] + return func_ir, cres.fndesc + + ignore_loops_ir, ignore_loops_fndesc = \ + get_info(self.LoopIgnoringCompiler) + canonicalise_loops_ir, canonicalise_loops_fndesc = \ + get_info(self.TypedLoopCanonicalisingCompiler) + + # check CFG is the same + def compare_cfg(a, b): + a_cfg = compute_cfg_from_blocks(flatten_labels(a.blocks)) + b_cfg = compute_cfg_from_blocks(flatten_labels(b.blocks)) + self.assertEqual(a_cfg, b_cfg) + + compare_cfg(ignore_loops_ir, canonicalise_loops_ir) + + # check there's three more call types in the canonicalised one: + # len(tuple arg) + # range(of the len() above) + # getitem(tuple arg, index) + self.assertEqual(len(ignore_loops_fndesc.calltypes) + 3, + len(canonicalise_loops_fndesc.calltypes)) + + def find_getX(fd, op): + return [x for x in fd.calltypes.keys() + if isinstance(x, ir.Expr) and x.op == op] + + il_getiters = find_getX(ignore_loops_fndesc, "getiter") + self.assertEqual(len(il_getiters), 3) # 1 * tuple + 2 * loop range + + cl_getiters = find_getX(canonicalise_loops_fndesc, "getiter") + self.assertEqual(len(cl_getiters), 3) # 3 * loop range iterator + + cl_getitems = find_getX(canonicalise_loops_fndesc, "getitem") + self.assertEqual(len(cl_getitems), 1) # tuple getitem induced by loop + + # check the value of the untransformed IR getiter is now the value of + # the transformed getitem + self.assertEqual(il_getiters[1].value.name, cl_getitems[0].value.name) + + # check the type of the transformed IR getiter's are all range iter + for x in cl_getiters: + range_inst = canonicalise_loops_fndesc.calltypes[x].args[0] + self.assertTrue(isinstance(range_inst, 
types.RangeType)) + + def test_influence_of_typed_transform_literal_unroll(self): + """ This heavily checks a typed transformation only impacts loops with + literal_unroll marker""" + + def get_info(pipeline): + @njit(pipeline_class=pipeline) + def foo(tup): + acc = 0 + for i in range(4): + for y in literal_unroll(tup): + for j in range(3): + acc += 1 + return acc + + x = (1, 2, 3) + self.assertEqual(foo(x), foo.py_func(x)) + cres = foo.overloads[foo.signatures[0]] + func_ir = cres.metadata['preserved_ir'] + return func_ir, cres.fndesc + + ignore_loops_ir, ignore_loops_fndesc = \ + get_info(self.LoopIgnoringCompiler) + canonicalise_loops_ir, canonicalise_loops_fndesc = \ + get_info(self.TypedLoopCanonicalisingCompiler) + + # check CFG is the same + def compare_cfg(a, b): + a_cfg = compute_cfg_from_blocks(flatten_labels(a.blocks)) + b_cfg = compute_cfg_from_blocks(flatten_labels(b.blocks)) + self.assertEqual(a_cfg, b_cfg) + + compare_cfg(ignore_loops_ir, canonicalise_loops_ir) + + # check there's three more call types in the canonicalised one: + # len(tuple arg) + # range(of the len() above) + # getitem(tuple arg, index) + self.assertEqual(len(ignore_loops_fndesc.calltypes) + 3, + len(canonicalise_loops_fndesc.calltypes)) + + def find_getX(fd, op): + return [x for x in fd.calltypes.keys() + if isinstance(x, ir.Expr) and x.op == op] + + il_getiters = find_getX(ignore_loops_fndesc, "getiter") + self.assertEqual(len(il_getiters), 3) # 1 * tuple + 2 * loop range + + cl_getiters = find_getX(canonicalise_loops_fndesc, "getiter") + self.assertEqual(len(cl_getiters), 3) # 3 * loop range iterator + + cl_getitems = find_getX(canonicalise_loops_fndesc, "getitem") + self.assertEqual(len(cl_getitems), 1) # tuple getitem induced by loop + + # check the value of the untransformed IR getiter is now the value of + # the transformed getitem + self.assertEqual(il_getiters[1].value.name, cl_getitems[0].value.name) + + # check the type of the transformed IR getiter's are all range iter 
+ for x in cl_getiters: + range_inst = canonicalise_loops_fndesc.calltypes[x].args[0] + self.assertTrue(isinstance(range_inst, types.RangeType)) + + @unittest.skip("Waiting for pass to be enabled for all tuples") + def test_lots_of_loops(self): + """ This heavily checks a simple loop transform """ + + def get_info(pipeline): + @njit(pipeline_class=pipeline) + def foo(tup): + acc = 0 + for i in tup: + acc += i + for j in tup + (4, 5, 6): + acc += 1 - j + if j > 5: + break + else: + acc -= 2 + for i in tup: + acc -= i % 2 + + return acc + + x = (1, 2, 3) + self.assertEqual(foo(x), foo.py_func(x)) + cres = foo.overloads[foo.signatures[0]] + func_ir = cres.metadata['preserved_ir'] + return func_ir, cres.fndesc + + ignore_loops_ir, ignore_loops_fndesc = \ + get_info(self.LoopIgnoringCompiler) + canonicalise_loops_ir, canonicalise_loops_fndesc = \ + get_info(self.LoopCanonicalisingCompiler) + + # check CFG is the same + def compare_cfg(a, b): + a_cfg = compute_cfg_from_blocks(flatten_labels(a.blocks)) + b_cfg = compute_cfg_from_blocks(flatten_labels(b.blocks)) + self.assertEqual(a_cfg, b_cfg) + + compare_cfg(ignore_loops_ir, canonicalise_loops_ir) + + # check there's three * N more call types in the canonicalised one: + # len(tuple arg) + # range(of the len() above) + # getitem(tuple arg, index) + self.assertEqual(len(ignore_loops_fndesc.calltypes) + 3 * 3, + len(canonicalise_loops_fndesc.calltypes)) + + def test_inlined_loops(self): + """ Checks a loop appearing from a closure """ + + def get_info(pipeline): + @njit(pipeline_class=pipeline) + def foo(tup): + def bar(n): + acc = 0 + for i in range(n): + acc += 1 + return acc + + acc = 0 + for i in tup: + acc += i + acc += bar(i) + + return acc + + x = (1, 2, 3) + self.assertEqual(foo(x), foo.py_func(x)) + cres = foo.overloads[foo.signatures[0]] + func_ir = cres.metadata['preserved_ir'] + return func_ir, cres.fndesc + + ignore_loops_ir, ignore_loops_fndesc = \ + get_info(self.LoopIgnoringCompiler) + canonicalise_loops_ir, 
canonicalise_loops_fndesc = \ + get_info(self.LoopCanonicalisingCompiler) + + # check CFG is the same + def compare_cfg(a, b): + a_cfg = compute_cfg_from_blocks(flatten_labels(a.blocks)) + b_cfg = compute_cfg_from_blocks(flatten_labels(b.blocks)) + self.assertEqual(a_cfg, b_cfg) + + compare_cfg(ignore_loops_ir, canonicalise_loops_ir) + + # check there's 2 * N - 1 more call types in the canonicalised one: + # The -1 comes from the closure being inlined and and the call removed. + # len(tuple arg) + # range(of the len() above) + # getitem(tuple arg, index) + self.assertEqual(len(ignore_loops_fndesc.calltypes) + 5, + len(canonicalise_loops_fndesc.calltypes)) + + +class TestMixedTupleUnroll(MemoryLeakMixin, TestCase): + + def test_01(self): + # test a case which is already in loop canonical form + @njit + def foo(idx, z): + a = (12, 12.7, 3j, 4, z, 2 * z) + acc = 0 + for i in range(len(literal_unroll(a))): + acc += a[i] + if acc.real < 26: + acc -= 1 + else: + break + return acc + + f = 9 + k = f + + self.assertEqual(foo(2, k), foo.py_func(2, k)) + + def test_02(self): + # same as test_1 but without the explicit loop canonicalisation + + @njit + def foo(idx, z): + x = (12, 12.7, 3j, 4, z, 2 * z) + acc = 0 + for a in literal_unroll(x): + acc += a + if acc.real < 26: + acc -= 1 + else: + break + return acc + + f = 9 + k = f + + self.assertEqual(foo(2, k), foo.py_func(2, k)) + + def test_03(self): + # two unrolls + @njit + def foo(idx, z): + x = (12, 12.7, 3j, 4, z, 2 * z) + y = ('foo', z, 2 * z) + acc = 0 + for a in literal_unroll(x): + acc += a + if acc.real < 26: + acc -= 1 + else: + for t in literal_unroll(y): + acc += t is False + break + return acc + + f = 9 + k = f + + self.assertEqual(foo(2, k), foo.py_func(2, k)) + + def test_04(self): + # mixed ref counted types + @njit + def foo(tup): + acc = 0 + for a in literal_unroll(tup): + acc += a.sum() + return acc + + n = 10 + tup = (np.ones((n,)), np.ones((n, n)), np.ones((n, n, n))) + self.assertEqual(foo(tup), 
foo.py_func(tup)) + + def test_05(self): + # mix unroll and static_getitem + @njit + def foo(tup1, tup2): + acc = 0 + for a in literal_unroll(tup1): + if a == 'a': + acc += tup2[0].sum() + elif a == 'b': + acc += tup2[1].sum() + elif a == 'c': + acc += tup2[2].sum() + elif a == 12: + acc += tup2[3].sum() + elif a == 3j: + acc += tup2[4].sum() + else: + raise RuntimeError("Unreachable") + return acc + + n = 10 + tup1 = ('a', 'b', 'c', 12, 3j,) + tup2 = (np.ones((n,)), np.ones((n, n)), np.ones((n, n, n)), + np.ones((n, n, n, n)), np.ones((n, n, n, n, n))) + self.assertEqual(foo(tup1, tup2), foo.py_func(tup1, tup2)) + + @unittest.skip("needs more clever branch prune") + def test_06(self): + # This wont work because both sides of the branch need typing as neither + # can be pruned by the current pruner + @njit + def foo(tup): + acc = 0 + str_buf = typed.List.empty_list(types.unicode_type) + for a in literal_unroll(tup): + if a == 'a': + str_buf.append(a) + else: + acc += a + return acc + + tup = ('a', 12) + self.assertEqual(foo(tup), foo.py_func(tup)) + + def test_07(self): + # A mix bag of stuff as an arg to a function that unifies as `intp`. 
+ @njit + def foo(tup): + acc = 0 + for a in literal_unroll(tup): + acc += len(a) + return acc + + n = 10 + tup = (np.ones((n,)), np.ones((n, n)), "ABCDEFGHJI", (1, 2, 3), + (1, 'foo', 2, 'bar'), {3, 4, 5, 6, 7}) + self.assertEqual(foo(tup), foo.py_func(tup)) + + def test_08(self): + # dispatch to functions + + @njit + def foo(tup1, tup2): + acc = 0 + for a in literal_unroll(tup1): + if a == 'a': + acc += tup2[0]() + elif a == 'b': + acc += tup2[1]() + elif a == 'c': + acc += tup2[2]() + return acc + + def gen(x): + def impl(): + return x + return njit(impl) + + tup1 = ('a', 'b', 'c', 12, 3j, ('f',)) + tup2 = (gen(1), gen(2), gen(3)) + self.assertEqual(foo(tup1, tup2), foo.py_func(tup1, tup2)) + + def test_09(self): + # illegal RHS, has a mixed tuple being index dynamically + + @njit + def foo(tup1, tup2): + acc = 0 + idx = 0 + for a in literal_unroll(tup1): + if a == 'a': + acc += tup2[idx] + elif a == 'b': + acc += tup2[idx] + elif a == 'c': + acc += tup2[idx] + idx += 1 + return idx, acc + + @njit + def func1(): + return 1 + + @njit + def func2(): + return 2 + + @njit + def func3(): + return 3 + + tup1 = ('a', 'b', 'c') + tup2 = (1j, 1, 2) + + with self.assertRaises(errors.TypingError) as raises: + foo(tup1, tup2) + + self.assertIn(_header_lead, str(raises.exception)) + + def test_10(self): + # dispatch on literals triggering @overload resolution + + def dt(value): + if value == "apple": + return 1 + elif value == "orange": + return 2 + elif value == "banana": + return 3 + elif value == 0xca11ab1e: + return 0x5ca1ab1e + value + + @overload(dt, inline='always') + def ol_dt(li): + if isinstance(li, types.StringLiteral): + value = li.literal_value + if value == "apple": + def impl(li): + return 1 + elif value == "orange": + def impl(li): + return 2 + elif value == "banana": + def impl(li): + return 3 + return impl + elif isinstance(li, types.IntegerLiteral): + value = li.literal_value + if value == 0xca11ab1e: + def impl(li): + # close over the dispatcher :) + 
return 0x5ca1ab1e + value + return impl + + @njit + def foo(): + acc = 0 + for t in literal_unroll(('apple', 'orange', 'banana', 3390155550)): + acc += dt(t) + return acc + + self.assertEqual(foo(), foo.py_func()) + + def test_11(self): + + @njit + def foo(): + x = [] + z = ('apple', 'orange', 'banana') + for i in range(len(literal_unroll(z))): + t = z[i] + if t == "apple": + x.append("0") + elif t == "orange": + x.append(t) + elif t == "banana": + x.append("2.0") + return x + + self.assertEqual(foo(), foo.py_func()) + + def test_11a(self): + + @njit + def foo(): + x = typed.List() + z = ('apple', 'orange', 'banana') + for i in range(len(literal_unroll(z))): + t = z[i] + if t == "apple": + x.append("0") + elif t == "orange": + x.append(t) + elif t == "banana": + x.append("2.0") + return x + + self.assertEqual(foo(), foo.py_func()) + + def test_12(self): + # unroll the same target twice + @njit + def foo(idx, z): + a = (12, 12.7, 3j, 4, z, 2 * z) + acc = 0 + for i in literal_unroll(a): + acc += i + if acc.real < 26: + acc -= 1 + else: + for x in literal_unroll(a): + acc += x + break + if a[0] < 23: + acc += 2 + return acc + + f = 9 + k = f + + self.assertEqual(foo(2, k), foo.py_func(2, k)) + + def test_13(self): + # nesting unrolls is illegal + @njit + def foo(idx, z): + a = (12, 12.7, 3j, 4, z, 2 * z) + acc = 0 + for i in literal_unroll(a): + acc += i + if acc.real < 26: + acc -= 1 + else: + for x in literal_unroll(a): + for j in literal_unroll(a): + acc += j + acc += x + for x in literal_unroll(a): + acc += x + for x in literal_unroll(a): + acc += x + if a[0] < 23: + acc += 2 + return acc + + f = 9 + k = f + + with self.assertRaises(errors.UnsupportedError) as raises: + foo(2, k) + + self.assertIn("Nesting of literal_unroll is unsupported", + str(raises.exception)) + + def test_14(self): + # unituple unroll can return derivative of the induction var + + @njit + def foo(): + x = (1, 2, 3, 4) + acc = 0 + for a in literal_unroll(x): + acc += a + return a + + 
self.assertEqual(foo(), foo.py_func()) + + def test_15(self): + # mixed tuple unroll cannot return derivative of the induction var + + @njit + def foo(x): + acc = 0 + for a in literal_unroll(x): + acc += len(a) + return a + + n = 5 + tup = (np.ones((n,)), np.ones((n, n)), "ABCDEFGHJI", (1, 2, 3), + (1, 'foo', 2, 'bar'), {3, 4, 5, 6, 7}) + + with self.assertRaises(errors.TypingError) as raises: + foo(tup) + + self.assertIn("Cannot unify", str(raises.exception)) + + def test_16(self): + # unituple slice and unroll is ok + + def dt(value): + if value == 1000: + return "a" + elif value == 2000: + return "b" + elif value == 3000: + return "c" + elif value == 4000: + return "d" + + @overload(dt, inline='always') + def ol_dt(li): + if isinstance(li, types.IntegerLiteral): + value = li.literal_value + if value == 1000: + def impl(li): + return "a" + elif value == 2000: + def impl(li): + return "b" + elif value == 3000: + def impl(li): + return "c" + elif value == 4000: + def impl(li): + return "d" + return impl + + @njit + def foo(): + x = (1000, 2000, 3000, 4000) + acc = "" + for a in literal_unroll(x[:2]): + acc += dt(a) + return acc + + self.assertEqual(foo(), foo.py_func()) + + def test_17(self): + # mixed tuple slice and unroll is ok + + def dt(value): + if value == 1000: + return "a" + elif value == 2000: + return "b" + elif value == 3000: + return "c" + elif value == 4000: + return "d" + elif value == 'f': + return "EFF" + + @overload(dt, inline='always') + def ol_dt(li): + if isinstance(li, types.IntegerLiteral): + value = li.literal_value + if value == 1000: + def impl(li): + return "a" + elif value == 2000: + def impl(li): + return "b" + elif value == 3000: + def impl(li): + return "c" + elif value == 4000: + def impl(li): + return "d" + return impl + elif isinstance(li, types.StringLiteral): + value = li.literal_value + if value == 'f': + def impl(li): + return "EFF" + return impl + + @njit + def foo(): + x = (1000, 2000, 3000, 'f') + acc = "" + for a in 
literal_unroll(x[1:]): + acc += dt(a) + return acc + + self.assertEqual(foo(), foo.py_func()) + + def test_18(self): + # unituple backwards slice + @njit + def foo(): + x = (1000, 2000, 3000, 4000, 5000, 6000) + count = 0 + for a in literal_unroll(x[::-1]): + count += 1 + if a < 3000: + break + return count + + self.assertEqual(foo(), foo.py_func()) + + def test_19(self): + # mixed bag of refcounted + @njit + def foo(): + acc = 0 + l1 = [1, 2, 3, 4] + l2 = [10, 20] + tup = (l1, l2) + a1 = np.arange(20) + a2 = np.ones(5, dtype=np.complex128) + tup = (l1, a1, l2, a2) + for t in literal_unroll(tup): + acc += len(t) + return acc + + self.assertEqual(foo(), foo.py_func()) + + def test_20(self): + # testing partial type inference survives as the list append in the + # unrolled version is full inferable + @njit + def foo(): + l = [] + a1 = np.arange(20) + a2 = np.ones(5, dtype=np.complex128) + tup = (a1, a2) + for t in literal_unroll(tup): + l.append(t.sum()) + return l + + self.assertEqual(foo(), foo.py_func()) + + def test_21(self): + # unroll in closure that gets inlined + @njit + def foo(z): + b = (23, 23.9, 6j, 8) + + def bar(): + acc = 0 + for j in literal_unroll(b): + acc += j + return acc + outer_acc = 0 + for x in (1, 2, 3, 4): + outer_acc += bar() + x + + return outer_acc + + f = 9 + k = f + self.assertEqual(foo(k), foo.py_func(k)) + + def test_22(self): + # NOTE: This test "worked" as a side effect of a bug that was discovered + # during work that added support for Python 3.12. In the literal_unroll + # transform, there was an accidental overwrite of a dictionary key that + # could occur if nested unrolls happened to have the conditions: + # + # 1. outer unroll induction varible not used in the induced loop. + # 2. the `max_label` from numba.core.ir_utils was in a specific state + # such that a collision occurred. + # 3. the bug existed in the algorithm that checked for getitem access. 
+ # + # This exceedingly rare usecase is now considered illegal as a result, + # by banning this behaviour the analysis is considerably more simple. + # Further there is no loss of functionality as the outer loop can be + # trivially replaced (in the following) by using a `range(len())` based + # iteration over `a` as there's no need for the loop in `a` to be + # versioned! + + @njit + def foo(z): + a = (12, 12.7, 3j, 4, z, 2 * z, 'a') + b = (23, 23.9, 6j, 8) + + def bar(): + acc = 0 + for j in literal_unroll(b): + acc += j + return acc + acc = 0 + # this loop is induced in `x` but `x` is not used, there is a nest + # here by virtue of inlining + for x in literal_unroll(a): + acc += bar() + + return acc + + f = 9 + k = f + + with self.assertRaises(errors.UnsupportedError) as raises: + foo(k) + + self.assertIn("Nesting of literal_unroll is unsupported", + str(raises.exception)) + + def test_23(self): + # unroll from closure that ends up banned as it leads to nesting + @njit + def foo(z): + b = (23, 23.9, 6j, 8) + + def bar(): + acc = 0 + for j in literal_unroll(b): + acc += j + return acc + outer_acc = 0 + # this drives an inlined literal_unroll loop but also has access to + # the induction variable, this is a nested literal_unroll so is + # banned + for x in literal_unroll(b): + outer_acc += bar() + x + + return outer_acc + + f = 9 + k = f + + with self.assertRaises(errors.UnsupportedError) as raises: + foo(k) + + self.assertIn("Nesting of literal_unroll is unsupported", + str(raises.exception)) + + def test_24(self): + # unroll something unsupported + @njit + def foo(): + for x in literal_unroll("ABCDE"): + print(x) + + with self.assertRaises(errors.UnsupportedError) as raises: + foo() + + msg = "argument should be a tuple or a list of constant values" + self.assertIn(msg, str(raises.exception)) + + def test_25(self): + # use unroll by reference/alias + @njit + def foo(): + val = literal_unroll(((1, 2, 3), (2j, 3j), [1, 2], "xyz")) + alias1 = val + alias2 = alias1 
+ lens = [] + for x in alias2: + lens.append(len(x)) + return lens + + self.assertEqual(foo(), foo.py_func()) + + def test_26(self): + # var defined in unrolled body escapes + # untouched variable is untouched + # read only variable is only read + # mutated is muted correctly + @njit + def foo(z): + a = (12, 12.7, 3j, 4, z, 2 * z) + acc = 0 + count = 0 + untouched = 54 + read_only = 17 + mutated = np.empty((len(a),), dtype=np.complex128) + for x in literal_unroll(a): + acc += x + mutated[count] = x + count += 1 + escape = count + read_only + return escape, acc, untouched, read_only, mutated + + f = 9 + k = f + + self.assertPreciseEqual(foo(k), foo.py_func(k)) + + @skip_parfors_unsupported + def test_27(self): + # parfors loop in unrolled loop + @njit(parallel=True) + def foo(z): + a = (12, 12.7, 3j, 4, z, 2 * z) + acc = 0 + for x in literal_unroll(a): + for k in prange(10): + acc += 1 + return acc + + f = 9 + k = f + + self.assertEqual(foo(k), foo.py_func(k)) + + @skip_parfors_unsupported + def test_28(self): + # parfors reducing on the unrolled induction var + @njit(parallel=True) + def foo(z): + a = (12, 12.7, 3j, 4, z, 2 * z) + acc = 0 + for x in literal_unroll(a): + for k in prange(10): + acc += x + return acc + + f = 9 + k = f + + # summation is unstable + np.testing.assert_allclose(foo(k), foo.py_func(k)) + + @skip_parfors_unsupported + def test_29(self): + # This "works" but parfors is not producing a parallel loop + # TODO: fix + @njit(parallel=True) + def foo(z): + a = (12, 12.7, 3j, 4, z, 2 * z) + acc = 0 + for k in prange(10): + for x in literal_unroll(a): + acc += x + return acc + + f = 9 + k = f + + self.assertEqual(foo(k), foo.py_func(k)) + + def test_30(self): + # function escaping containing an unroll + @njit + def foo(): + const = 1234 + + def bar(t): + acc = 0 + a = (12, 12.7, 3j, 4) + for x in literal_unroll(a): + acc += x + const + return acc, t + return [x for x in map(bar, (1, 2))] + + self.assertEqual(foo(), foo.py_func()) + + def 
test_31(self): + # this is testing that generators can survive partial typing + # invalid function escaping, map uses zip which can't handle the mixed + # tuple + @njit + def foo(): + const = 1234 + + def bar(t): + acc = 0 + a = (12, 12.7, 3j, 4) + for x in literal_unroll(a): + acc += x + const + return acc, t + return [x for x in map(bar, (1, 2j))] + + with self.assertRaises(errors.TypingError) as raises: + foo() + + self.assertIn(_header_lead, str(raises.exception)) + self.assertIn("zip", str(raises.exception)) + + def test_32(self): + # test yielding from an unroll + @njit + def gen(a): + for x in literal_unroll(a): + yield x + + @njit + def foo(): + return [x for x in gen((1, 2.3, 4j,))] + + self.assertEqual(foo(), foo.py_func()) + + def test_33(self): + # test yielding from unroll in escaping function that is consumed and + # yields + + @njit + def consumer(func, arg): + yield func(arg) + + def get(cons): + @njit + def foo(): + def gen(a): + for x in literal_unroll(a): + yield x + return [next(x) for x in cons(gen, (1, 2.3, 4j,))] + return foo + + cfunc = get(consumer) + pyfunc = get(consumer.py_func).py_func + + self.assertEqual(cfunc(), pyfunc()) + + def test_34(self): + # mixed bag, redefinition of tuple + @njit + def foo(): + acc = 0 + l1 = [1, 2, 3, 4] + l2 = [10, 20] + if acc - 2 > 3: + tup = (l1, l2) + else: + a1 = np.arange(20) + a2 = np.ones(5, dtype=np.complex128) + tup = (l1, a1, l2, a2) + for t in literal_unroll(tup): + acc += len(t) + return acc + + with self.assertRaises(errors.UnsupportedError) as raises: + foo() + + self.assertIn("Invalid use of", str(raises.exception)) + self.assertIn("found multiple definitions of variable", + str(raises.exception)) + + +class TestConstListUnroll(MemoryLeakMixin, TestCase): + + def test_01(self): + + @njit + def foo(): + a = [12, 12.7, 3j, 4] + acc = 0 + for i in range(len(literal_unroll(a))): + acc += a[i] + if acc.real < 26: + acc -= 1 + else: + break + return acc + + self.assertEqual(foo(), foo.py_func()) 
+ + def test_02(self): + # same as test_1 but without the explicit loop canonicalisation + + @njit + def foo(): + x = [12, 12.7, 3j, 4] + acc = 0 + for a in literal_unroll(x): + acc += a + if acc.real < 26: + acc -= 1 + else: + break + return acc + + self.assertEqual(foo(), foo.py_func()) + + def test_03(self): + # two unrolls + @njit + def foo(): + x = [12, 12.7, 3j, 4] + y = ['foo', 8] + acc = 0 + for a in literal_unroll(x): + acc += a + if acc.real < 26: + acc -= 1 + else: + for t in literal_unroll(y): + acc += t is False + break + return acc + + self.assertEqual(foo(), foo.py_func()) + + def test_04(self): + # two unrolls, one is a const list, one is a tuple + @njit + def foo(): + x = [12, 12.7, 3j, 4] + y = ('foo', 8) + acc = 0 + for a in literal_unroll(x): + acc += a + if acc.real < 26: + acc -= 1 + else: + for t in literal_unroll(y): + acc += t is False + break + return acc + + self.assertEqual(foo(), foo.py_func()) + + def test_05(self): + # illegal, list has to be const + @njit + def foo(tup1, tup2): + acc = 0 + for a in literal_unroll(tup1): + if a[0] > 1: + acc += tup2[0].sum() + return acc + + n = 10 + tup1 = [np.zeros(10), np.zeros(10)] + tup2 = (np.ones((n,)), np.ones((n, n)), np.ones((n, n, n)), + np.ones((n, n, n, n)), np.ones((n, n, n, n, n))) + + with self.assertRaises(errors.UnsupportedError) as raises: + foo(tup1, tup2) + + msg = "Invalid use of literal_unroll with a function argument" + self.assertIn(msg, str(raises.exception)) + + def test_06(self): + # illegal: list containing non const + @njit + def foo(): + n = 10 + tup = [np.ones((n,)), np.ones((n, n)), "ABCDEFGHJI", (1, 2, 3), + (1, 'foo', 2, 'bar'), {3, 4, 5, 6, 7}] + acc = 0 + for a in literal_unroll(tup): + acc += len(a) + return acc + + with self.assertRaises(errors.UnsupportedError) as raises: + foo() + + self.assertIn("Found non-constant value at position 0", + str(raises.exception)) + + def test_7(self): + # dispatch on literals triggering @overload resolution + + def dt(value): + 
if value == "apple": + return 1 + elif value == "orange": + return 2 + elif value == "banana": + return 3 + elif value == 0xca11ab1e: + return 0x5ca1ab1e + value + + @overload(dt, inline='always') + def ol_dt(li): + if isinstance(li, types.StringLiteral): + value = li.literal_value + if value == "apple": + def impl(li): + return 1 + elif value == "orange": + def impl(li): + return 2 + elif value == "banana": + def impl(li): + return 3 + return impl + elif isinstance(li, types.IntegerLiteral): + value = li.literal_value + if value == 0xca11ab1e: + def impl(li): + # close over the dispatcher :) + return 0x5ca1ab1e + value + return impl + + @njit + def foo(): + acc = 0 + for t in literal_unroll(['apple', 'orange', 'banana', 3390155550]): + acc += dt(t) + return acc + + self.assertEqual(foo(), foo.py_func()) + + def test_8(self): + + @njit + def foo(): + x = [] + z = ['apple', 'orange', 'banana'] + for i in range(len(literal_unroll(z))): + t = z[i] + if t == "apple": + x.append("0") + elif t == "orange": + x.append(t) + elif t == "banana": + x.append("2.0") + return x + + self.assertEqual(foo(), foo.py_func()) + + def test_9(self): + # unroll the same target twice + @njit + def foo(idx, z): + a = [12, 12.7, 3j, 4] + acc = 0 + for i in literal_unroll(a): + acc += i + if acc.real < 26: + acc -= 1 + else: + for x in literal_unroll(a): + acc += x + break + if a[0] < 23: + acc += 2 + return acc + + f = 9 + k = f + + self.assertEqual(foo(2, k), foo.py_func(2, k)) + + def test_10(self): + # nesting unrolls is illegal + @njit + def foo(idx, z): + a = (12, 12.7, 3j, 4, z, 2 * z) + b = [12, 12.7, 3j, 4] + acc = 0 + for i in literal_unroll(a): + acc += i + if acc.real < 26: + acc -= 1 + else: + for x in literal_unroll(a): + for j in literal_unroll(b): + acc += j + acc += x + for x in literal_unroll(a): + acc += x + for x in literal_unroll(a): + acc += x + if a[0] < 23: + acc += 2 + return acc + + f = 9 + k = f + + with self.assertRaises(errors.UnsupportedError) as raises: + 
foo(2, k) + + self.assertIn("Nesting of literal_unroll is unsupported", + str(raises.exception)) + + def test_11(self): + # homogeneous const list unroll can return derivative of the induction + # var + + @njit + def foo(): + x = [1, 2, 3, 4] + acc = 0 + for a in literal_unroll(x): + acc += a + return a + + self.assertEqual(foo(), foo.py_func()) + + def test_12(self): + # mixed unroll cannot return derivative of the induction var + @njit + def foo(): + acc = 0 + x = [1, 2, 'a'] + for a in literal_unroll(x): + acc += bool(a) + return a + + with self.assertRaises(errors.TypingError) as raises: + foo() + + self.assertIn("Cannot unify", str(raises.exception)) + + def test_13(self): + # list slice is illegal + + @njit + def foo(): + x = [1000, 2000, 3000, 4000] + acc = 0 + for a in literal_unroll(x[:2]): + acc += a + return acc + + with self.assertRaises(errors.UnsupportedError) as raises: + foo() + + self.assertIn("Invalid use of literal_unroll", str(raises.exception)) + + def test_14(self): + # list mutate is illegal + + @njit + def foo(): + x = [1000, 2000, 3000, 4000] + acc = 0 + for a in literal_unroll(x): + acc += a + x.append(10) + return acc + + with self.assertRaises(errors.TypingError) as raises: + foo() + + self.assertIn("Unknown attribute 'append' of type Tuple", + str(raises.exception)) + + +class TestMore(TestCase): + def test_invalid_use_of_unroller(self): + @njit + def foo(): + x = (10, 20) + r = 0 + for a in literal_unroll(x, x): + r += a + return r + + with self.assertRaises(errors.UnsupportedError) as raises: + foo() + self.assertIn( + "literal_unroll takes one argument, found 2", + str(raises.exception), + ) + + def test_non_constant_list(self): + + @njit + def foo(y): + x = [10, y] + r = 0 + for a in literal_unroll(x): + r += a + return r + + with self.assertRaises(errors.UnsupportedError) as raises: + foo(10) + self.assertIn( + ("Found non-constant value at position 1 in a list argument to " + "literal_unroll"), + str(raises.exception) + ) + + 
@unittest.skip("numba.literally not supported yet") + def test_literally_constant_list(self): + # FAIL. May need to consider it in a future PR + from numba import literally + + @njit + def foo(y): + x = [10, literally(y)] + r = 0 + for a in literal_unroll(x): + r += a + return r + + # Found non-constant value at position 1 in a list argument to + # literal_unroll + foo(12) + + @njit + def bar(): + return foo(12) + + # Found non-constant value at position 1 in a list argument to + # literal_unroll + bar() + + @unittest.skip("inlining of foo doesn't have const prop so y isn't const") + def test_inlined_unroll_list(self): + @njit(inline='always') + def foo(y): + x = [10, y] + r = 0 + for a in literal_unroll(x): + r += a + return r + + @njit + def bar(): + return foo(12) + + self.assertEqual(bar(), 10 + 12) + + def test_unroll_tuple_arg(self): + @njit + def foo(y): + x = (10, y) + r = 0 + for a in literal_unroll(x): + r += a + return r + + self.assertEqual(foo(12), foo.py_func(12)) + self.assertEqual(foo(1.2), foo.py_func(1.2)) + + def test_unroll_tuple_arg2(self): + @njit + def foo(x): + r = 0 + for a in literal_unroll(x): + r += a + return r + + self.assertEqual(foo((12, 1.2)), foo.py_func((12, 1.2))) + self.assertEqual(foo((12, 1.2)), foo.py_func((12, 1.2))) + + def test_unroll_tuple_alias(self): + @njit + def foo(): + x = (10, 1.2) + out = 0 + for i in literal_unroll(x): + j = i + k = j + out += j + k + i + return out + + self.assertEqual(foo(), foo.py_func()) + + def test_unroll_tuple_nested(self): + + @njit + def foo(): + x = ((10, 1.2), (1j, 3.)) + out = 0 + for i in literal_unroll(x): + for j in (i): + out += j + return out + + with self.assertRaises(errors.TypingError) as raises: + foo() + + self.assertIn("getiter", str(raises.exception)) + re = r".*Tuple\(int[0-9][0-9], float64\).*" + self.assertRegex(str(raises.exception), re) + + def test_unroll_tuple_of_dict(self): + + @njit + def foo(): + x = {} + x["a"] = 1 + x["b"] = 2 + y = {} + y[3] = "c" + y[4] = "d" 
+ + for it in literal_unroll((x, y)): + for k, v in it.items(): + print(k, v) + + with captured_stdout() as stdout: + foo() + lines = stdout.getvalue().splitlines() + self.assertEqual( + lines, + ['a 1', 'b 2', '3 c', '4 d'], + ) + + def test_unroll_named_tuple(self): + ABC = namedtuple('ABC', ['a', 'b', 'c']) + + @njit + def foo(): + abc = ABC(1, 2j, 3.4) + out = 0 + for i in literal_unroll(abc): + out += i + return out + + self.assertEqual(foo(), foo.py_func()) + + def test_unroll_named_tuple_arg(self): + ABC = namedtuple('ABC', ['a', 'b', 'c']) + + @njit + def foo(x): + out = 0 + for i in literal_unroll(x): + out += i + return out + + abc = ABC(1, 2j, 3.4) + + self.assertEqual(foo(abc), foo.py_func(abc)) + + def test_unroll_named_unituple(self): + ABC = namedtuple('ABC', ['a', 'b', 'c']) + + @njit + def foo(): + abc = ABC(1, 2, 3) + out = 0 + for i in literal_unroll(abc): + out += i + return out + + self.assertEqual(foo(), foo.py_func()) + + def test_unroll_named_unituple_arg(self): + ABC = namedtuple('ABC', ['a', 'b', 'c']) + + @njit + def foo(x): + out = 0 + for i in literal_unroll(x): + out += i + return out + + abc = ABC(1, 2, 3) + + self.assertEqual(foo(abc), foo.py_func(abc)) + + def test_unroll_global_tuple(self): + + @njit + def foo(): + out = 0 + for i in literal_unroll(_X_GLOBAL): + out += i + return out + + self.assertEqual(foo(), foo.py_func()) + + def test_unroll_freevar_tuple(self): + x = (10, 11) + + @njit + def foo(): + out = 0 + for i in literal_unroll(x): + out += i + return out + + self.assertEqual(foo(), foo.py_func()) + + def test_unroll_function_tuple(self): + @njit + def a(): + return 1 + + @njit + def b(): + return 2 + + x = (a, b) + + @njit + def foo(): + out = 0 + for f in literal_unroll(x): + out += f() + return out + + self.assertEqual(foo(), foo.py_func()) + + def test_unroll_indexing_list(self): + # See issue #5477 + @njit + def foo(cont): + i = 0 + acc = 0 + normal_list = [a for a in cont] + heter_tuple = ('a', 25, 0.23, None) + 
for item in literal_unroll(heter_tuple): + acc += normal_list[i] + i += 1 + print(item) + return i, acc + + data = [j for j in range(4)] + + # send stdout to nowhere, just check return values + with captured_stdout(): + self.assertEqual(foo(data), foo.py_func(data)) + + # now capture stdout for jit function and check + with captured_stdout() as stdout: + foo(data) + lines = stdout.getvalue().splitlines() + self.assertEqual( + lines, + ['a', '25', '0.23', 'None'], + ) + + def test_unroller_as_freevar(self): + mixed = (np.ones((1,)), np.ones((1, 1)), np.ones((1, 1, 1))) + from numba import literal_unroll as freevar_unroll + + @njit + def foo(): + out = 0 + for i in freevar_unroll(mixed): + out += i.ndim + return out + + self.assertEqual(foo(), foo.py_func()) + + def test_unroll_with_non_conformant_loops_present(self): + # See issue #8311 + + @njit('(Tuple((int64, float64)),)') + def foo(tup): + for t in literal_unroll(tup): + pass + + x = 1 + while x == 1: + x = 0 + + def test_literal_unroll_legalize_var_names01(self): + # See issue #8939 + test = np.array([(1, 2), (2, 3)], dtype=[("a1", "f8"), ("a2", "f8")]) + fields = tuple(test.dtype.fields.keys()) + + @njit + def foo(arr): + res = 0 + for k in literal_unroll(fields): + res = res + np.abs(arr[k]).sum() + return res + + self.assertEqual(foo(test), 8.0) + + def test_literal_unroll_legalize_var_names02(self): + # See issue #8939 + test = np.array([(1, 2), (2, 3)], + dtype=[("a1[0]", "f8"), ("a2[1]", "f8")]) + fields = tuple(test.dtype.fields.keys()) + + @njit + def foo(arr): + res = 0 + for k in literal_unroll(fields): + res = res + np.abs(arr[k]).sum() + return res + + self.assertEqual(foo(test), 8.0) + + +def capture(real_pass): + """ Returns a compiler pass that captures the mutation state reported + by the pass used in the argument""" + @register_pass(mutates_CFG=False, analysis_only=True) + class ResultCapturer(AnalysisPass): + _name = "capture_%s" % real_pass._name + _real_pass = real_pass + + def 
__init__(self): + FunctionPass.__init__(self) + + def run_pass(self, state): + result = real_pass().run_pass(state) + mutation_results = state.metadata.setdefault('mutation_results', {}) + mutation_results[real_pass] = result + return result + + return ResultCapturer + + +class CapturingCompiler(CompilerBase): + """ Simple pipeline that wraps passes with the ResultCapturer pass""" + + def define_pipelines(self): + pm = PassManager("Capturing Compiler") + + def add_pass(x, y): + return pm.add_pass(capture(x), y) + + add_pass(TranslateByteCode, "analyzing bytecode") + add_pass(FixupArgs, "fix up args") + add_pass(IRProcessing, "processing IR") + add_pass(LiteralUnroll, "handles literal_unroll") + + # typing + add_pass(NopythonTypeInference, "nopython frontend") + + # legalise + add_pass(IRLegalization, + "ensure IR is legal prior to lowering") + + # lower + add_pass(NativeLowering, "native lowering") + add_pass(NoPythonBackend, "nopython mode backend") + pm.finalize() + return [pm] + + +class TestLiteralUnrollPassTriggering(TestCase): + + def test_literal_unroll_not_invoked(self): + @njit(pipeline_class=CapturingCompiler) + def foo(): + acc = 0 + for i in (1, 2, 3): + acc += i + return acc + + foo() + cres = foo.overloads[foo.signatures[0]] + self.assertFalse(cres.metadata['mutation_results'][LiteralUnroll]) + + def test_literal_unroll_is_invoked(self): + @njit(pipeline_class=CapturingCompiler) + def foo(): + acc = 0 + for i in literal_unroll((1, 2, 3)): + acc += i + return acc + + foo() + cres = foo.overloads[foo.signatures[0]] + self.assertTrue(cres.metadata['mutation_results'][LiteralUnroll]) + + def test_literal_unroll_is_invoked_via_alias(self): + alias = literal_unroll + + @njit(pipeline_class=CapturingCompiler) + def foo(): + acc = 0 + for i in alias((1, 2, 3)): + acc += i + return acc + + foo() + cres = foo.overloads[foo.signatures[0]] + self.assertTrue(cres.metadata['mutation_results'][LiteralUnroll]) + + def test_literal_unroll_assess_empty_function(self): 
+ @njit(pipeline_class=CapturingCompiler) + def foo(): + pass + + foo() + cres = foo.overloads[foo.signatures[0]] + self.assertFalse(cres.metadata['mutation_results'][LiteralUnroll]) + + def test_literal_unroll_not_in_globals(self): + f = """def foo():\n\tpass""" + l = {} + exec(f, {}, l) + foo = njit(pipeline_class=CapturingCompiler)(l['foo']) + foo() + cres = foo.overloads[foo.signatures[0]] + self.assertFalse(cres.metadata['mutation_results'][LiteralUnroll]) + + def test_literal_unroll_globals_and_locals(self): + f = """def foo():\n\tfor x in literal_unroll((1,)):\n\t\tpass""" + l = {} + exec(f, {}, l) + foo = njit(pipeline_class=CapturingCompiler)(l['foo']) + with self.assertRaises(errors.TypingError) as raises: + foo() + self.assertIn("Untyped global name 'literal_unroll'", + str(raises.exception)) + + # same as above but now add literal_unroll to globals + l = {} + exec(f, {'literal_unroll': literal_unroll}, l) + foo = njit(pipeline_class=CapturingCompiler)(l['foo']) + foo() + cres = foo.overloads[foo.signatures[0]] + self.assertTrue(cres.metadata['mutation_results'][LiteralUnroll]) + + # same as above, but now with import + from textwrap import dedent + f = """ + def gen(): + from numba import literal_unroll + def foo(): + for x in literal_unroll((1,)): + pass + return foo + bar = gen() + """ + l = {} + exec(dedent(f), {}, l) + foo = njit(pipeline_class=CapturingCompiler)(l['bar']) + foo() + cres = foo.overloads[foo.signatures[0]] + self.assertTrue(cres.metadata['mutation_results'][LiteralUnroll]) + + # same as above, but now with import as something else + from textwrap import dedent + f = """ + def gen(): + from numba import literal_unroll as something_else + def foo(): + for x in something_else((1,)): + pass + return foo + bar = gen() + """ + l = {} + exec(dedent(f), {}, l) + foo = njit(pipeline_class=CapturingCompiler)(l['bar']) + foo() + cres = foo.overloads[foo.signatures[0]] + self.assertTrue(cres.metadata['mutation_results'][LiteralUnroll]) + + +if 
__name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_moved_modules.py b/venv/lib/python3.10/site-packages/numba/tests/test_moved_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..4ccaa06c9a4d4b7b81d291681ebb382fdd8769be --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_moved_modules.py @@ -0,0 +1,30 @@ +"""Tests for moved modules and their redirection from old path +""" +from numba.tests.support import TestCase + + +class TestMovedModule(TestCase): + """Testing moved modules in Q1 2020 but were decided to kept as public API + """ + def tests_numba_types(self): + import numba.types + import numba.core.types as types + # The old module IS NOT the new module + self.assertIsNot(numba.types, types) + # Attribute access are there + self.assertIs(numba.types.intp, types.intp) + self.assertIs(numba.types.float64, types.float64) + self.assertIs(numba.types.Array, types.Array) + # Submodule access through old import path is possible + import numba.types.misc + self.assertIs(types.misc, numba.types.misc) + self.assertIs(types.misc.Optional, numba.types.misc.Optional) + # Import time code could be executed twice and causes the following to + # fail. 
+ self.assertIs(types.StringLiteral, numba.types.misc.StringLiteral) + # Check numba.types.container + from numba.types import containers + self.assertIs(types.containers, containers) + self.assertIs(types.containers.Sequence, containers.Sequence) + from numba.types.containers import Sequence + self.assertIs(Sequence, containers.Sequence) diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_multi3.py b/venv/lib/python3.10/site-packages/numba/tests/test_multi3.py new file mode 100644 index 0000000000000000000000000000000000000000..018637922ac9dfc54a3b76ce1212df183c724d75 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_multi3.py @@ -0,0 +1,43 @@ +import random + +import numpy as np + +from numba import njit +from numba.core import types +import unittest + +class TestMulti3(unittest.TestCase): + """ + This test is only relevant for 32-bit architectures. + + Test __multi3 implementation in _helperlib.c. + The symbol defines a i128 multiplication. + It is necessary for working around an issue in LLVM (see issue #969). + The symbol does not exist in 32-bit platform, and should not be used by + LLVM. However, optimization passes will create i65 multiplication that + is then lowered to __multi3. 
+ """ + def test_multi3(self): + @njit("(int64,)") + def func(x): + res = 0 + for i in range(x): + res += i + return res + + x_cases = [-1, 0, 1, 3, 4, 8, + 0xffffffff - 1, 0xffffffff, 0xffffffff + 1, + 0x123456789abcdef, -0x123456789abcdef] + for _ in range(500): + x_cases.append(random.randint(0, 0xffffffff)) + + def expected(x): + if x <= 0: return 0 + return ((x * (x - 1)) // 2) & (2**64 - 1) + + for x in x_cases: + self.assertEqual(expected(x), func(x)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_nan.py b/venv/lib/python3.10/site-packages/numba/tests/test_nan.py new file mode 100644 index 0000000000000000000000000000000000000000..a38a486c1a635b20ff3e2ead149ce8b2f9786b12 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_nan.py @@ -0,0 +1,37 @@ +import unittest +from numba import jit +from numba.core import types + +enable_pyobj_flags = {'forceobj': True} +no_pyobj_flags = {'nopython': True} + + +def isnan(x): + return x != x + + +def isequal(x): + return x == x + + +class TestNaN(unittest.TestCase): + + def test_nans(self, flags=enable_pyobj_flags): + pyfunc = isnan + cfunc = jit((types.float64,), **flags)(pyfunc) + + self.assertTrue(cfunc(float('nan'))) + self.assertFalse(cfunc(1.0)) + + pyfunc = isequal + cfunc = jit((types.float64,), **flags)(pyfunc) + + self.assertFalse(cfunc(float('nan'))) + self.assertTrue(cfunc(1.0)) + + def test_nans_npm(self): + self.test_nans(flags=no_pyobj_flags) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_ndarray_subclasses.py b/venv/lib/python3.10/site-packages/numba/tests/test_ndarray_subclasses.py new file mode 100644 index 0000000000000000000000000000000000000000..944d314e7112024ff7bb22022d040847a1162c2f --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_ndarray_subclasses.py @@ -0,0 +1,346 @@ +""" +Test NumPy Subclassing features +""" + +import 
builtins +import unittest +from numbers import Number +from functools import wraps + +import numpy as np +from llvmlite import ir + +import numba +from numba import njit, typeof, objmode +from numba.core import cgutils, types, typing +from numba.core.pythonapi import box +from numba.core.errors import TypingError +from numba.core.registry import cpu_target +from numba.extending import (intrinsic, lower_builtin, overload_classmethod, + register_model, type_callable, typeof_impl, + register_jitable) +from numba.np import numpy_support + +from numba.tests.support import TestCase, MemoryLeakMixin + +# A quick util to allow logging within jit code + +_logger = None + + +def _do_log(*args): + if _logger is not None: + _logger.append(args) + + +@register_jitable +def log(*args): + with objmode(): + _do_log(*args) + + +def use_logger(fn): + @wraps(fn) + def core(*args, **kwargs): + global _logger + _logger = [] + return fn(*args, **kwargs) + return core + + +class MyArray(np.ndarray): + # Tell Numba to not seamlessly treat this type as a regular ndarray. + __numba_array_subtype_dispatch__ = True + + # __array__ is not needed given that this is a ndarray subclass + # + # def __array__(self, dtype=None): + # return self + + # Interoperate with NumPy outside of Numba. 
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + if method == "__call__": + N = None + scalars = [] + for inp in inputs: + if isinstance(inp, Number): + scalars.append(inp) + elif isinstance(inp, (type(self), np.ndarray)): + if isinstance(inp, type(self)): + scalars.append(np.ndarray(inp.shape, inp.dtype, inp)) + else: + scalars.append(inp) + if N is not None: + if N != inp.shape: + raise TypeError("inconsistent sizes") + else: + N = inp.shape + else: + return NotImplemented + ret = ufunc(*scalars, **kwargs) + return self.__class__(ret.shape, ret.dtype, ret) + else: + return NotImplemented + + +class MyArrayType(types.Array): + def __init__(self, dtype, ndim, layout, readonly=False, aligned=True): + name = f"MyArray({ndim}, {dtype}, {layout})" + super().__init__(dtype, ndim, layout, readonly=readonly, + aligned=aligned, name=name) + + def copy(self, *args, **kwargs): + # This is here to future-proof. + # The test here never uses this. + raise NotImplementedError + + # Tell Numba typing how to combine MyArrayType with other ndarray types. 
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + if method == "__call__": + for inp in inputs: + if not isinstance(inp, (types.Array, types.Number)): + return NotImplemented + # Ban if all arguments are MyArrayType + if all(isinstance(inp, MyArrayType) for inp in inputs): + return NotImplemented + return MyArrayType + else: + return NotImplemented + + @property + def box_type(self): + return MyArray + + +@typeof_impl.register(MyArray) +def typeof_ta_ndarray(val, c): + try: + dtype = numpy_support.from_dtype(val.dtype) + except NotImplementedError: + raise ValueError("Unsupported array dtype: %s" % (val.dtype,)) + layout = numpy_support.map_layout(val) + readonly = not val.flags.writeable + return MyArrayType(dtype, val.ndim, layout, readonly=readonly) + + +@register_model(MyArrayType) +class MyArrayTypeModel(numba.core.datamodel.models.StructModel): + def __init__(self, dmm, fe_type): + ndim = fe_type.ndim + members = [ + ('meminfo', types.MemInfoPointer(fe_type.dtype)), + ('parent', types.pyobject), + ('nitems', types.intp), + ('itemsize', types.intp), + ('data', types.CPointer(fe_type.dtype)), + ('shape', types.UniTuple(types.intp, ndim)), + ('strides', types.UniTuple(types.intp, ndim)), + ('extra_field', types.intp), + ] + super(MyArrayTypeModel, self).__init__(dmm, fe_type, members) + + +@type_callable(MyArray) +def type_myarray(context): + def typer(shape, dtype, buf): + out = MyArrayType( + dtype=buf.dtype, ndim=len(shape), layout=buf.layout + ) + return out + + return typer + + +@lower_builtin(MyArray, types.UniTuple, types.DType, types.Array) +def impl_myarray(context, builder, sig, args): + from numba.np.arrayobj import make_array, populate_array + + srcaryty = sig.args[-1] + shape, dtype, buf = args + + srcary = make_array(srcaryty)(context, builder, value=buf) + # Copy source array and remove the parent field to avoid boxer re-using + # the original ndarray instance. 
+ retary = make_array(sig.return_type)(context, builder) + populate_array(retary, + data=srcary.data, + shape=srcary.shape, + strides=srcary.strides, + itemsize=srcary.itemsize, + meminfo=srcary.meminfo) + + ret = retary._getvalue() + context.nrt.incref(builder, sig.return_type, ret) + return ret + + +@box(MyArrayType) +def box_array(typ, val, c): + assert c.context.enable_nrt + np_dtype = numpy_support.as_dtype(typ.dtype) + dtypeptr = c.env_manager.read_const(c.env_manager.add_const(np_dtype)) + newary = c.pyapi.nrt_adapt_ndarray_to_python(typ, val, dtypeptr) + # Steals NRT ref + c.context.nrt.decref(c.builder, typ, val) + return newary + + +@overload_classmethod(MyArrayType, "_allocate") +def _ol_array_allocate(cls, allocsize, align): + """Implements a Numba-only classmethod on the array type. + """ + def impl(cls, allocsize, align): + log("LOG _ol_array_allocate", allocsize, align) + return allocator_MyArray(allocsize, align) + + return impl + + +@intrinsic +def allocator_MyArray(typingctx, allocsize, align): + def impl(context, builder, sig, args): + context.nrt._require_nrt() + size, align = args + + mod = builder.module + u32 = ir.IntType(32) + voidptr = cgutils.voidptr_t + + get_alloc_fnty = ir.FunctionType(voidptr, ()) + get_alloc_fn = cgutils.get_or_insert_function( + mod, get_alloc_fnty, name="_nrt_get_sample_external_allocator" + ) + ext_alloc = builder.call(get_alloc_fn, ()) + + fnty = ir.FunctionType(voidptr, [cgutils.intp_t, u32, voidptr]) + fn = cgutils.get_or_insert_function( + mod, fnty, name="NRT_MemInfo_alloc_safe_aligned_external" + ) + fn.return_value.add_attribute("noalias") + if isinstance(align, builtins.int): + align = context.get_constant(types.uint32, align) + else: + assert align.type == u32, "align must be a uint32" + call = builder.call(fn, [size, align, ext_alloc]) + call.name = "allocate_MyArray" + return call + + mip = types.MemInfoPointer(types.voidptr) # return untyped pointer + sig = typing.signature(mip, allocsize, align) + 
return sig, impl + + +class TestNdarraySubclasses(MemoryLeakMixin, TestCase): + + def test_myarray_return(self): + """This tests the path to `MyArrayType.box_type` + """ + @njit + def foo(a): + return a + 1 + + buf = np.arange(4) + a = MyArray(buf.shape, buf.dtype, buf) + expected = foo.py_func(a) + got = foo(a) + self.assertIsInstance(got, MyArray) + self.assertIs(type(expected), type(got)) + self.assertPreciseEqual(expected, got) + + def test_myarray_passthru(self): + @njit + def foo(a): + return a + + buf = np.arange(4) + a = MyArray(buf.shape, buf.dtype, buf) + expected = foo.py_func(a) + got = foo(a) + self.assertIsInstance(got, MyArray) + self.assertIs(type(expected), type(got)) + self.assertPreciseEqual(expected, got) + + def test_myarray_convert(self): + @njit + def foo(buf): + return MyArray(buf.shape, buf.dtype, buf) + + buf = np.arange(4) + expected = foo.py_func(buf) + got = foo(buf) + self.assertIsInstance(got, MyArray) + self.assertIs(type(expected), type(got)) + self.assertPreciseEqual(expected, got) + + def test_myarray_asarray_non_jit(self): + def foo(buf): + converted = MyArray(buf.shape, buf.dtype, buf) + return np.asarray(converted) + buf + + buf = np.arange(4) + got = foo(buf) + self.assertIs(type(got), np.ndarray) + self.assertPreciseEqual(got, buf + buf) + + @unittest.expectedFailure + def test_myarray_asarray(self): + self.disable_leak_check() + + @njit + def foo(buf): + converted = MyArray(buf.shape, buf.dtype, buf) + return np.asarray(converted) + + buf = np.arange(4) + got = foo(buf) + # the following fails because our np.asarray is returning the source + # array type + self.assertIs(type(got), np.ndarray) + + def test_myarray_ufunc_unsupported(self): + @njit + def foo(buf): + converted = MyArray(buf.shape, buf.dtype, buf) + return converted + converted + + buf = np.arange(4, dtype=np.float32) + with self.assertRaises(TypingError) as raises: + foo(buf) + + msg = ("No implementation of function", + "add(MyArray(1, float32, C), MyArray(1, 
float32, C))") + for m in msg: + self.assertIn(m, str(raises.exception)) + + @use_logger + def test_myarray_allocator_override(self): + """ + Checks that our custom allocator is used + """ + @njit + def foo(a): + b = a + np.arange(a.size, dtype=np.float64) + c = a + 1j + return b, c + + buf = np.arange(4, dtype=np.float64) + a = MyArray(buf.shape, buf.dtype, buf) + + expected = foo.py_func(a) + got = foo(a) + + self.assertPreciseEqual(got, expected) + + logged_lines = _logger + + targetctx = cpu_target.target_context + nb_dtype = typeof(buf.dtype) + align = targetctx.get_preferred_array_alignment(nb_dtype) + self.assertEqual(logged_lines, [ + ("LOG _ol_array_allocate", expected[0].nbytes, align), + ("LOG _ol_array_allocate", expected[1].nbytes, align), + ]) + + +if __name__ == "__main__": + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_nested_calls.py b/venv/lib/python3.10/site-packages/numba/tests/test_nested_calls.py new file mode 100644 index 0000000000000000000000000000000000000000..679bc4af98680e389fc911f544c18b103b468bbe --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_nested_calls.py @@ -0,0 +1,153 @@ +""" +Test problems in nested calls. +Usually due to invalid type conversion between function boundaries. 
+""" + + +from numba import int32, int64 +from numba import jit +from numba.core import types +from numba.extending import overload +from numba.tests.support import TestCase, tag +import unittest + + +@jit(nopython=True) +def f_inner(a, b, c): + return a, b, c + +def f(x, y, z): + return f_inner(x, c=y, b=z) + +@jit(nopython=True) +def g_inner(a, b=2, c=3): + return a, b, c + +def g(x, y, z): + return g_inner(x, b=y), g_inner(a=z, c=x) + +@jit(nopython=True) +def star_inner(a=5, *b): + return a, b + +def star(x, y, z): + return star_inner(a=x), star_inner(x, y, z) + +def star_call(x, y, z): + return star_inner(x, *y), star_inner(*z) + +@jit(nopython=True) +def argcast_inner(a, b): + if b: + # Here `a` is unified to int64 (from int32 originally) + a = int64(0) + return a + +def argcast(a, b): + return argcast_inner(int32(a), b) + + +def generated_inner(x, y=5, z=6): + assert 0, "unreachable" + + +@overload(generated_inner) +def ol_generated_inner(x, y=5, z=6): + if isinstance(x, types.Complex): + def impl(x, y=5, z=6): + return x + y, z + else: + def impl(x, y=5, z=6): + return x - y, z + return impl + + +def call_generated(a, b): + return generated_inner(a, z=b) + + +class TestNestedCall(TestCase): + + def compile_func(self, pyfunc, objmode=False): + def check(*args, **kwargs): + expected = pyfunc(*args, **kwargs) + result = f(*args, **kwargs) + self.assertPreciseEqual(result, expected) + flags = dict(forceobj=True) if objmode else dict(nopython=True) + f = jit(**flags)(pyfunc) + return f, check + + def test_boolean_return(self): + @jit(nopython=True) + def inner(x): + return not x + + @jit(nopython=True) + def outer(x): + if inner(x): + return True + else: + return False + + self.assertFalse(outer(True)) + self.assertTrue(outer(False)) + + def test_named_args(self, objmode=False): + """ + Test a nested function call with named (keyword) arguments. 
+ """ + cfunc, check = self.compile_func(f, objmode) + check(1, 2, 3) + check(1, y=2, z=3) + + def test_named_args_objmode(self): + self.test_named_args(objmode=True) + + def test_default_args(self, objmode=False): + """ + Test a nested function call using default argument values. + """ + cfunc, check = self.compile_func(g, objmode) + check(1, 2, 3) + check(1, y=2, z=3) + + def test_default_args_objmode(self): + self.test_default_args(objmode=True) + + def test_star_args(self): + """ + Test a nested function call to a function with *args in its signature. + """ + cfunc, check = self.compile_func(star) + check(1, 2, 3) + + def test_star_call(self, objmode=False): + """ + Test a function call with a *args. + """ + cfunc, check = self.compile_func(star_call, objmode) + check(1, (2,), (3,)) + + def test_star_call_objmode(self): + self.test_star_call(objmode=True) + + def test_argcast(self): + """ + Issue #1488: implicitly casting an argument variable should not + break nested calls. + """ + cfunc, check = self.compile_func(argcast) + check(1, 0) + check(1, 1) + + def test_call_generated(self): + """ + Test a nested function call to a generated jit function. 
+ """ + cfunc = jit(nopython=True)(call_generated) + self.assertPreciseEqual(cfunc(1, 2), (-4, 2)) + self.assertPreciseEqual(cfunc(1j, 2), (1j + 5, 2)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_new_type_system.py b/venv/lib/python3.10/site-packages/numba/tests/test_new_type_system.py new file mode 100644 index 0000000000000000000000000000000000000000..fe1a198b9514b8742076e2ac9fa83b49946ac6d1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_new_type_system.py @@ -0,0 +1,37 @@ +import numpy as np + +from numba import njit, config +from numba.tests.support import TestCase + + +class TestTypes(TestCase): + + def setUp(self) -> None: + if config.USE_LEGACY_TYPE_SYSTEM: + self.skipTest("This test is only for the new type system") + return super().setUp() + + def test_return_types(self): + @njit + def foo(x): + return x + + cases = [ + # Python types + 1, + 1.2, + (1 + 2j), + True, + # NumPy types + np.int32(1), + np.float64(1.2), + np.complex64(1 + 2j), + np.complex128(1 + 2j), + np.bool_(True), + np.datetime64('2020-01-01'), + np.timedelta64(1, 'D'), + ] + + for case in cases: + self.assertEqual(foo(case), case) + self.assertEqual(type(foo(case)), type(case)) diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_np_functions.py b/venv/lib/python3.10/site-packages/numba/tests/test_np_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..6f95735348ffbbe7d79e1d888264abc1ac1aed89 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_np_functions.py @@ -0,0 +1,6875 @@ +# Tests numpy methods of + +import itertools +import math +import platform +from functools import partial +from itertools import product +from textwrap import dedent + +import numpy as np + +from numba import jit, njit, typeof +from numba.core import types +from numba.typed import List, Dict +from numba.np.numpy_support import numpy_version +from numba.core.errors 
import TypingError +from numba.core.config import IS_32BITS +from numba.core.utils import pysignature +from numba.np.extensions import cross2d +from numba.tests.support import (TestCase, MemoryLeakMixin, + needs_blas, run_in_subprocess, + skip_if_numpy_2, IS_NUMPY_2, + IS_MACOS_ARM64) +import unittest + + +def sinc(x): + return np.sinc(x) + + +def angle1(x): + return np.angle(x) + + +def angle2(x, deg): + return np.angle(x, deg) + + +def array_equal(a, b): + return np.array_equal(a, b) + + +def intersect1d_2(a, b): + return np.intersect1d(a, b) + + +def intersect1d_3(a, b, assume_unique=False): + return np.intersect1d(a, b, assume_unique) + + +def append(arr, values, axis): + return np.append(arr, values, axis=axis) + + +def count_nonzero(arr, axis): + return np.count_nonzero(arr, axis=axis) + + +def delete(arr, obj): + return np.delete(arr, obj) + + +def diff1(a): + return np.diff(a) + + +def diff2(a, n): + return np.diff(a, n) + + +def bincount1(a): + return np.bincount(a) + + +def bincount2(a, w): + return np.bincount(a, weights=w) + + +def bincount3(a, w=None, minlength=0): + return np.bincount(a, w, minlength) + + +def searchsorted(a, v): + return np.searchsorted(a, v) + + +def searchsorted_left(a, v): + return np.searchsorted(a, v, side='left') + + +def searchsorted_right(a, v): + return np.searchsorted(a, v, side='right') + + +def digitize(*args): + return np.digitize(*args) + + +def histogram(*args): + return np.histogram(*args) + + +def machar(*args): + return np.MachAr() + + +def iscomplex(x): + return np.iscomplex(x) + + +def iscomplexobj(x): + return np.iscomplexobj(x) + + +def isscalar(x): + return np.isscalar(x) + + +def isreal(x): + return np.isreal(x) + + +def isrealobj(x): + return np.isrealobj(x) + + +def isneginf(x, out=None): + return np.isneginf(x, out) + + +def isposinf(x, out=None): + return np.isposinf(x, out) + + +def isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False): + return np.isclose(a, b, rtol, atol, equal_nan) + + +def isnat(x): 
+ return np.isnat(x) + + +def iinfo(*args): + return np.iinfo(*args) + + +def finfo(*args): + return np.finfo(*args) + + +def finfo_machar(*args): + return np.finfo(*args).machar + + +def fliplr(a): + return np.fliplr(a) + + +def flipud(a): + return np.flipud(a) + + +def flip(a): + return np.flip(a) + + +def logspace2(start, stop): + return np.logspace(start, stop) + + +def logspace3(start, stop, num=50): + return np.logspace(start, stop, num=num) + + +def geomspace2(start, stop): + return np.geomspace(start, stop) + + +def geomspace3(start, stop, num=50): + return np.geomspace(start, stop, num=num) + + +def rot90(a): + return np.rot90(a) + + +def rot90_k(a, k=1): + return np.rot90(a, k) + + +def array_split(a, indices, axis=0): + return np.array_split(a, indices, axis=axis) + + +def split(a, indices, axis=0): + return np.split(a, indices, axis=axis) + + +def vsplit(a, ind_or_sec): + return np.vsplit(a, ind_or_sec) + + +def hsplit(a, ind_or_sec): + return np.hsplit(a, ind_or_sec) + + +def dsplit(a, ind_or_sec): + return np.dsplit(a, ind_or_sec) + + +def correlate(a, v, mode="valid"): + return np.correlate(a, v, mode=mode) + + +def convolve(a, v, mode="full"): + return np.convolve(a, v, mode=mode) + + +def tri_n(N): + return np.tri(N) + + +def tri_n_m(N, M=None): + return np.tri(N, M) + + +def tri_n_k(N, k=0): + return np.tri(N, k) + + +def tri_n_m_k(N, M=None, k=0): + return np.tri(N, M, k) + + +def tril_m(m): + return np.tril(m) + + +def tril_m_k(m, k=0): + return np.tril(m, k) + + +def tril_indices_n(n): + return np.tril_indices(n) + + +def tril_indices_n_k(n, k=0): + return np.tril_indices(n, k) + + +def tril_indices_n_m(n, m=None): + return np.tril_indices(n, m=m) + + +def tril_indices_n_k_m(n, k=0, m=None): + return np.tril_indices(n, k, m) + + +def tril_indices_from_arr(arr): + return np.tril_indices_from(arr) + + +def tril_indices_from_arr_k(arr, k=0): + return np.tril_indices_from(arr, k) + + +def triu_m(m): + return np.triu(m) + + +def triu_m_k(m, k=0): + 
return np.triu(m, k) + + +def triu_indices_n(n): + return np.triu_indices(n) + + +def triu_indices_n_k(n, k=0): + return np.triu_indices(n, k) + + +def triu_indices_n_m(n, m=None): + return np.triu_indices(n, m=m) + + +def triu_indices_n_k_m(n, k=0, m=None): + return np.triu_indices(n, k, m) + + +def triu_indices_from_arr(arr): + return np.triu_indices_from(arr) + + +def triu_indices_from_arr_k(arr, k=0): + return np.triu_indices_from(arr, k) + + +def vander(x, N=None, increasing=False): + return np.vander(x, N, increasing) + + +def partition(a, kth): + return np.partition(a, kth) + + +def argpartition(a, kth): + return np.argpartition(a, kth) + + +def cov(m, y=None, rowvar=True, bias=False, ddof=None): + return np.cov(m, y, rowvar, bias, ddof) + + +def corrcoef(x, y=None, rowvar=True): + return np.corrcoef(x, y, rowvar) + + +def ediff1d(ary, to_end=None, to_begin=None): + return np.ediff1d(ary, to_end, to_begin) + + +def roll(a, shift): + return np.roll(a, shift) + + +def asarray(a): + return np.asarray(a) + + +def asarray_kws(a, dtype): + return np.asarray(a, dtype=dtype) + + +def asfarray(a, dtype=np.float64): + return np.asfarray(a, dtype=dtype) + + +def asfarray_default_kwarg(a): + return np.asfarray(a) + + +def extract(condition, arr): + return np.extract(condition, arr) + + +def np_trapz(y): + return np.trapz(y) + + +def np_trapz_x(y, x): + return np.trapz(y, x) + + +def np_trapz_dx(y, dx): + return np.trapz(y, dx=dx) + + +def np_trapz_x_dx(y, x, dx): + return np.trapz(y, x, dx) + + +def np_trapezoid(y): + return np.trapezoid(y) + + +def np_trapezoid_x(y, x): + return np.trapezoid(y, x) + + +def np_trapezoid_dx(y, dx): + return np.trapezoid(y, dx=dx) + + +def np_trapezoid_x_dx(y, x, dx): + return np.trapezoid(y, x, dx) + + +def np_allclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False): + return np.allclose(a, b, rtol, atol, equal_nan) + + +def np_average(a, axis=None, weights=None): + return np.average(a, axis=axis, weights=weights) + + +def interp(x, xp, 
fp): + return np.interp(x, xp, fp) + + +def np_repeat(a, repeats): + return np.repeat(a, repeats) + + +def array_repeat(a, repeats): + return np.asarray(a).repeat(repeats) + + +def np_select(condlist, choicelist, default=0): + return np.select(condlist, choicelist, default=default) + + +def np_select_defaults(condlist, choicelist): + return np.select(condlist, choicelist) + + +def np_bartlett(M): + return np.bartlett(M) + + +def np_blackman(M): + return np.blackman(M) + + +def np_hamming(M): + return np.hamming(M) + + +def np_hanning(M): + return np.hanning(M) + + +def np_kaiser(M, beta): + return np.kaiser(M, beta) + + +def np_cross(a, b): + return np.cross(a, b) + + +def np_trim_zeros(a, trim='fb'): + return np.trim_zeros(a, trim) + + +def nb_cross2d(a, b): + return cross2d(a, b) + + +def flip_lr(a): + return np.fliplr(a) + + +def flip_ud(a): + return np.flipud(a) + + +def np_union1d(a, b): + return np.union1d(a,b) + + +def np_asarray_chkfinite(a, dtype=None): + return np.asarray_chkfinite(a, dtype) + + +def unwrap(p, discont=None, axis=-1, period=6.283185307179586): + return np.unwrap(p, discont, axis, period=period) + + +def unwrap1(p): + return np.unwrap(p) + + +def unwrap13(p, period): + return np.unwrap(p, period=period) + + +def unwrap123(p, period, discont): + return np.unwrap(p, period=period, discont=discont) + + +def array_contains(a, key): + return key in a + + +def swapaxes(a, a1, a2): + return np.swapaxes(a, a1, a2) + + +def nan_to_num(X, copy=True, nan=0.0): + return np.nan_to_num(X, copy=copy, nan=nan) + + +def np_indices(dimensions): + return np.indices(dimensions) + + +def diagflat1(v): + return np.diagflat(v) + + +def diagflat2(v, k=0): + return np.diagflat(v, k) + + +def np_setxor1d_2(a, b): + return np.setxor1d(a, b) + + +def np_setxor1d_3(a, b, assume_unique=False): + return np.setxor1d(a, b, assume_unique) + + +def np_setdiff1d_2(a, b): + return np.setdiff1d(a, b) + + +def np_setdiff1d_3(a, b, assume_unique=False): + return np.setdiff1d(a, 
b, assume_unique) + + +def np_in1d_2(a, b): + return np.in1d(a, b) + + +def np_in1d_3a(a, b, assume_unique=False): + return np.in1d(a, b, assume_unique=assume_unique) + + +def np_in1d_3b(a, b, invert=False): + return np.in1d(a, b, invert=invert) + + +def np_in1d_4(a, b, assume_unique=False, invert=False): + return np.in1d(a, b, assume_unique, invert) + + +def np_isin_2(a, b): + return np.isin(a, b) + + +def np_isin_3a(a, b, assume_unique=False): + return np.isin(a, b, assume_unique=assume_unique) + + +def np_isin_3b(a, b, invert=False): + return np.isin(a, b, invert=invert) + + +def np_isin_4(a, b, assume_unique=False, invert=False): + return np.isin(a, b, assume_unique, invert) + + +class TestNPFunctions(MemoryLeakMixin, TestCase): + """ + Tests for various Numpy functions. + """ + + def setUp(self): + super(TestNPFunctions, self).setUp() + self.rnd = np.random.RandomState(42) + + def run_unary(self, pyfunc, x_types, x_values, func_extra_types=None, + func_extra_args=None, ignore_sign_on_zero=False, abs_tol=None, + **kwargs): + """ + Runs tests for a unary function operating in the numerical real space. + + Parameters + ---------- + pyfunc : a python function definition holding that calls the numpy + functions to be tested. 
+ x_types: the types of the values being tested, see numba.types + x_values: the numerical values of the values to be tested + func_extra_types: the types of additional arguments to the numpy + function + func_extra_args: additional arguments to the numpy function + ignore_sign_on_zero: boolean as to whether to allow zero values + with incorrect signs to be considered equal + prec: the required precision match, see assertPreciseEqual + + Notes: + ------ + x_types and x_values must have the same length + + """ + for tx, vx in zip(x_types, x_values): + if func_extra_args is None: + func_extra_types = func_extra_args = [()] + for xtypes, xargs in zip(func_extra_types, func_extra_args): + cfunc = njit((tx,) + xtypes,)(pyfunc) + got = cfunc(vx, *xargs) + expected = pyfunc(vx, *xargs) + try: + scalty = tx.dtype + except AttributeError: + scalty = tx + prec = ('single' + if scalty in (types.float32, types.complex64) + else 'double') + msg = 'for input %r with prec %r' % (vx, prec) + self.assertPreciseEqual(got, expected, + prec=prec, + msg=msg, + ignore_sign_on_zero=ignore_sign_on_zero, + abs_tol=abs_tol, **kwargs) + + def test_sinc(self): + """ + Tests the sinc() function. + This test is purely to assert numerical computations are correct. + """ + + # Ignore sign of zeros, this will need masking depending on numpy + # version once the fix to numpy complex division is in upstream + # See: https://github.com/numpy/numpy/pull/6699 + isoz = True + + # Testing sinc(1.) leads to sin(pi)/pi, which is below machine + # precision in practice on most machines. Small floating point + # differences in sin() etc. may lead to large differences in the result + # that are at a range that is inaccessible using standard width + # floating point representations. + # e.g. Assume float64 type. 
+ # sin(pi) ~= 1e-16, but should be zero + # sin(pi)/pi ~= 1e-17, should be zero, error carried from above + # float64 has log10(2^53)~=15.9 digits of precision and the magnitude + # change in the alg is > 16 digits (1.0...0 -> 0.0...0), + # so comparison via ULP is invalid. + # We therefore opt to assume that values under machine precision are + # equal in this case. + tol = "eps" + + pyfunc = sinc + + def check(x_types, x_values, **kwargs): + self.run_unary(pyfunc, x_types, x_values, + ignore_sign_on_zero=isoz, abs_tol=tol, + **kwargs) + + # real domain scalar context + x_values = [1., -1., 0.0, -0.0, 0.5, -0.5, 5, -5, 5e-21, -5e-21] + x_types = [types.float32, types.float64] * (len(x_values) // 2) + check(x_types, x_values) + + # real domain vector context + x_values = [np.array(x_values, dtype=np.float64)] + x_types = [typeof(v) for v in x_values] + check(x_types, x_values) + + # complex domain scalar context + x_values = [1.+0j, -1+0j, 0.0+0.0j, -0.0+0.0j, 0+1j, 0-1j, 0.5+0.0j, # noqa + -0.5+0.0j, 0.5+0.5j, -0.5-0.5j, 5+5j, -5-5j, # noqa + # the following are to test sin(x)/x for small x + 5e-21+0j, -5e-21+0j, 5e-21j, +(0-5e-21j) # noqa + ] + x_types = [types.complex64, types.complex128] * (len(x_values) // 2) + check(x_types, x_values, ulps=2) + + # complex domain vector context + x_values = [np.array(x_values, dtype=np.complex128)] + x_types = [typeof(v) for v in x_values] + check(x_types, x_values, ulps=2) + + def test_sinc_exceptions(self): + pyfunc = sinc + cfunc = jit(nopython=True)(pyfunc) + + with self.assertRaises(TypingError) as raises: + cfunc('str') + self.assertIn('Argument "x" must be a Number or array-like', + str(raises.exception)) + # Exceptions leak references + self.disable_leak_check() + + def test_contains(self): + def arrs(): + a_0 = np.arange(10, 50) + k_0 = 20 + + yield a_0, k_0 + + a_1 = np.arange(6) + k_1 = 10 + + yield a_1, k_1 + + single_val_a = np.asarray([20]) + k_in = 20 + k_out = 13 + + yield single_val_a, k_in + yield 
single_val_a, k_out + + empty_arr = np.asarray([]) + yield empty_arr, k_out + + # np scalars + + bool_arr = np.array([True, False]) + yield bool_arr, True + yield bool_arr, k_0 + + np.random.seed(2) + float_arr = np.random.rand(10) + np.random.seed(2) + rand_k = np.random.rand() + present_k = float_arr[0] + + yield float_arr, rand_k + yield float_arr, present_k + + complx_arr = float_arr.view(np.complex128) + yield complx_arr, complx_arr[0] + yield complx_arr, rand_k + + np.random.seed(2) + uint_arr = np.random.randint(10, size=15, dtype=np.uint8) + yield uint_arr, 5 + yield uint_arr, 25 + + pyfunc = array_contains + + cfunc = jit(nopython=True)(pyfunc) + + for arr, key in arrs(): + expected = pyfunc(arr, key) + received = cfunc(arr, key) + + self.assertPreciseEqual(expected, received) + + def test_angle(self): + """ + Tests the angle() function. + This test is purely to assert numerical computations are correct. + """ + pyfunc1 = angle1 + pyfunc2 = angle2 + + def check(x_types, x_values): + # angle(x) + self.run_unary(pyfunc1, x_types, x_values) + # angle(x, deg) + xtra_values = [(True,), (False,)] + xtra_types = [(types.bool_,)] * len(xtra_values) + self.run_unary(pyfunc2, x_types, x_values, + func_extra_types=xtra_types, + func_extra_args=xtra_values,) + + # real domain scalar context + x_values = [1., -1., 0.0, -0.0, 0.5, -0.5, 5, -5] + x_types = [types.float32, types.float64] * (len(x_values) // 2 + 1) + check(x_types, x_values) + + # real domain vector context + x_values = [np.array(x_values, dtype=np.float64)] + x_types = [typeof(v) for v in x_values] + check(x_types, x_values) + + # complex domain scalar context + x_values = [1.+0j, -1+0j, 0.0+0.0j, -0.0+0.0j, 1j, -1j, 0.5+0.0j, # noqa + -0.5+0.0j, 0.5+0.5j, -0.5-0.5j, 5+5j, -5-5j] # noqa + x_types = [types.complex64, types.complex128] * (len(x_values) // 2 + 1) + check(x_types, x_values) + + # complex domain vector context + x_values = np.array(x_values) + x_types = [types.complex64, types.complex128] + 
check(x_types, x_values) + + def test_angle_return_type(self): + # see issue #8949 + def numba_angle(x): + r = np.angle(x) + return r.dtype + + pyfunc = numba_angle + x_values = [1., -1., 1. + 0j, -5 - 5j] + x_types = ['f4', 'f8', 'c8', 'c16'] + for val, typ in zip(x_values, x_types): + x = np.array([val], dtype=typ) + cfunc = jit(nopython=True)(pyfunc) + expected = pyfunc(x) + got = cfunc(x) + self.assertEqual(expected, got) + + def test_angle_exceptions(self): + pyfunc = angle1 + cfunc = jit(nopython=True)(pyfunc) + + with self.assertRaises(TypingError) as raises: + cfunc('hello') + self.assertIn('Argument "z" must be a complex or Array[complex]', + str(raises.exception)) + # Exceptions leak references + self.disable_leak_check() + + def test_array_equal(self): + def arrays(): + yield np.array([]), np.array([]) + yield np.array([1, 2]), np.array([1, 2]) + yield np.array([]), np.array([1]) + x = np.arange(10).reshape(5, 2) + x[1][1] = 30 + yield np.arange(10).reshape(5, 2), x + yield x, x + yield (1, 2, 3), (1, 2, 3) + yield 2, 2 + yield 3, 2 + yield True, True + yield True, False + yield True, 2 + yield True, 1 + yield False, 0 + + pyfunc = array_equal + cfunc = jit(nopython=True)(pyfunc) + + for arr, obj in arrays(): + expected = pyfunc(arr, obj) + got = cfunc(arr, obj) + self.assertPreciseEqual(expected, got) + + def test_array_equal_exception(self): + pyfunc = array_equal + cfunc = jit(nopython=True)(pyfunc) + + with self.assertRaises(TypingError) as raises: + cfunc(np.arange(3 * 4).reshape(3, 4), None) + self.assertIn( + 'Both arguments to "array_equals" must be array-like', + str(raises.exception) + ) + + def test_intersect1d_2(self): + + def arrays(): + yield (List.empty_list(types.float64), + List.empty_list(types.float64)) # two empty arrays + yield [1], List.empty_list(types.float64) # empty right + yield List.empty_list(types.float64), [1] # empty left + yield [1], [2] # singletons no intersection + yield [1], [1] # singletons one intersection + yield 
[1, 2], [1] + yield [1, 2, 2], [2, 2] + yield [1, 2], [2, 1] + yield [1, 2, 3], [1, 2, 3] + # from numpy: + # https://github.com/numpy/numpy/blob/b0371ef240560e78b651a5d7c9407ae3212a3d56/numpy/lib/tests/test_arraysetops.py#L17 # noqa: E501 + yield [5, 7, 1, 2], [2, 4, 3, 1, 5] + yield [5, 5, 7, 1, 2], [2, 1, 4, 3, 3, 1, 5] + + pyfunc = intersect1d_2 + cfunc = jit(nopython=True)(pyfunc) + + for a, b in arrays(): + # a = np.array(a) + # b = np.array(b) + if isinstance(a, list): + a = List(a) + if isinstance(b, list): + b = List(b) + expected = pyfunc(a, b) + got = cfunc(a, b) + self.assertPreciseEqual(expected, got) + + def test_intersect1d_3(self): + + def arrays(): + yield (List.empty_list(types.float64), + List.empty_list(types.float64)) # two empty arrays + yield [1], List.empty_list(types.float64) # empty right + yield List.empty_list(types.float64), [1] # empty left + yield [1], [2] # singletons no intersection + yield [1], [1] # singletons one intersection + yield [1, 2], [1] + yield [1, 2, 2], [2, 2] + yield [1, 2], [2, 1] + yield [1, 2, 3], [1, 2, 3] + # from numpy: + # https://github.com/numpy/numpy/blob/b0371ef240560e78b651a5d7c9407ae3212a3d56/numpy/lib/tests/test_arraysetops.py#L17 # noqa: E501 + yield [5, 7, 1, 2], [2, 4, 3, 1, 5] + yield [5, 5, 7, 1, 2], [2, 1, 4, 3, 3, 1, 5] + + pyfunc = intersect1d_3 + cfunc = jit(nopython=True)(pyfunc) + + for a, b in arrays(): + if isinstance(a, list): + a = List(a) + if isinstance(b, list): + b = List(b) + expected = pyfunc(a, b, assume_unique=False) + got = cfunc(a, b, assume_unique=False) + self.assertPreciseEqual(expected, got) + if len(np.unique(a)) == len(a) and len(np.unique(b)) == len(b): + expected = pyfunc(a, b, assume_unique=True) + got = cfunc(a, b, assume_unique=True) + self.assertPreciseEqual(expected, got) + + def test_intersect1d_errors(self): + np_pyfunc = intersect1d_3 + np_nbfunc = njit(np_pyfunc) + + a = np.array([1]) + b = np.array([2]) + self.disable_leak_check() + with 
self.assertRaises(TypingError): + np_nbfunc(a, b, "foo") + with self.assertRaises(TypingError): + np_nbfunc("foo", b, True) + with self.assertRaises(TypingError): + np_nbfunc(a, "foo", True) + + def test_count_nonzero(self): + + def arrays(): + yield np.array([]), None + yield np.zeros(10), None + yield np.arange(10), None + yield np.ones(10, dtype=np.bool_), 0 + yield np.arange(3 * 4 * 5).reshape(3, 4, 5), None + yield np.arange(3 * 4).reshape(3, 4), 0 + yield np.arange(3 * 4).reshape(3, 4), 1 + + pyfunc = count_nonzero + cfunc = jit(nopython=True)(pyfunc) + + for arr, axis in arrays(): + expected = pyfunc(arr, axis) + got = cfunc(arr, axis) + self.assertPreciseEqual(expected, got) + + def test_np_append(self): + def arrays(): + yield 2, 2, None + yield np.arange(10), 3, None + yield np.arange(10), np.arange(3), None + yield np.arange(10).reshape(5, 2), np.arange(3), None + yield np.array([[1, 2, 3], [4, 5, 6]]), np.array([[7, 8, 9]]), 0 + arr = np.array([[1, 2, 3], [4, 5, 6]]) + yield arr, arr, 1 + + pyfunc = append + cfunc = jit(nopython=True)(pyfunc) + + for arr, obj, axis in arrays(): + expected = pyfunc(arr, obj, axis) + got = cfunc(arr, obj, axis) + self.assertPreciseEqual(expected, got) + + def test_np_append_exceptions(self): + pyfunc = append + cfunc = jit(nopython=True)(pyfunc) + arr = np.array([[1, 2, 3], [4, 5, 6]]) + values = np.array([[7, 8, 9]]) + axis = 0 + + # first argument must be array-like + with self.assertRaises(TypingError) as raises: + cfunc(None, values, axis) + self.assertIn( + 'The first argument "arr" must be array-like', + str(raises.exception) + ) + + # second argument must also be array-like + with self.assertRaises(TypingError) as raises: + cfunc(arr, None, axis) + self.assertIn( + 'The second argument "values" must be array-like', + str(raises.exception) + ) + + # third argument must be either nonelike or an integer + with self.assertRaises(TypingError) as raises: + cfunc(arr, values, axis=0.0) + self.assertIn( + 'The third 
argument "axis" must be an integer', + str(raises.exception) + ) + # Exceptions leak references + self.disable_leak_check() + + def test_delete(self): + + def arrays(): + # array, obj + # + # an array-like type + yield [1, 2, 3, 4, 5], 3 + yield [1, 2, 3, 4, 5], [2, 3] + # 1d array, scalar + yield np.arange(10), 3 + yield np.arange(10), -3 # Negative obj + # 1d array, list + yield np.arange(10), [3, 5, 6] + yield np.arange(10), [2, 3, 4, 5] + # 3d array, scalar + yield np.arange(3 * 4 * 5).reshape(3, 4, 5), 2 + # 3d array, list + yield np.arange(3 * 4 * 5).reshape(3, 4, 5), [5, 30, 27, 8] + # slices + yield [1, 2, 3, 4], slice(1, 3, 1) + yield np.arange(10), slice(10) + + pyfunc = delete + cfunc = jit(nopython=True)(pyfunc) + + for arr, obj in arrays(): + expected = pyfunc(arr, obj) + got = cfunc(arr, obj) + self.assertPreciseEqual(expected, got) + + def test_delete_exceptions(self): + pyfunc = delete + cfunc = jit(nopython=True)(pyfunc) + self.disable_leak_check() + + with self.assertRaises(TypingError) as raises: + cfunc([1, 2], 3.14) + self.assertIn( + 'obj should be of Integer dtype', + str(raises.exception) + ) + + with self.assertRaises(TypingError) as raises: + cfunc(np.arange(10), [3.5, 5.6, 6.2]) + self.assertIn( + 'obj should be of Integer dtype', + str(raises.exception) + ) + + with self.assertRaises(TypingError) as raises: + cfunc(2, 3) + self.assertIn( + 'arr must be either an Array or a Sequence', + str(raises.exception) + ) + + with self.assertRaises(IndexError) as raises: + cfunc([1, 2], 3) + self.assertIn( + 'obj must be less than the len(arr)', + str(raises.exception), + ) + # Exceptions leak references + self.disable_leak_check() + + def diff_arrays(self): + """ + Some test arrays for np.diff() + """ + a = np.arange(12) ** 3 + yield a + b = a.reshape((3, 4)) + yield b + c = np.arange(24).reshape((3, 2, 4)) ** 3 + yield c + + def test_diff1(self): + pyfunc = diff1 + cfunc = jit(nopython=True)(pyfunc) + for arr in self.diff_arrays(): + expected = 
pyfunc(arr) + got = cfunc(arr) + self.assertPreciseEqual(expected, got) + + # 0-dim array + a = np.array(42) + with self.assertTypingError(): + cfunc(a) + + def test_diff2(self): + pyfunc = diff2 + cfunc = jit(nopython=True)(pyfunc) + for arr in self.diff_arrays(): + size = arr.shape[-1] + for n in (0, 1, 2, 3, size - 1, size, size + 1, 421): + expected = pyfunc(arr, n) + got = cfunc(arr, n) + self.assertPreciseEqual(expected, got) + + def test_diff2_exceptions(self): + pyfunc = diff2 + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + # 0-dim array + arr = np.array(42) + with self.assertTypingError(): + cfunc(arr, 1) + + # Invalid `n` + arr = np.arange(10) + for n in (-1, -2, -42): + with self.assertRaises(ValueError) as raises: + cfunc(arr, n) + self.assertIn("order must be non-negative", str(raises.exception)) + # Exceptions leak references + self.disable_leak_check() + + def test_isscalar(self): + def values(): + yield 3 + yield np.asarray([3]) + yield (3,) + yield 3j + yield 'numba' + yield int(10) + yield np.int16(12345) + yield 4.234 + yield True + yield None + yield np.timedelta64(10, 'Y') + yield np.datetime64('nat') + yield np.datetime64(1, 'Y') + + pyfunc = isscalar + cfunc = jit(nopython=True)(pyfunc) + for x in values(): + expected = pyfunc(x) + got = cfunc(x) + self.assertEqual(expected, got, x) + + def test_isobj_functions(self): + def values(): + yield 1 + yield 1 + 0j + yield np.asarray([3, 1 + 0j, True]) + yield "hello world" + + @jit(nopython=True) + def optional_fn(x, cond, cfunc): + y = x if cond else None + return cfunc(y) + + pyfuncs = [iscomplexobj, isrealobj] + for pyfunc in pyfuncs: + cfunc = jit(nopython=True)(pyfunc) + for x in values(): + expected = pyfunc(x) + got = cfunc(x) + self.assertEqual(expected, got) + + # optional type + expected_optional = optional_fn.py_func(x, True, pyfunc) + got_optional = optional_fn(x, True, cfunc) + self.assertEqual(expected_optional, got_optional) + + # 
none type + expected_none = optional_fn.py_func(x, False, pyfunc) + got_none = optional_fn(x, False, cfunc) + self.assertEqual(expected_none, got_none) + + self.assertEqual(len(cfunc.signatures), 8) + + def test_is_real_or_complex(self): + def values(): + yield np.array([1 + 1j, 1 + 0j, 4.5, 3, 2, 2j]) + yield np.array([1, 2, 3]) + yield 3 + yield 12j + yield 1 + 4j + yield 10 + 0j + yield (1 + 4j, 2 + 0j) + yield np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) + + pyfuncs = [iscomplex, isreal] + for pyfunc in pyfuncs: + cfunc = jit(nopython=True)(pyfunc) + for x in values(): + expected = pyfunc(x) + got = cfunc(x) + self.assertPreciseEqual(expected, got) + + def test_isneg_or_ispos_inf(self): + def values(): + yield -np.inf, None + yield np.inf, None + yield np.inf, None + yield np.asarray([-np.inf, 0., np.inf]), None + yield -np.inf, np.zeros(1, dtype=np.bool_) + yield np.inf, np.zeros(1, dtype=np.bool_) + yield np.inf, np.zeros(1, dtype=np.bool_) + yield -np.inf, np.empty(12) + yield np.asarray([-np.inf, 0., np.inf]), np.zeros(3, dtype=np.bool_) + + pyfuncs = [isneginf, isposinf] + for pyfunc in pyfuncs: + cfunc = jit(nopython=True)(pyfunc) + for x, out in values(): + expected = pyfunc(x, out) + got = cfunc(x, out) + self.assertPreciseEqual(expected, got) + + def test_isclose(self): + rtol = 1e-5 + atol = 1e-8 + arr = np.array([100, 1000]) + aran = np.arange(8).reshape((2, 2, 2)) + kw = {'rtol': rtol, 'atol': atol} + + def values(): + yield 1e10, 1.00001e10, {} + yield 1e10, np.nan, {} + yield np.array([1e-8, 1e-7]), np.array([0.0, 0.0]), {} + yield np.array([1e10, 1e-7]), np.array([1.00001e10, 1e-8]), {} + yield np.array([1e10, 1e-8]), np.array([1.00001e10, 1e-9]), {} + yield np.array([1e10, 1e-8]), np.array([1.0001e10, 1e-9]), {} + yield np.array([1.0, np.nan]), np.array([1.0, np.nan]), {} + yield np.array([1.0, np.nan]), np.array([1.0, np.nan]), {'equal_nan': True} # noqa + yield np.array([np.nan, np.nan]), np.array([1.0, np.nan]), {'equal_nan': True} # noqa + 
yield np.array([1e-100, 1e-7]), np.array([0.0, 0.0]), {'atol': 0.0} + yield np.array([1e-10, 1e-10]), np.array([1e-20, 0.0]), {} + yield np.array([1e-10, 1e-10]), np.array([1e-20, 0.999999e-10]), {'atol': 0.0} # noqa + yield np.array([1, np.inf, 2]), np.array([3, np.inf, 4]), kw + yield np.array([atol, np.inf, -np.inf, np.nan]), np.array([0]), kw + yield np.array([atol, np.inf, -np.inf, np.nan]), 0, kw + yield 0, np.array([atol, np.inf, -np.inf, np.nan]), kw + + # tests taken from + # https://github.com/numpy/numpy/blob/aac965af6032b69d5cb515ad785cc9a331e816f4/numpy/core/tests/test_numeric.py#L2298-L2335 # noqa: E501 + + # all close tests + yield np.array([0, 1]), np.array([1, 0]), kw + yield arr, arr, kw + yield np.array([1]), np.array([1 + rtol + atol]), kw + yield arr, arr + arr * rtol, kw + yield arr, arr + arr * rtol + atol, kw + yield aran, aran + aran * rtol, kw + yield np.inf, np.inf, kw + yield -np.inf, np.inf, kw + yield np.inf, np.array([np.inf]), kw + yield np.array([np.inf, -np.inf]), np.array([np.inf, -np.inf]), kw + + # none close tests + yield np.array([np.inf, 0]), np.array([1, np.inf]), kw + yield np.array([np.inf, -np.inf]), np.array([1, 0]), kw + yield np.array([np.inf, np.inf]), np.array([1, -np.inf]), kw + yield np.array([np.inf, np.inf]), np.array([1, 0]), kw + yield np.array([np.nan, 0]), np.array([np.nan, -np.inf]), kw + yield np.array([atol * 2]), np.array([0]), kw + yield np.array([1]), np.array([1 + rtol + atol * 2]), kw + yield aran, aran + rtol * 1.1 * aran + atol * 1.1, kw + yield np.array(np.array([np.inf, 1])), np.array(np.array([0, np.inf])), kw # noqa + + # some close tests + yield np.array([np.inf, 0]), np.array([atol * 2, atol * 2]), kw + yield np.array([np.inf, 0]), np.array([np.inf, atol * 2]), kw + yield np.array([atol, 1, 1e6 * (1 + 2 * rtol) + atol]), np.array([0, np.nan, 1e6]), kw # noqa + yield np.arange(3), np.array([0, 1, 2.1]), kw + yield np.nan, np.array([np.nan, np.nan, np.nan]), kw + yield np.array([0]), 
np.array([atol, np.inf, -np.inf, np.nan]), kw + yield 0, np.array([atol, np.inf, -np.inf, np.nan]), kw + + pyfunc = isclose + cfunc = jit(nopython=True)(pyfunc) + for a, b, kwargs in values(): + expected = pyfunc(a, b, **kwargs) + got = cfunc(a, b, **kwargs) + if isinstance(expected, np.bool_): + self.assertEqual(expected, got) + else: + self.assertTrue(np.array_equal(expected, got)) + + def isclose_exception(self): + pyfunc = isclose + cfunc = jit(nopython=True)(pyfunc) + inps = [ + (np.asarray([1e10, 1e-9, np.nan]), + np.asarray([1.0001e10, 1e-9]), + 1e-05, 1e-08, False, + "shape mismatch: objects cannot be broadcast to a single shape", + ValueError), + ('hello', 3, False, 1e-08, False, + 'The first argument "a" must be array-like', + TypingError), + (3, 'hello', False, 1e-08, False, + 'The second argument "b" must be array-like', + TypingError), + (2, 3, False, 1e-08, False, + 'The third argument "rtol" must be a floating point', + TypingError), + (2, 3, 1e-05, False, False, + 'The fourth argument "atol" must be a floating point', + TypingError), + (2, 3, 1e-05, 1e-08, 1, + 'The fifth argument "equal_nan" must be a boolean', + TypingError), + ] + + for a, b, rtol, atol, equal_nan, exc_msg, exc in inps: + with self.assertRaisesRegex(exc, exc_msg): + cfunc(a, b, rtol, atol, equal_nan) + + def bincount_sequences(self): + """ + Some test sequences for np.bincount() + """ + a = [1, 2, 5, 2, 3, 20] + b = np.array([5, 8, 42, 5]) + c = self.rnd.randint(0, 100, size=300).astype(np.int8) + return (a, b, c) + + def test_bincount1(self): + pyfunc = bincount1 + cfunc = jit(nopython=True)(pyfunc) + for seq in self.bincount_sequences(): + expected = pyfunc(seq) + got = cfunc(seq) + self.assertPreciseEqual(expected, got) + + def test_bincount1_exceptions(self): + pyfunc = bincount1 + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + # Negative input + with self.assertRaises(ValueError) as raises: + cfunc([2, -1]) + 
self.assertIn("first argument must be non-negative", + str(raises.exception)) + # Exceptions leak references + self.disable_leak_check() + + def test_bincount2(self): + pyfunc = bincount2 + cfunc = jit(nopython=True)(pyfunc) + for seq in self.bincount_sequences(): + w = [math.sqrt(x) - 2 for x in seq] + # weights as list, then array, mixed types, check upcast is ok + for weights in (w, np.array(w), seq, np.array(seq)): + expected = pyfunc(seq, weights) + got = cfunc(seq, weights) + self.assertPreciseEqual(expected, got) + + def test_bincount2_exceptions(self): + pyfunc = bincount2 + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + # Negative input + with self.assertRaises(ValueError) as raises: + cfunc([2, -1], [0, 0]) + self.assertIn("first argument must be non-negative", + str(raises.exception)) + + # Mismatching input sizes + with self.assertRaises(ValueError) as raises: + cfunc([2, -1], [0]) + self.assertIn("weights and list don't have the same length", + str(raises.exception)) + + def test_bincount3(self): + pyfunc = bincount3 + cfunc = jit(nopython=True)(pyfunc) + for seq in self.bincount_sequences(): + a_max = max(seq) + # Length should be a_max in the first case, minlength in the second + for minlength in (a_max, a_max + 2): + expected = pyfunc(seq, None, minlength) + got = cfunc(seq, None, minlength) + self.assertEqual(len(expected), len(got)) + self.assertPreciseEqual(expected, got) + + def test_bincount3_exceptions(self): + pyfunc = bincount3 + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + # Negative input + with self.assertRaises(ValueError) as raises: + cfunc([2, -1], [0, 0]) + self.assertIn("first argument must be non-negative", + str(raises.exception)) + + # Negative minlength + with self.assertRaises(ValueError) as raises: + cfunc([17, 38], None, -1) + self.assertIn("'minlength' must not be negative", + str(raises.exception)) + + def 
test_searchsorted(self): + pyfunc = searchsorted + cfunc = jit(nopython=True)(pyfunc) + + pyfunc_left = searchsorted_left + cfunc_left = jit(nopython=True)(pyfunc_left) + + pyfunc_right = searchsorted_right + cfunc_right = jit(nopython=True)(pyfunc_right) + + def check(a, v): + expected = pyfunc(a, v) + got = cfunc(a, v) + self.assertPreciseEqual(expected, got) + + expected = pyfunc_left(a, v) + got = cfunc_left(a, v) + self.assertPreciseEqual(expected, got) + + expected = pyfunc_right(a, v) + got = cfunc_right(a, v) + self.assertPreciseEqual(expected, got) + + # First with integer values (no NaNs) + bins = np.arange(5) ** 2 + values = np.arange(20) - 1 + + for a in (bins, list(bins)): + # Scalar values + for v in values: + check(a, v) + # Array values + for v in (values, values.reshape((4, 5))): + check(a, v) + # Sequence values + check(a, list(values)) + + # Second with float values (including NaNs) + bins = np.float64(list(bins) + [float('nan')] * 7) / 2.0 + values = np.arange(20) - 0.5 + + for a in (bins, list(bins)): + # Scalar values + for v in values: + check(a, v) + # Array values + for v in (values, values.reshape((4, 5))): + check(a, v) + # Sequence values + check(a, list(values)) + + # nonsense value for 'side' raises TypingError + def bad_side(a, v): + return np.searchsorted(a, v, side='nonsense') + cfunc = jit(nopython=True)(bad_side) + with self.assertTypingError(): + cfunc([1,2], 1) + + # non-constant value for 'side' raises TypingError + def nonconst_side(a, v, side='left'): + return np.searchsorted(a, v, side=side) + cfunc = jit(nopython=True)(nonconst_side) + with self.assertTypingError(): + cfunc([1,2], 1, side='right') + + # Test unordered values + a = np.array([1, 2, 0]) + v = np.array( + [ + [5, 4], + [6, 7], + [2, 1], + [0, 3], + ] + ) + check(a, v) + + a = np.array([9, 1, 4, 2, 0, 3, 7, 6, 8]) + v = np.array( + [ + [5, 10], + [10, 5], + [-1, 5], + ] + ) + check(a, v) + + def test_searchsorted_supplemental(self): + pyfunc = searchsorted + 
cfunc = jit(nopython=True)(pyfunc) + + pyfunc_left = searchsorted_left + cfunc_left = jit(nopython=True)(pyfunc_left) + + pyfunc_right = searchsorted_right + cfunc_right = jit(nopython=True)(pyfunc_right) + + def check(a, v): + expected = pyfunc(a, v) + got = cfunc(a, v) + self.assertPreciseEqual(expected, got) + + expected = pyfunc_left(a, v) + got = cfunc_left(a, v) + self.assertPreciseEqual(expected, got) + + expected = pyfunc_right(a, v) + got = cfunc_right(a, v) + self.assertPreciseEqual(expected, got) + + element_pool = list(range(-5, 50)) + element_pool += [np.nan] * 5 + [np.inf] * 3 + [-np.inf] * 3 + + for i in range(1000): + sample_size = self.rnd.choice([5, 10, 25]) + + # `a` and `v` not sorted; either may have repeating values + a = self.rnd.choice(element_pool, sample_size) + v = self.rnd.choice(element_pool, sample_size + (i % 3 - 1)) + + # output should match numpy regardless of whether `a` is sorted + check(a, v) + check(np.sort(a), v) + + ones = np.ones(5) + nans = np.full(len(ones), fill_value=np.nan) + check(ones, ones) + + # `a` and / or `v` full of nans + check(ones, nans) + check(nans, ones) + check(nans, nans) + + # `v` is zero size + a = np.arange(1) + v = np.arange(0) + check(a, v) + + # `a` and `v` booleans + a = np.array([False, False, True, True]) + v = np.array([False, True]) + check(a, v) + + # `v` is a (scalar) boolean + a = [1, 2, 3] + v = True + check(a, v) + + # `a` and `v` arrays of strings + a = np.array(['1', '2', '3']) + v = np.array(['2', '4']) + check(a, v) + + def test_searchsorted_complex(self): + pyfunc = searchsorted + cfunc = jit(nopython=True)(pyfunc) + + pyfunc_left = searchsorted_left + cfunc_left = jit(nopython=True)(pyfunc_left) + + pyfunc_right = searchsorted_right + cfunc_right = jit(nopython=True)(pyfunc_right) + + def check(a, v): + expected = pyfunc(a, v) + got = cfunc(a, v) + self.assertPreciseEqual(expected, got) + + expected = pyfunc_left(a, v) + got = cfunc_left(a, v) + self.assertPreciseEqual(expected, got) 
+ + expected = pyfunc_right(a, v) + got = cfunc_right(a, v) + self.assertPreciseEqual(expected, got) + + pool = [0, 1, np.nan] + element_pool = [complex(*c) for c in itertools.product(pool, pool)] + + for i in range(100): + sample_size = self.rnd.choice([3, 5, len(element_pool)]) + + # `a` and `v` not sorted; either may have repeating values + a = self.rnd.choice(element_pool, sample_size) + v = self.rnd.choice(element_pool, sample_size + (i % 3 - 1)) + + # output should match numpy regardless of whether `a` is sorted + check(a, v) + check(np.sort(a), v) + + # check type promotion (a complex; v not so much) + check(a=np.array(element_pool), v=np.arange(2)) + + def test_digitize(self): + pyfunc = digitize + cfunc = jit(nopython=True)(pyfunc) + + def check(*args): + expected = pyfunc(*args) + got = cfunc(*args) + self.assertPreciseEqual(expected, got) + + values = np.float64((0, 0.99, 1, 4.4, 4.5, 7, 8, 9, 9.5, + float('inf'), float('-inf'), float('nan'))) + assert len(values) == 12 + self.rnd.shuffle(values) + + bins1 = np.float64([1, 3, 4.5, 8]) + bins2 = np.float64([1, 3, 4.5, 8, float('inf'), float('-inf')]) + bins3 = np.float64([1, 3, 4.5, 8, float('inf'), float('-inf')] + + [float('nan')] * 10) + + all_bins = [bins1, bins2, bins3] + xs = [values, values.reshape((3, 4))] + + # 2-ary digitize() + for bins in all_bins: + bins.sort() + for x in xs: + check(x, bins) + check(x, bins[::-1]) + + # 3-ary digitize() + for bins in all_bins: + for right in (True, False): + check(values, bins, right) + check(values, bins[::-1], right) + + # Sequence input + check(list(values), bins1) + + # per https://github.com/numba/numba/issues/8768 + check(np.array([np.nan, 1]), np.array([1.5, np.nan])) + + def test_digitize_non_monotonic_bins(self): + # Exceptions leak references + self.disable_leak_check() + + pyfunc = digitize + cfunc = jit(nopython=True)(pyfunc) + + def check_error(*args): + for fn in (pyfunc, cfunc): + with self.assertRaises(ValueError) as raises: + fn(*args) + + 
msg = 'bins must be monotonically increasing or decreasing' + self.assertIn(msg, str(raises.exception)) + + x = np.array([np.nan, 1]) + bins = np.array([np.nan, 1.5, 2.3, np.nan]) + check_error(x, bins) + + x = [-1, 0, 1, 2] + bins = [0, 0, 1, 0] + check_error(x, bins) + + bins = [1, 1, 0, 1] + check_error(x, bins) + + def test_digitize_supplemental(self): + # inspired by the tests in + # https://github.com/numpy/numpy/blob/a277f62/numpy/lib/tests/test_function_base.py + + pyfunc = digitize + cfunc = jit(nopython=True)(pyfunc) + + def check(*args): + expected = pyfunc(*args) + got = cfunc(*args) + self.assertPreciseEqual(expected, got) + + # forward + x = np.arange(-6, 5) + bins = np.arange(-5, 5) + check(x, bins) + + # reverse + x = np.arange(5, -6, -1) + bins = np.arange(5, -5, -1) + check(x, bins) + + # random + x = self.rnd.rand(10) + bins = np.linspace(x.min(), x.max(), 10) + check(x, bins) + + # right_basic + x = [1, 5, 4, 10, 8, 11, 0] + bins = [1, 5, 10] + check(x, bins) + + # right_open + x = np.arange(-6, 5) + bins = np.arange(-6, 4) + check(x, bins, True) + + # right_open_reverse + x = np.arange(5, -6, -1) + bins = np.arange(4, -6, -1) + check(x, bins, True) + + # right_open_random + x = self.rnd.rand(10) + bins = np.linspace(x.min(), x.max(), 10) + check(x, bins, True) + + # monotonic + x = [-1, 0, 1, 2] + bins = [0, 0, 1] + check(x, bins) + + bins = [1, 1, 0] + check(x, bins) + + bins = [1, 1, 1, 1] + check(x, bins) + + # large_integers_increasing + x = 2 ** 54 # loses precision in a float + check([x], [x - 1, x + 1]) + + def test_digitize_raise_if_x_complex(self): + # Exceptions leak references + self.disable_leak_check() + + pyfunc = digitize + cfunc = jit(nopython=True)(pyfunc) + + x = np.array([1 + 1j]) + y = np.array([1., 3., 4.5, 8.]) + msg = 'x may not be complex' + + for func in pyfunc, cfunc: + with self.assertTypingError() as raises: + func(x, y) + self.assertIn(msg, str(raises.exception)) + + def test_histogram(self): + pyfunc = histogram + 
cfunc = jit(nopython=True)(pyfunc) + + def check(*args): + pyhist, pybins = pyfunc(*args) + chist, cbins = cfunc(*args) + self.assertPreciseEqual(pyhist, chist) + # There can be a slight discrepancy in the linspace() result + # when `bins` is an integer... + self.assertPreciseEqual(pybins, cbins, prec='double', ulps=2) + + def check_values(values): + # Explicit bins array + # (note Numpy seems to not support NaN bins) + bins = np.float64([1, 3, 4.5, 8]) + check(values, bins) + check(values.reshape((3, 4)), bins) + + # Explicit number of bins + check(values, 7) + + # Explicit number of bins and bins range + check(values, 7, (1.0, 13.5)) + + # Implicit bins=10 + check(values) + + values = np.float64((0, 0.99, 1, 4.4, 4.5, 7, 8, + 9, 9.5, 42.5, -1.0, -0.0)) + assert len(values) == 12 + self.rnd.shuffle(values) + + check_values(values) + + def _test_correlate_convolve(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + # only 1d arrays are accepted, test varying lengths + # and varying dtype + lengths = (1, 2, 3, 7) + dts = [np.int8, np.int32, np.int64, np.float32, np.float64, + np.complex64, np.complex128] + modes = ["full", "valid", "same"] + + for dt1, dt2, n, m, mode in itertools.product( + dts, dts, lengths, lengths, modes + ): + a = np.arange(n, dtype=dt1) + v = np.arange(m, dtype=dt2) + + if np.issubdtype(dt1, np.complexfloating): + a = (a + 1j * a).astype(dt1) + if np.issubdtype(dt2, np.complexfloating): + v = (v + 1j * v).astype(dt2) + + expected = pyfunc(a, v, mode=mode) + got = cfunc(a, v, mode=mode) + + self.assertPreciseEqual(expected, got) + + _a = np.arange(12).reshape(4, 3) + _b = np.arange(12) + for x, y in [(_a, _b), (_b, _a)]: + with self.assertRaises(TypingError) as raises: + cfunc(x, y) + msg = 'only supported on 1D arrays' + self.assertIn(msg, str(raises.exception)) + + def test_correlate(self): + self._test_correlate_convolve(correlate) + + def _test_correlate_convolve_exceptions(self, fn): + # Exceptions leak references + 
self.disable_leak_check() + + # convolve raises if either array has a 0 dimension + _a = np.ones(shape=(0,)) + _b = np.arange(5) + cfunc = jit(nopython=True)(fn) + for x, y in [(_a, _b), (_b, _a)]: + with self.assertRaises(ValueError) as raises: + cfunc(x, y) + if len(x) == 0: + self.assertIn("'a' cannot be empty", str(raises.exception)) + else: + self.assertIn("'v' cannot be empty", str(raises.exception)) + + with self.assertRaises(ValueError) as raises: + cfunc(_b, _b, mode="invalid mode") + + self.assertIn("Invalid 'mode'", str(raises.exception)) + + def test_correlate_exceptions(self): + # correlate supported 0 dimension arrays until 1.18 + self._test_correlate_convolve_exceptions(correlate) + + def test_convolve(self): + self._test_correlate_convolve(convolve) + + def test_convolve_exceptions(self): + self._test_correlate_convolve_exceptions(convolve) + + def _check_output(self, pyfunc, cfunc, params, abs_tol=None): + expected = pyfunc(**params) + got = cfunc(**params) + self.assertPreciseEqual(expected, got, abs_tol=abs_tol) + + def test_vander_basic(self): + pyfunc = vander + cfunc = jit(nopython=True)(pyfunc) + _check_output = partial(self._check_output, pyfunc, cfunc) + + def _check(x): + n_choices = [None, 0, 1, 2, 3, 4] + increasing_choices = [True, False] + + # N and increasing defaulted + params = {'x': x} + _check_output(params) + + # N provided and increasing defaulted + for n in n_choices: + params = {'x': x, 'N': n} + _check_output(params) + + # increasing provided and N defaulted: + for increasing in increasing_choices: + params = {'x': x, 'increasing': increasing} + _check_output(params) + + # both n and increasing supplied + for n in n_choices: + for increasing in increasing_choices: + params = {'x': x, 'N': n, 'increasing': increasing} + _check_output(params) + + _check(np.array([1, 2, 3, 5])) + _check(np.arange(7) - 10.5) + _check(np.linspace(3, 10, 5)) + _check(np.array([1.2, np.nan, np.inf, -np.inf])) + _check(np.array([])) + 
_check(np.arange(-5, 5) - 0.3) + + # # boolean array + _check(np.array([True] * 5 + [False] * 4)) + + # cycle through dtypes to check type promotion a la numpy + for dtype in np.int32, np.int64, np.float32, np.float64: + _check(np.arange(10, dtype=dtype)) + + # non array inputs + _check([0, 1, 2, 3]) + _check((4, 5, 6, 7)) + _check((0.0, 1.0, 2.0)) + _check(()) + + # edge cases + _check((3, 4.444, 3.142)) + _check((True, False, 4)) + + def test_vander_exceptions(self): + pyfunc = vander + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + x = np.arange(5) - 0.5 + + def _check_n(N): + with self.assertTypingError() as raises: + cfunc(x, N=N) + self.assertIn("Second argument N must be None or an integer", + str(raises.exception)) + + for N in 1.1, True, np.inf, [1, 2]: + _check_n(N) + + with self.assertRaises(ValueError) as raises: + cfunc(x, N=-1) + self.assertIn("Negative dimensions are not allowed", + str(raises.exception)) + + def _check_1d(x): + with self.assertRaises(ValueError) as raises: + cfunc(x) + self.assertEqual("x must be a one-dimensional array or sequence.", + str(raises.exception)) + + x = np.arange(27).reshape((3, 3, 3)) + _check_1d(x) + + x = ((2, 3), (4, 5)) + _check_1d(x) + + def test_tri_n_basic(self): + pyfunc = tri_n + cfunc = jit(nopython=True)(pyfunc) + _check = partial(self._check_output, pyfunc, cfunc) + + def n_variations(): + return np.arange(-4, 8) # number of rows + + # N supplied, M and k defaulted + for n in n_variations(): + params = {'N': n} + _check(params) + + def test_tri_n_m_basic(self): + pyfunc = tri_n_m + cfunc = jit(nopython=True)(pyfunc) + _check = partial(self._check_output, pyfunc, cfunc) + + def n_variations(): + return np.arange(-4, 8) # number of rows + + def m_variations(): + # number of columns + return itertools.chain.from_iterable(([None], range(-5, 9))) + + # N supplied, M and k defaulted + for n in n_variations(): + params = {'N': n} + _check(params) + + # N and M 
supplied, k defaulted + for n in n_variations(): + for m in m_variations(): + params = {'N': n, 'M': m} + _check(params) + + def test_tri_n_k_basic(self): + pyfunc = tri_n_k + cfunc = jit(nopython=True)(pyfunc) + _check = partial(self._check_output, pyfunc, cfunc) + + def n_variations(): + return np.arange(-4, 8) # number of rows + + def k_variations(): + return np.arange(-10, 10) # offset + + # N supplied, M and k defaulted + for n in n_variations(): + params = {'N': n} + _check(params) + + # N and k supplied, M defaulted + for n in n_variations(): + for k in k_variations(): + params = {'N': n, 'k': k} + _check(params) + + def test_tri_n_m_k_basic(self): + pyfunc = tri_n_m_k + cfunc = jit(nopython=True)(pyfunc) + _check = partial(self._check_output, pyfunc, cfunc) + + def n_variations(): + return np.arange(-4, 8) # number of rows + + def m_variations(): + # number of columns + return itertools.chain.from_iterable(([None], range(-5, 9))) + + def k_variations(): + return np.arange(-10, 10) # offset + + # N supplied, M and k defaulted + for n in n_variations(): + params = {'N': n} + _check(params) + + # N and M supplied, k defaulted + for n in n_variations(): + for m in m_variations(): + params = {'N': n, 'M': m} + _check(params) + + # N and k supplied, M defaulted + for n in n_variations(): + for k in k_variations(): + params = {'N': n, 'k': k} + _check(params) + + # N, M and k supplied + for n in n_variations(): + for k in k_variations(): + for m in m_variations(): + params = {'N': n, 'M': m, 'k': k} + _check(params) + + def test_tri_exceptions(self): + pyfunc = tri_n_m_k + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + def _check(k): + with self.assertTypingError() as raises: + cfunc(5, 6, k=k) + assert "k must be an integer" in str(raises.exception) + + for k in 1.5, True, np.inf, [1, 2]: + _check(k) + + def _triangular_matrix_tests_m(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + + def _check(arr): + 
expected = pyfunc(arr) + got = cfunc(arr) + # TODO: Contiguity of result not consistent with numpy + self.assertEqual(got.dtype, expected.dtype) + np.testing.assert_array_equal(got, expected) + + return self._triangular_matrix_tests_inner(self, pyfunc, _check) + + def _triangular_matrix_tests_m_k(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + + def _check(arr): + for k in itertools.chain.from_iterable(([None], range(-10, 10))): + if k is None: + params = {} + else: + params = {'k': k} + expected = pyfunc(arr, **params) + got = cfunc(arr, **params) + # TODO: Contiguity of result not consistent with numpy + self.assertEqual(got.dtype, expected.dtype) + np.testing.assert_array_equal(got, expected) + + return self._triangular_matrix_tests_inner(self, pyfunc, _check) + + @staticmethod + def _triangular_matrix_tests_inner(self, pyfunc, _check): + + def check_odd(a): + _check(a) + a = a.reshape((9, 7)) + _check(a) + a = a.reshape((7, 1, 3, 3)) + _check(a) + _check(a.T) + + def check_even(a): + _check(a) + a = a.reshape((4, 16)) + _check(a) + a = a.reshape((4, 2, 2, 4)) + _check(a) + _check(a.T) + + check_odd(np.arange(63) + 10.5) + check_even(np.arange(64) - 10.5) + + # edge cases + _check(np.arange(360).reshape(3, 4, 5, 6)) + _check(np.array([])) + _check(np.arange(9).reshape((3, 3))[::-1]) + _check(np.arange(9).reshape((3, 3), order='F')) + + arr = (np.arange(64) - 10.5).reshape((4, 2, 2, 4)) + _check(arr) + _check(np.asfortranarray(arr)) + + def _triangular_matrix_exceptions(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + a = np.ones((5, 6)) + with self.assertTypingError() as raises: + cfunc(a, k=1.5) + self.assertIn("k must be an integer", str(raises.exception)) + + def _triangular_indices_tests_base(self, pyfunc, args): + cfunc = jit(nopython=True)(pyfunc) + + for x in args: + expected = pyfunc(*x) + got = cfunc(*x) + self.assertEqual(type(expected), type(got)) + 
self.assertEqual(len(expected), len(got)) + for e, g in zip(expected, got): + np.testing.assert_array_equal(e, g) + + def _triangular_indices_tests_n(self, pyfunc): + self._triangular_indices_tests_base( + pyfunc, + [[n] for n in range(10)] + ) + + def _triangular_indices_tests_n_k(self, pyfunc): + self._triangular_indices_tests_base( + pyfunc, + [[n, k] for n in range(10) for k in range(-n - 1, n + 2)] + ) + + def _triangular_indices_tests_n_m(self, pyfunc): + self._triangular_indices_tests_base( + pyfunc, + [[n, m] for n in range(10) for m in range(2 * n)] + ) + + def _triangular_indices_tests_n_k_m(self, pyfunc): + self._triangular_indices_tests_base( + pyfunc, + [[n, k, m] for n in range(10) + for k in range(-n - 1, n + 2) + for m in range(2 * n)] + ) + + # Check jitted version works with default values for kwargs + cfunc = jit(nopython=True)(pyfunc) + cfunc(1) + + def _triangular_indices_from_tests_arr(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + + for dtype in [int, float, bool]: + for n,m in itertools.product(range(10), range(10)): + arr = np.ones((n, m), dtype) + expected = pyfunc(arr) + got = cfunc(arr) + self.assertEqual(type(expected), type(got)) + self.assertEqual(len(expected), len(got)) + for e, g in zip(expected, got): + np.testing.assert_array_equal(e, g) + + def _triangular_indices_from_tests_arr_k(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + + for dtype in [int, float, bool]: + for n,m in itertools.product(range(10), range(10)): + arr = np.ones((n, m), dtype) + for k in range(-10, 10): + expected = pyfunc(arr) + got = cfunc(arr) + self.assertEqual(type(expected), type(got)) + self.assertEqual(len(expected), len(got)) + for e, g in zip(expected, got): + np.testing.assert_array_equal(e, g) + + def _triangular_indices_exceptions(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + parameters = pysignature(pyfunc).parameters + + with self.assertTypingError() as raises: + cfunc(1.0) + self.assertIn("n must be an integer", 
str(raises.exception)) + + if 'k' in parameters: + with self.assertTypingError() as raises: + cfunc(1, k=1.0) + self.assertIn("k must be an integer", str(raises.exception)) + + if 'm' in parameters: + with self.assertTypingError() as raises: + cfunc(1, m=1.0) + self.assertIn("m must be an integer", str(raises.exception)) + + def _triangular_indices_from_exceptions(self, pyfunc, test_k=True): + cfunc = jit(nopython=True)(pyfunc) + + for ndims in [0, 1, 3]: + a = np.ones([5] * ndims) + with self.assertTypingError() as raises: + cfunc(a) + self.assertIn("input array must be 2-d", str(raises.exception)) + + if test_k: + a = np.ones([5, 5]) + with self.assertTypingError() as raises: + cfunc(a, k=0.5) + self.assertIn("k must be an integer", str(raises.exception)) + + def test_tril_basic(self): + self._triangular_matrix_tests_m(tril_m) + self._triangular_matrix_tests_m_k(tril_m_k) + + def test_tril_exceptions(self): + self._triangular_matrix_exceptions(tril_m_k) + + def test_tril_indices(self): + self._triangular_indices_tests_n(tril_indices_n) + self._triangular_indices_tests_n_k(tril_indices_n_k) + self._triangular_indices_tests_n_m(tril_indices_n_m) + self._triangular_indices_tests_n_k_m(tril_indices_n_k_m) + self._triangular_indices_exceptions(tril_indices_n) + self._triangular_indices_exceptions(tril_indices_n_k) + self._triangular_indices_exceptions(tril_indices_n_m) + self._triangular_indices_exceptions(tril_indices_n_k_m) + + def test_tril_indices_from(self): + self._triangular_indices_from_tests_arr(tril_indices_from_arr) + self._triangular_indices_from_tests_arr_k(tril_indices_from_arr_k) + self._triangular_indices_from_exceptions(tril_indices_from_arr, False) + self._triangular_indices_from_exceptions(tril_indices_from_arr_k, True) + + def test_triu_basic(self): + self._triangular_matrix_tests_m(triu_m) + self._triangular_matrix_tests_m_k(triu_m_k) + + def test_triu_exceptions(self): + self._triangular_matrix_exceptions(triu_m_k) + + def 
test_triu_indices(self): + self._triangular_indices_tests_n(triu_indices_n) + self._triangular_indices_tests_n_k(triu_indices_n_k) + self._triangular_indices_tests_n_m(triu_indices_n_m) + self._triangular_indices_tests_n_k_m(triu_indices_n_k_m) + self._triangular_indices_exceptions(triu_indices_n) + self._triangular_indices_exceptions(triu_indices_n_k) + self._triangular_indices_exceptions(triu_indices_n_m) + self._triangular_indices_exceptions(triu_indices_n_k_m) + + def test_triu_indices_from(self): + self._triangular_indices_from_tests_arr(triu_indices_from_arr) + self._triangular_indices_from_tests_arr_k(triu_indices_from_arr_k) + self._triangular_indices_from_exceptions(triu_indices_from_arr, False) + self._triangular_indices_from_exceptions(triu_indices_from_arr_k, True) + + def test_indices_basic(self): + pyfunc = np_indices + cfunc = njit(np_indices) + + def inputs(): + # Taken from https://github.com/numpy/numpy/blob/db4f43983cb938f12c311e1f5b7165e270c393b4/numpy/core/tests/test_numeric.py#L3383-L3407 # noqa: E501 + yield (4, 3) + yield (4,) + yield (0,) + yield (2, 2, 3, 5) + + for dims in inputs(): + self.assertPreciseEqual(pyfunc(dims), cfunc(dims)) + + def test_indices_exception(self): + cfunc = njit(np_indices) + + self.disable_leak_check() + + errmsg = 'The argument "dimensions" must be a tuple of integers' + with self.assertRaises(TypingError) as raises: + cfunc("abc") + self.assertIn(errmsg, + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc((2.0, 3.0)) + self.assertIn(errmsg, + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc((2, 3.0)) + self.assertIn(errmsg, + str(raises.exception)) + + def partition_sanity_check(self, pyfunc, cfunc, a, kth): + # as NumPy uses a different algorithm, we do not expect to + # match outputs exactly... 
+ expected = pyfunc(a, kth) + got = cfunc(a, kth) + + # but we do expect the unordered collection of elements up to the + # kth to tie out + self.assertPreciseEqual(np.unique(expected[:kth]), np.unique(got[:kth])) + + # likewise the unordered collection of elements from the kth onwards + self.assertPreciseEqual(np.unique(expected[kth:]), np.unique(got[kth:])) + + def argpartition_sanity_check(self, pyfunc, cfunc, a, kth): + # as NumPy uses a different algorithm, we do not expect to + # match outputs exactly... + expected = pyfunc(a, kth) + got = cfunc(a, kth) + + # but we do expect the unordered collection of elements up to the + # kth to tie out + self.assertPreciseEqual(np.unique(a[expected[:kth]]), + np.unique(a[got[:kth]])) + + # likewise the unordered collection of elements from the kth onwards + self.assertPreciseEqual(np.unique(a[expected[kth:]]), + np.unique(a[got[kth:]])) + + def test_partition_fuzz(self): + # inspired by the test of the same name in: + # https://github.com/numpy/numpy/blob/043a840/numpy/core/tests/test_multiarray.py # noqa: E501 + pyfunc = partition + cfunc = jit(nopython=True)(pyfunc) + + for j in range(10, 30): + for i in range(1, j - 2): + d = np.arange(j) + self.rnd.shuffle(d) + d = d % self.rnd.randint(2, 30) + idx = self.rnd.randint(d.size) + kth = [0, idx, i, i + 1, -idx, -i] # include negative kth's + tgt = np.sort(d)[kth] + self.assertPreciseEqual(cfunc(d, kth)[kth], + tgt) # a -> array + self.assertPreciseEqual(cfunc(d.tolist(), kth)[kth], + tgt) # a -> list + self.assertPreciseEqual(cfunc(tuple(d.tolist()), kth)[kth], + tgt) # a -> tuple + + for k in kth: + self.partition_sanity_check(pyfunc, cfunc, d, k) + + def test_argpartition_fuzz(self): + # inspired by the test of the same name in: + # https://github.com/numpy/numpy/blob/043a840/numpy/core/tests/test_multiarray.py # noqa: E501 + pyfunc = argpartition + cfunc = jit(nopython=True)(pyfunc) + + for j in range(10, 30): + for i in range(1, j - 2): + d = np.arange(j) + 
self.rnd.shuffle(d) + d = d % self.rnd.randint(2, 30) + idx = self.rnd.randint(d.size) + kth = [0, idx, i, i + 1, -idx, -i] # include negative kth's + tgt = np.argsort(d)[kth] + self.assertPreciseEqual(d[cfunc(d, kth)[kth]], + d[tgt]) # a -> array + self.assertPreciseEqual(d[cfunc(d.tolist(), kth)[kth]], + d[tgt]) # a -> list + self.assertPreciseEqual(d[cfunc(tuple(d.tolist()), kth)[kth]], + d[tgt]) # a -> tuple + + for k in kth: + self.argpartition_sanity_check(pyfunc, cfunc, d, k) + + def test_partition_exception_out_of_range(self): + # inspired by the test of the same name in: + # https://github.com/numpy/numpy/blob/043a840/numpy/core/tests/test_multiarray.py # noqa: E501 + pyfunc = partition + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + # Test out of range values in kth raise an error + a = np.arange(10) + + def _check(a, kth): + with self.assertRaises(ValueError) as e: + cfunc(a, kth) + assert str(e.exception) == "kth out of bounds" + + _check(a, 10) + _check(a, -11) + _check(a, (3, 30)) + + def test_argpartition_exception_out_of_range(self): + # inspired by the test of the same name in: + # https://github.com/numpy/numpy/blob/043a840/numpy/core/tests/test_multiarray.py # noqa: E501 + pyfunc = argpartition + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + # Test out of range values in kth raise an error + a = np.arange(10) + + def _check(a, kth): + with self.assertRaises(ValueError) as e: + cfunc(a, kth) + assert str(e.exception) == "kth out of bounds" + + _check(a, 10) + _check(a, -11) + _check(a, (3, 30)) + + def test_partition_exception_non_integer_kth(self): + # inspired by the test of the same name in: + # https://github.com/numpy/numpy/blob/043a840/numpy/core/tests/test_multiarray.py # noqa: E501 + pyfunc = partition + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + def _check(a, kth): + with 
self.assertTypingError() as raises: + cfunc(a, kth) + self.assertIn("Partition index must be integer", + str(raises.exception)) + + a = np.arange(10) + _check(a, 9.0) + _check(a, (3.3, 4.4)) + _check(a, np.array((1, 2, np.nan))) + + def test_argpartition_exception_non_integer_kth(self): + # inspired by the test of the same name in: + # https://github.com/numpy/numpy/blob/043a840/numpy/core/tests/test_multiarray.py # noqa: E501 + pyfunc = argpartition + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + def _check(a, kth): + with self.assertTypingError() as raises: + cfunc(a, kth) + self.assertIn("Partition index must be integer", + str(raises.exception)) + + a = np.arange(10) + _check(a, 9.0) + _check(a, (3.3, 4.4)) + _check(a, np.array((1, 2, np.nan))) + + def test_partition_exception_a_not_array_like(self): + pyfunc = partition + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + def _check(a, kth): + with self.assertTypingError() as raises: + cfunc(a, kth) + self.assertIn('The first argument must be an array-like', + str(raises.exception)) + + _check(4, 0) + _check('Sausages', 0) + + def test_argpartition_exception_a_not_array_like(self): + pyfunc = argpartition + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + def _check(a, kth): + with self.assertTypingError() as raises: + cfunc(a, kth) + self.assertIn('The first argument must be an array-like', + str(raises.exception)) + + _check(4, 0) + _check('Sausages', 0) + + def test_partition_exception_a_zero_dim(self): + pyfunc = partition + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + def _check(a, kth): + with self.assertTypingError() as raises: + cfunc(a, kth) + self.assertIn('The first argument must be at least 1-D (found 0-D)', + str(raises.exception)) + + _check(np.array(1), 0) + + def 
test_argpartition_exception_a_zero_dim(self): + pyfunc = argpartition + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + def _check(a, kth): + with self.assertTypingError() as raises: + cfunc(a, kth) + self.assertIn('The first argument must be at least 1-D (found 0-D)', + str(raises.exception)) + + _check(np.array(1), 0) + + def test_partition_exception_kth_multi_dimensional(self): + pyfunc = partition + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + def _check(a, kth): + with self.assertRaises(ValueError) as raises: + cfunc(a, kth) + self.assertIn('kth must be scalar or 1-D', str(raises.exception)) + + _check(np.arange(10), kth=np.arange(6).reshape(3, 2)) + + def test_argpartition_exception_kth_multi_dimensional(self): + pyfunc = argpartition + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + def _check(a, kth): + with self.assertRaises(ValueError) as raises: + cfunc(a, kth) + self.assertIn('kth must be scalar or 1-D', str(raises.exception)) + + _check(np.arange(10), kth=np.arange(6).reshape(3, 2)) + + def test_partition_empty_array(self): + # inspired by the test of the same name in: + # https://github.com/numpy/numpy/blob/043a840/numpy/core/tests/test_multiarray.py # noqa: E501 + pyfunc = partition + cfunc = jit(nopython=True)(pyfunc) + + def check(a, kth=0): + expected = pyfunc(a, kth) + got = cfunc(a, kth) + self.assertPreciseEqual(expected, got) + + # check axis handling for multidimensional empty arrays + a = np.array([]) + a.shape = (3, 2, 1, 0) + + # include this with some other empty data structures + for arr in a, (), np.array([]): + check(arr) + + def test_argpartition_empty_array(self): + # inspired by the test of the same name in: + # https://github.com/numpy/numpy/blob/043a840/numpy/core/tests/test_multiarray.py # noqa: E501 + pyfunc = argpartition + cfunc = jit(nopython=True)(pyfunc) + + def 
check(a, kth=0): + expected = pyfunc(a, kth) + got = cfunc(a, kth) + self.assertPreciseEqual(expected, got) + + # check axis handling for multidimensional empty arrays + a = np.array([]) + a.shape = (3, 2, 1, 0) + + # include this with some other empty data structures + for arr in a, (), np.array([]): + check(arr) + + def test_partition_basic(self): + # inspired by the test of the same name in: + # https://github.com/numpy/numpy/blob/043a840/numpy/core/tests/test_multiarray.py # noqa: E501 + pyfunc = partition + cfunc = jit(nopython=True)(pyfunc) + + d = np.array([]) + got = cfunc(d, 0) + self.assertPreciseEqual(d, got) + + d = np.ones(1) + got = cfunc(d, 0) + self.assertPreciseEqual(d, got) + + # kth not modified + kth = np.array([30, 15, 5]) + okth = kth.copy() + cfunc(np.arange(40), kth) + self.assertPreciseEqual(kth, okth) + + for r in ([2, 1], [1, 2], [1, 1]): + d = np.array(r) + tgt = np.sort(d) + for k in 0, 1: + self.assertPreciseEqual(cfunc(d, k)[k], tgt[k]) + self.partition_sanity_check(pyfunc, cfunc, d, k) + + for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1], + [1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]): + d = np.array(r) + tgt = np.sort(d) + for k in 0, 1, 2: + self.assertPreciseEqual(cfunc(d, k)[k], tgt[k]) + self.partition_sanity_check(pyfunc, cfunc, d, k) + + d = np.ones(50) + self.assertPreciseEqual(cfunc(d, 0), d) + + # sorted + d = np.arange(49) + for k in 5, 15: + self.assertEqual(cfunc(d, k)[k], k) + self.partition_sanity_check(pyfunc, cfunc, d, k) + + # rsorted, with input flavours: array, list and tuple + d = np.arange(47)[::-1] + for a in d, d.tolist(), tuple(d.tolist()): + self.assertEqual(cfunc(a, 6)[6], 6) + self.assertEqual(cfunc(a, 16)[16], 16) + self.assertPreciseEqual(cfunc(a, -6), cfunc(a, 41)) + self.assertPreciseEqual(cfunc(a, -16), cfunc(a, 31)) + self.partition_sanity_check(pyfunc, cfunc, d, -16) + + # median of 3 killer, O(n^2) on pure median 3 pivot quickselect + # exercises the median of median of 5 code used to keep O(n) 
+ d = np.arange(1000000) + x = np.roll(d, d.size // 2) + mid = x.size // 2 + 1 + self.assertEqual(cfunc(x, mid)[mid], mid) + d = np.arange(1000001) + x = np.roll(d, d.size // 2 + 1) + mid = x.size // 2 + 1 + self.assertEqual(cfunc(x, mid)[mid], mid) + + # max + d = np.ones(10) + d[1] = 4 + self.assertEqual(cfunc(d, (2, -1))[-1], 4) + self.assertEqual(cfunc(d, (2, -1))[2], 1) + d[1] = np.nan + assert np.isnan(cfunc(d, (2, -1))[-1]) + + # equal elements + d = np.arange(47) % 7 + tgt = np.sort(np.arange(47) % 7) + self.rnd.shuffle(d) + for i in range(d.size): + self.assertEqual(cfunc(d, i)[i], tgt[i]) + self.partition_sanity_check(pyfunc, cfunc, d, i) + + d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 9]) + kth = [0, 3, 19, 20] + self.assertEqual(tuple(cfunc(d, kth)[kth]), (0, 3, 7, 7)) + + td = [(dt, s) for dt in [np.int32, np.float32] for s in (9, 16)] + for dt, s in td: + d = np.arange(s, dtype=dt) + self.rnd.shuffle(d) + d1 = np.tile(np.arange(s, dtype=dt), (4, 1)) + map(self.rnd.shuffle, d1) + for i in range(d.size): + p = cfunc(d, i) + self.assertEqual(p[i], i) + # all before are smaller + np.testing.assert_array_less(p[:i], p[i]) + # all after are larger + np.testing.assert_array_less(p[i], p[i + 1:]) + # sanity check + self.partition_sanity_check(pyfunc, cfunc, d, i) + + def test_argpartition_basic(self): + # inspired by the test of the same name in: + # https://github.com/numpy/numpy/blob/043a840/numpy/core/tests/test_multiarray.py # noqa: E501 + pyfunc = argpartition + cfunc = jit(nopython=True)(pyfunc) + + d = np.array([], dtype=np.int64) + expected = pyfunc(d, 0) + got = cfunc(d, 0) + self.assertPreciseEqual(expected, got) + + d = np.ones(1, dtype=np.int64) + expected = pyfunc(d, 0) + got = cfunc(d, 0) + self.assertPreciseEqual(expected, got) + + # kth not modified + kth = np.array([30, 15, 5]) + okth = kth.copy() + cfunc(np.arange(40), kth) + self.assertPreciseEqual(kth, okth) + + for r in ([2, 1], [1, 2], [1, 1]): + d = 
np.array(r) + tgt = np.argsort(d) + for k in 0, 1: + self.assertPreciseEqual(d[cfunc(d, k)[k]], d[tgt[k]]) + self.argpartition_sanity_check(pyfunc, cfunc, d, k) + + for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1], + [1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]): + d = np.array(r) + tgt = np.argsort(d) + for k in 0, 1, 2: + self.assertPreciseEqual(d[cfunc(d, k)[k]], d[tgt[k]]) + self.argpartition_sanity_check(pyfunc, cfunc, d, k) + + d = np.ones(50) + self.assertPreciseEqual(d[cfunc(d, 0)], d) + + # sorted + d = np.arange(49) + for k in 5, 15: + self.assertEqual(cfunc(d, k)[k], k) + self.partition_sanity_check(pyfunc, cfunc, d, k) + + # rsorted, with input flavours: array, list and tuple + d = np.arange(47)[::-1] + for a in d, d.tolist(), tuple(d.tolist()): + self.assertEqual(cfunc(a, 6)[6], 40) + self.assertEqual(cfunc(a, 16)[16], 30) + self.assertPreciseEqual(cfunc(a, -6), cfunc(a, 41)) + self.assertPreciseEqual(cfunc(a, -16), cfunc(a, 31)) + self.argpartition_sanity_check(pyfunc, cfunc, d, -16) + + # median of 3 killer, O(n^2) on pure median 3 pivot quickselect + # exercises the median of median of 5 code used to keep O(n) + d = np.arange(1000000) + x = np.roll(d, d.size // 2) + mid = x.size // 2 + 1 + self.assertEqual(x[cfunc(x, mid)[mid]], mid) + d = np.arange(1000001) + x = np.roll(d, d.size // 2 + 1) + mid = x.size // 2 + 1 + self.assertEqual(x[cfunc(x, mid)[mid]], mid) + + # max + d = np.ones(10) + d[1] = 4 + self.assertEqual(d[cfunc(d, (2, -1))[-1]], 4) + self.assertEqual(d[cfunc(d, (2, -1))[2]], 1) + d[1] = np.nan + assert np.isnan(d[cfunc(d, (2, -1))[-1]]) + + # equal elements + d = np.arange(47) % 7 + tgt = np.sort(np.arange(47) % 7) + self.rnd.shuffle(d) + for i in range(d.size): + self.assertEqual(d[cfunc(d, i)[i]], tgt[i]) + self.argpartition_sanity_check(pyfunc, cfunc, d, i) + + d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 9]) + kth = [0, 3, 19, 20] + self.assertEqual(tuple(d[cfunc(d, kth)[kth]]), (0, 3, 7, 7)) 
+ + td = [(dt, s) for dt in [np.int32, np.float32] for s in (9, 16)] + for dt, s in td: + d = np.arange(s, dtype=dt) + self.rnd.shuffle(d) + d1 = np.tile(np.arange(s, dtype=dt), (4, 1)) + map(self.rnd.shuffle, d1) + for i in range(d.size): + p = d[cfunc(d, i)] + self.assertEqual(p[i], i) + # all before are smaller + np.testing.assert_array_less(p[:i], p[i]) + # all after are larger + np.testing.assert_array_less(p[i], p[i + 1:]) + # sanity check + self.argpartition_sanity_check(pyfunc, cfunc, d, i) + + def assert_partitioned(self, pyfunc, cfunc, d, kth): + prev = 0 + for k in np.sort(kth): + np.testing.assert_array_less(d[prev:k], d[k], + err_msg='kth %d' % k) + self.assertTrue((d[k:] >= d[k]).all(), + msg=("kth %d, %r not greater equal " + "%d" % (k, d[k:], d[k]))) + prev = k + 1 + self.partition_sanity_check(pyfunc, cfunc, d, k) + + def assert_argpartitioned(self, pyfunc, cfunc, d, kth): + prev = 0 + for k in np.sort(kth): + np.testing.assert_array_less(d[prev:k], d[k], + err_msg='kth %d' % k) + self.assertTrue((d[k:] >= d[k]).all(), + msg=("kth %d, %r not greater equal " + "%d" % (k, d[k:], d[k]))) + prev = k + 1 + self.argpartition_sanity_check(pyfunc, cfunc, d, k) + + def test_partition_iterative(self): + # inspired by the test of the same name in: + # https://github.com/numpy/numpy/blob/043a840/numpy/core/tests/test_multiarray.py + pyfunc = partition + cfunc = jit(nopython=True)(pyfunc) + + assert_partitioned = partial(self.assert_partitioned, pyfunc, cfunc) + + d = np.array([3, 4, 2, 1]) + p = cfunc(d, (0, 3)) + assert_partitioned(p, (0, 3)) + assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3)) + + self.assertPreciseEqual(p, cfunc(d, (-3, -1))) + + d = np.arange(17) + self.rnd.shuffle(d) + self.assertPreciseEqual(np.arange(17), cfunc(d, list(range(d.size)))) + + # test unsorted kth + d = np.arange(17) + self.rnd.shuffle(d) + keys = np.array([1, 3, 8, -2]) + self.rnd.shuffle(d) + p = cfunc(d, keys) + assert_partitioned(p, keys) + self.rnd.shuffle(keys) 
+ self.assertPreciseEqual(cfunc(d, keys), p) + + # equal kth + d = np.arange(20)[::-1] + assert_partitioned(cfunc(d, [5] * 4), [5]) + assert_partitioned(cfunc(d, [5] * 4 + [6, 13]), [5] * 4 + [6, 13]) + + def test_argpartition_iterative(self): + # inspired by the test of the same name in: + # https://github.com/numpy/numpy/blob/043a840/numpy/core/tests/test_multiarray.py + pyfunc = argpartition + cfunc = jit(nopython=True)(pyfunc) + + assert_argpartitioned = partial(self.assert_argpartitioned, + pyfunc, + cfunc) + + d = np.array([3, 4, 2, 1]) + p = d[cfunc(d, (0, 3))] + assert_argpartitioned(p, (0, 3)) + assert_argpartitioned(d[np.argpartition(d, (0, 3))], (0, 3)) + + self.assertPreciseEqual(p, d[cfunc(d, (-3, -1))]) + + d = np.arange(17) + self.rnd.shuffle(d) + self.assertPreciseEqual(np.arange(17), d[cfunc(d, list(range(d.size)))]) + + # test unsorted kth + d = np.arange(17) + self.rnd.shuffle(d) + keys = np.array([1, 3, 8, -2]) + self.rnd.shuffle(d) + p = d[cfunc(d, keys)] + assert_argpartitioned(p, keys) + self.rnd.shuffle(keys) + self.assertPreciseEqual(d[cfunc(d, keys)], p) + + # equal kth + d = np.arange(20)[::-1] + assert_argpartitioned(d[cfunc(d, [5] * 4)], [5]) + assert_argpartitioned(d[cfunc(d, [5] * 4 + [6, 13])], [5] * 4 + [6, 13]) + + def test_partition_multi_dim(self): + pyfunc = partition + cfunc = jit(nopython=True)(pyfunc) + + def check(a, kth): + expected = pyfunc(a, kth) + got = cfunc(a, kth) + self.assertPreciseEqual(expected[:, :, kth], got[:, :, kth]) + + for s in np.ndindex(expected.shape[:-1]): + self.assertPreciseEqual(np.unique(expected[s][:kth]), + np.unique(got[s][:kth])) + self.assertPreciseEqual(np.unique(expected[s][kth:]), + np.unique(got[s][kth:])) + + def a_variations(a): + yield a + yield a.T + yield np.asfortranarray(a) + yield np.full_like(a, fill_value=np.nan) + yield np.full_like(a, fill_value=np.inf) + # multi-dimensional tuple input + yield (((1.0, 3.142, -np.inf, 3),),) + + a = np.linspace(1, 10, 48) + a[4:7] = np.nan + 
a[8] = -np.inf + a[9] = np.inf + a = a.reshape((4, 3, 4)) + + for arr in a_variations(a): + for k in range(-3, 3): + check(arr, k) + + def test_argpartition_multi_dim(self): + pyfunc = argpartition + cfunc = jit(nopython=True)(pyfunc) + + def check(a, kth): + expected = pyfunc(a, kth) + got = cfunc(a, kth) + a = np.asarray(a) + idx = np.ndindex(a.shape[:-1]) + for s in idx: + self.assertPreciseEqual(a[s][expected[s][kth]], + a[s][got[s][kth]]) + + for s in np.ndindex(expected.shape[:-1]): + self.assertPreciseEqual(np.unique(a[s][expected[s][:kth]]), + np.unique(a[s][got[s][:kth]])) + self.assertPreciseEqual(np.unique(a[s][expected[s][kth:]]), + np.unique(a[s][got[s][kth:]])) + + def a_variations(a): + yield a + yield a.T + yield np.asfortranarray(a) + yield np.full_like(a, fill_value=np.nan) + yield np.full_like(a, fill_value=np.inf) + # multi-dimensional tuple input + yield (((1.0, 3.142, -np.inf, 3),),) + + a = np.linspace(1, 10, 48) + a[4:7] = np.nan + a[8] = -np.inf + a[9] = np.inf + a = a.reshape((4, 3, 4)) + + for arr in a_variations(a): + for k in range(-3, 3): + check(arr, k) + + def test_partition_boolean_inputs(self): + pyfunc = partition + cfunc = jit(nopython=True)(pyfunc) + + for d in np.linspace(1, 10, 17), np.array((True, False, True)): + for kth in True, False, -1, 0, 1: + self.partition_sanity_check(pyfunc, cfunc, d, kth) + + def test_argpartition_boolean_inputs(self): + pyfunc = argpartition + cfunc = jit(nopython=True)(pyfunc) + + for d in np.linspace(1, 10, 17), np.array((True, False, True)): + for kth in True, False, -1, 0, 1: + self.argpartition_sanity_check(pyfunc, cfunc, d, kth) + + @needs_blas + def test_cov_invalid_ddof(self): + pyfunc = cov + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + m = np.array([[0, 2], [1, 1], [2, 0]]).T + + for ddof in np.arange(4), 4j: + with self.assertTypingError() as raises: + cfunc(m, ddof=ddof) + self.assertIn('ddof must be a real numerical scalar type', 
+ str(raises.exception)) + + for ddof in np.nan, np.inf: + with self.assertRaises(ValueError) as raises: + cfunc(m, ddof=ddof) + self.assertIn('Cannot convert non-finite ddof to integer', + str(raises.exception)) + + for ddof in 1.1, -0.7: + with self.assertRaises(ValueError) as raises: + cfunc(m, ddof=ddof) + self.assertIn('ddof must be integral value', str(raises.exception)) + + def corr_corrcoef_basic(self, pyfunc, first_arg_name): + cfunc = jit(nopython=True)(pyfunc) + _check = partial(self._check_output, pyfunc, cfunc, abs_tol=1e-14) + + def input_variations(): + # array inputs + yield np.array([[0, 2], [1, 1], [2, 0]]).T + yield self.rnd.randn(100).reshape(5, 20) + yield np.asfortranarray(np.array([[0, 2], [1, 1], [2, 0]]).T) + yield self.rnd.randn(100).reshape(5, 20)[:, ::2] + yield np.array([0.3942, 0.5969, 0.7730, 0.9918, 0.7964]) + yield np.full((4, 5), fill_value=True) + yield np.array([np.nan, 0.5969, -np.inf, 0.9918, 0.7964]) + yield np.linspace(-3, 3, 33).reshape(33, 1) + + # non-array inputs + yield ((0.1, 0.2), (0.11, 0.19), (0.09, 0.21)) # UniTuple + yield ((0.1, 0.2), (0.11, 0.19), (0.09j, 0.21j)) # Tuple + yield (-2.1, -1, 4.3) + yield (1, 2, 3) + yield [4, 5, 6] + yield ((0.1, 0.2, 0.3), (0.1, 0.2, 0.3)) + yield [(1, 2, 3), (1, 3, 2)] + yield 3.142 + yield ((1.1, 2.2, 1.5),) + + # empty data structures + yield np.array([]) + yield np.array([]).reshape(0, 2) + yield np.array([]).reshape(2, 0) + yield () + + # all inputs other than the first are defaulted + for input_arr in input_variations(): + _check({first_arg_name: input_arr}) + + @needs_blas + def test_corrcoef_basic(self): + pyfunc = corrcoef + self.corr_corrcoef_basic(pyfunc, first_arg_name='x') + + @needs_blas + def test_cov_basic(self): + pyfunc = cov + self.corr_corrcoef_basic(pyfunc, first_arg_name='m') + + @needs_blas + def test_cov_explicit_arguments(self): + pyfunc = cov + cfunc = jit(nopython=True)(pyfunc) + _check = partial(self._check_output, pyfunc, cfunc, abs_tol=1e-14) + + m = 
self.rnd.randn(105).reshape(15, 7) + y_choices = None, m[::-1] + rowvar_choices = False, True + bias_choices = False, True + ddof_choice = None, -1, 0, 1, 3.0, True + + products = itertools.product(y_choices, rowvar_choices, + bias_choices, ddof_choice) + for y, rowvar, bias, ddof in products: + params = {'m': m, 'y': y, 'ddof': ddof, + 'bias': bias, 'rowvar': rowvar} + _check(params) + + @needs_blas + def test_corrcoef_explicit_arguments(self): + pyfunc = corrcoef + cfunc = jit(nopython=True)(pyfunc) + _check = partial(self._check_output, pyfunc, cfunc, abs_tol=1e-14) + + x = self.rnd.randn(105).reshape(15, 7) + y_choices = None, x[::-1] + rowvar_choices = False, True + + for y, rowvar in itertools.product(y_choices, rowvar_choices): + params = {'x': x, 'y': y, 'rowvar': rowvar} + _check(params) + + def cov_corrcoef_edge_cases(self, pyfunc, first_arg_name): + cfunc = jit(nopython=True)(pyfunc) + _check = partial(self._check_output, pyfunc, cfunc, abs_tol=1e-14) + + # some of these examples borrowed from numpy doc string examples: + # https://github.com/numpy/numpy/blob/v1.15.0/numpy/lib/function_base.py#L2199-L2231 # noqa: E501 + # some borrowed from TestCov and TestCorrCoef: + # https://github.com/numpy/numpy/blob/80d3a7a/numpy/lib/tests/test_function_base.py # noqa: E501 + m = np.array([-2.1, -1, 4.3]) + y = np.array([3, 1.1, 0.12]) + params = {first_arg_name: m, 'y': y} + _check(params) + + m = np.array([1, 2, 3]) # test case modified such that m is 1D + y = np.array([[1j, 2j, 3j]]) + params = {first_arg_name: m, 'y': y} + _check(params) + + m = np.array([1, 2, 3]) + y = (1j, 2j, 3j) + params = {first_arg_name: m, 'y': y} + _check(params) + params = {first_arg_name: y, 'y': m} # flip real and complex inputs + _check(params) + + m = np.array([1, 2, 3]) + y = (1j, 2j, 3) # note last item is not complex + params = {first_arg_name: m, 'y': y} + _check(params) + params = {first_arg_name: y, 'y': m} # flip real and complex inputs + _check(params) + + m = np.array([]) 
+ y = np.array([]) + params = {first_arg_name: m, 'y': y} + _check(params) + + m = 1.1 + y = 2.2 + params = {first_arg_name: m, 'y': y} + _check(params) + + m = self.rnd.randn(10, 3) + y = np.array([-2.1, -1, 4.3]).reshape(1, 3) / 10 + params = {first_arg_name: m, 'y': y} + _check(params) + + m = np.array([-2.1, -1, 4.3]) + y = np.array([[3, 1.1, 0.12], [3, 1.1, 0.12]]) + params = {first_arg_name: m, 'y': y} + _check(params) + + for rowvar in False, True: + m = np.array([-2.1, -1, 4.3]) + y = np.array([[3, 1.1, 0.12], [3, 1.1, 0.12], [4, 1.1, 0.12]]) + params = {first_arg_name: m, 'y': y, 'rowvar': rowvar} + _check(params) + # swap m and y + params = {first_arg_name: y, 'y': m, 'rowvar': rowvar} + _check(params) + + @needs_blas + def test_corrcoef_edge_cases(self): + pyfunc = corrcoef + self.cov_corrcoef_edge_cases(pyfunc, first_arg_name='x') + + cfunc = jit(nopython=True)(pyfunc) + _check = partial(self._check_output, pyfunc, cfunc, abs_tol=1e-14) + + for x in (np.nan, -np.inf, 3.142, 0): + params = {'x': x} + _check(params) + + @needs_blas + def test_corrcoef_edge_case_extreme_values(self): + pyfunc = corrcoef + cfunc = jit(nopython=True)(pyfunc) + _check = partial(self._check_output, pyfunc, cfunc, abs_tol=1e-14) + + # extreme values + x = ((1e-100, 1e100), (1e100, 1e-100)) + params = {'x': x} + _check(params) + + @needs_blas + def test_cov_edge_cases(self): + pyfunc = cov + self.cov_corrcoef_edge_cases(pyfunc, first_arg_name='m') + + cfunc = jit(nopython=True)(pyfunc) + _check = partial(self._check_output, pyfunc, cfunc, abs_tol=1e-14) + + # invalid ddof + m = np.array([[0, 2], [1, 1], [2, 0]]).T + params = {'m': m, 'ddof': 5} + _check(params) + + @needs_blas + def test_cov_exceptions(self): + pyfunc = cov + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + def _check_m(m): + with self.assertTypingError() as raises: + cfunc(m) + self.assertIn('m has more than 2 dimensions', str(raises.exception)) + + m = 
np.ones((5, 6, 7)) + _check_m(m) + + m = ((((1, 2, 3), (2, 2, 2)),),) + _check_m(m) + + m = [[[5, 6, 7]]] + _check_m(m) + + def _check_y(m, y): + with self.assertTypingError() as raises: + cfunc(m, y=y) + self.assertIn('y has more than 2 dimensions', str(raises.exception)) + + m = np.ones((5, 6)) + y = np.ones((5, 6, 7)) + _check_y(m, y) + + m = np.array((1.1, 2.2, 1.1)) + y = (((1.2, 2.2, 2.3),),) + _check_y(m, y) + + m = np.arange(3) + y = np.arange(4) + with self.assertRaises(ValueError) as raises: + cfunc(m, y=y) + self.assertIn('m and y have incompatible dimensions', + str(raises.exception)) + # Numpy raises ValueError: all the input array dimensions except for the + # concatenation axis must match exactly. + + m = np.array([-2.1, -1, 4.3]).reshape(1, 3) + with self.assertRaises(RuntimeError) as raises: + cfunc(m) + self.assertIn('2D array containing a single row is unsupported', + str(raises.exception)) + + def test_ediff1d_basic(self): + pyfunc = ediff1d + cfunc = jit(nopython=True)(pyfunc) + _check = partial(self._check_output, pyfunc, cfunc) + + def to_variations(a): + yield None + yield a + yield a.astype(np.int16) + + def ary_variations(a): + yield a + yield a.reshape(3, 2, 2) + yield a.astype(np.int32) + + for ary in ary_variations(np.linspace(-2, 7, 12)): + params = {'ary': ary} + _check(params) + + for a in to_variations(ary): + params = {'ary': ary, 'to_begin': a} + _check(params) + + params = {'ary': ary, 'to_end': a} + _check(params) + + for b in to_variations(ary): + params = {'ary': ary, 'to_begin': a, 'to_end': b} + _check(params) + + def test_ediff1d_exceptions(self): + pyfunc = ediff1d + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + with self.assertTypingError() as e: + cfunc(np.array((True, True, False))) + + msg = "Boolean dtype is unsupported (as per NumPy)" + assert msg in str(e.exception) + + def test_fliplr_basic(self): + pyfunc = fliplr + cfunc = jit(nopython=True)(pyfunc) + + def 
a_variations(): + yield np.arange(10).reshape(5, 2) + yield np.arange(20).reshape(5, 2, 2) + yield ((1, 2),) + yield ([1, 2], [3, 4],) + + for a in a_variations(): + expected = pyfunc(a) + got = cfunc(a) + self.assertPreciseEqual(expected, got) + + with self.assertRaises(TypingError) as raises: + cfunc("abc") + + self.assertIn("Cannot np.fliplr on %s type" % types.unicode_type, + str(raises.exception)) + + def test_fliplr_exception(self): + pyfunc = fliplr + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + with self.assertRaises(TypingError) as raises: + cfunc(np.arange(3)) + + self.assertIn("cannot index array", str(raises.exception)) + self.assertIn("with 2 indices", str(raises.exception)) + + def test_flipud_basic(self): + pyfunc = flipud + cfunc = jit(nopython=True)(pyfunc) + + def a_variations(): + yield [1] + yield np.arange(10) + yield np.arange(10).reshape(5, 2) + yield np.arange(20).reshape(5, 2, 2) + yield ((1, 2),) + yield ([1, 2], [3, 4],) + + for a in a_variations(): + expected = pyfunc(a) + got = cfunc(a) + self.assertPreciseEqual(expected, got) + + with self.assertRaises(TypingError) as raises: + cfunc("abc") + + self.assertIn("Cannot np.flipud on %s type" % types.unicode_type, + str(raises.exception)) + + def test_flipud_exception(self): + pyfunc = flipud + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + with self.assertRaises(TypingError) as raises: + cfunc(1) + + self.assertIn("cannot index array", str(raises.exception)) + self.assertIn("with 1 indices", str(raises.exception)) + + def test_flip_basic(self): + pyfunc = flip + cfunc = jit(nopython=True)(pyfunc) + + def a_variations(): + yield np.array(1) + yield np.arange(10) + yield np.arange(10).reshape(5, 2) + yield np.arange(20).reshape(5, 2, 2) + + for a in a_variations(): + expected = pyfunc(a) + got = cfunc(a) + self.assertPreciseEqual(expected, got) + + with self.assertRaises(TypingError) 
as raises: + cfunc((1, 2, 3)) + + self.assertIn("Cannot np.flip on UniTuple", str(raises.exception)) + + def test_logspace2_basic(self): + + def inputs(): + #start, stop + yield 1, 60 + yield -1, 60 + yield -60, -1 + yield -1, -60 + yield 60, -1 + yield 1.0, 60.0 + yield -60.0, -1.0 + yield -1.0, 60.0 + yield 0.0, np.e + yield 0.0, np.pi + if numpy_version < (2, 0): + yield np.complex64(1), np.complex64(2) + yield np.complex64(2j), np.complex64(4j) + yield np.complex64(2), np.complex64(4j) + yield np.complex64(1 + 2j), np.complex64(3 + 4j) + yield np.complex64(1 - 2j), np.complex64(3 - 4j) + yield np.complex64(-1 + 2j), np.complex64(3 + 4j) + + pyfunc = logspace2 + cfunc = jit(nopython=True)(pyfunc) + + for start, stop in inputs(): + np.testing.assert_allclose(pyfunc(start, stop), cfunc(start, stop)) + + def test_logspace2_exception(self): + cfunc = jit(nopython=True)(logspace2) + + self.disable_leak_check() + + with self.assertRaises(TypingError) as raises: + cfunc("abc", 5) + self.assertIn('The first argument "start" must be a number', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc(5, "abc") + self.assertIn('The second argument "stop" must be a number', + str(raises.exception)) + + def test_logspace3_basic(self): + + def inputs(): + #start, stop + yield 1, 60 + yield -1, 60 + yield -60, -1 + yield -1, -60 + yield 60, -1 + yield 1.0, 60.0 + yield -60.0, -1.0 + yield -1.0, 60.0 + yield 0.0, np.e + yield 0.0, np.pi + if numpy_version < (2, 0): + yield np.complex64(1), np.complex64(2) + yield np.complex64(2j), np.complex64(4j) + yield np.complex64(2), np.complex64(4j) + yield np.complex64(1 + 2j), np.complex64(3 + 4j) + yield np.complex64(1 - 2j), np.complex64(3 - 4j) + yield np.complex64(-1 + 2j), np.complex64(3 + 4j) + + pyfunc = logspace3 + cfunc = jit(nopython=True)(pyfunc) + + for start, stop in inputs(): + np.testing.assert_allclose(pyfunc(start, stop), cfunc(start, stop)) + + def test_logspace3_with_num_basic(self): + + def 
inputs(): + #start, stop, num + yield 1, 60, 20 + yield -1, 60, 30 + yield -60, -1, 40 + yield -1, -60, 50 + yield 60, -1, 60 + yield 1.0, 60.0, 70 + yield -60.0, -1.0, 80 + yield -1.0, 60.0, 90 + yield 0.0, np.e, 20 + yield 0.0, np.pi, 30 + if numpy_version < (2, 0): + yield np.complex64(1), np.complex64(2), 40 + yield np.complex64(2j), np.complex64(4j), 50 + yield np.complex64(2), np.complex64(4j), 60 + yield np.complex64(1 + 2j), np.complex64(3 + 4j), 70 + yield np.complex64(1 - 2j), np.complex64(3 - 4j), 80 + yield np.complex64(-1 + 2j), np.complex64(3 + 4j), 90 + + pyfunc = logspace3 + cfunc = jit(nopython=True)(pyfunc) + + for start, stop, num in inputs(): + np.testing.assert_allclose(pyfunc(start, stop, num), + cfunc(start, stop, num)) + + def test_logspace3_exception(self): + cfunc = jit(nopython=True)(logspace3) + + self.disable_leak_check() + + with self.assertRaises(TypingError) as raises: + cfunc("abc", 5) + self.assertIn('The first argument "start" must be a number', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc(5, "abc") + self.assertIn('The second argument "stop" must be a number', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc(0, 5, "abc") + self.assertIn('The third argument "num" must be an integer', + str(raises.exception)) + + def test_geomspace2_basic(self): + + def inputs(): + #start, stop + yield -1, -60 + yield 1.0, 60.0 + yield -60.0, -1.0 + yield 1, 1000 + yield 1000, 1 + yield 1, 256 + yield -1000, -1 + yield -1, np.complex64(2j) + yield np.complex64(2j), -1 + yield -1.0, np.complex64(2j) + yield np.complex64(1j), np.complex64(1000j) + yield np.complex64(-1 + 0j), np.complex64(1 + 0j) + yield np.complex64(1), np.complex64(2) + yield np.complex64(2j), np.complex64(4j) + yield np.complex64(2), np.complex64(4j) + yield np.complex64(1 + 2j), np.complex64(3 + 4j) + yield np.complex64(1 - 2j), np.complex64(3 - 4j) + yield np.complex64(-1 + 2j), np.complex64(3 + 4j) + + 
pyfunc = geomspace2 + cfunc = jit(nopython=True)(pyfunc) + + for start, stop in inputs(): + self.assertPreciseEqual(pyfunc(start, stop), + cfunc(start, stop), + abs_tol=1e-12) + + def test_geomspace2_exception(self): + cfunc = jit(nopython=True)(geomspace2) + + self.disable_leak_check() + + with self.assertRaises(TypingError) as raises: + cfunc("abc", 5) + self.assertIn('The argument "start" must be a number', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc(5, "abc") + self.assertIn('The argument "stop" must be a number', + str(raises.exception)) + + with self.assertRaises(ValueError) as raises: + cfunc(0, 5) + self.assertIn('Geometric sequence cannot include zero', + str(raises.exception)) + + with self.assertRaises(ValueError) as raises: + cfunc(5, 0) + self.assertIn('Geometric sequence cannot include zero', + str(raises.exception)) + + def test_geomspace3_basic(self): + + def inputs(): + #start, stop, num + yield -1, -60, 50 + yield 1.0, 60.0, 70 + yield -60.0, -1.0, 80 + yield 1, 1000, 4 + yield 1, 1000, 3 + yield 1000, 1, 4 + yield 1, 256, 9 + yield -1000, -1, 4 + yield -1, np.complex64(2j), 10 + yield np.complex64(2j), -1, 20 + yield -1.0, np.complex64(2j), 30 + yield np.complex64(1j), np.complex64(1000j), 4 + yield np.complex64(-1 + 0j), np.complex64(1 + 0j), 5 + yield np.complex64(1), np.complex64(2), 40 + yield np.complex64(2j), np.complex64(4j), 50 + yield np.complex64(2), np.complex64(4j), 60 + yield np.complex64(1 + 2j), np.complex64(3 + 4j), 70 + yield np.complex64(1 - 2j), np.complex64(3 - 4j), 80 + yield np.complex64(-1 + 2j), np.complex64(3 + 4j), 90 + + pyfunc = geomspace3 + cfunc = jit(nopython=True)(pyfunc) + + for start, stop, num in inputs(): + self.assertPreciseEqual(pyfunc(start, stop, num), + cfunc(start, stop, num), + abs_tol=1e-14) + + def test_geomspace3_exception(self): + cfunc = jit(nopython=True)(geomspace3) + + self.disable_leak_check() + + with self.assertRaises(TypingError) as raises: + 
cfunc("abc", 5, 10) + self.assertIn('The argument "start" must be a number', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc(5, "abc", 10) + self.assertIn('The argument "stop" must be a number', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc(5, 10, "abc") + self.assertIn('The argument "num" must be an integer', + str(raises.exception)) + + with self.assertRaises(ValueError) as raises: + cfunc(0, 5, 5) + self.assertIn('Geometric sequence cannot include zero', + str(raises.exception)) + + with self.assertRaises(ValueError) as raises: + cfunc(5, 0, 5) + self.assertIn('Geometric sequence cannot include zero', + str(raises.exception)) + + def test_geomspace_numpy(self): + cfunc2 = jit(nopython=True)(geomspace2) + cfunc3 = jit(nopython=True)(geomspace3) + pfunc3 = geomspace3 + + # https://github.com/numpy/numpy/blob/ab2178b47c0ee834180c318db196976623710691/numpy/core/tests/test_function_base.py#L122C3-L142 + # test_basic + y = cfunc2(1, 1e6) + self.assertEqual(len(y), 50) + y = cfunc3(1, 1e6, num=100) + self.assertEqual(y[-1], 10 ** 6) + y = cfunc3(1, 1e6, num=7) + self.assertPreciseEqual(y, pfunc3(1,1e6, num=7)) + + y = cfunc3(8, 2, num=3) + self.assertPreciseEqual(y, pfunc3(8, 2, num=3)) + self.assertTrue([x == 0 for x in y.imag]) + + y = cfunc3(-1, -100, num=3) + self.assertPreciseEqual(y, pfunc3(-1, -100, num=3)) + self.assertTrue([x == 0 for x in y.imag]) + + y = cfunc3(-100, -1, num=3) + self.assertPreciseEqual(y, pfunc3(-100, -1, num=3)) + self.assertTrue([x == 0 for x in y.imag]) + + # test_boundaries_match_start_and_stop_exactly + start = 0.3 + stop = 20.3 + + y = cfunc3(start, stop, num=1) + self.assertPreciseEqual(y[0], start) + + y = cfunc3(start, stop, num=3) + self.assertPreciseEqual(y[0], start) + self.assertPreciseEqual(y[-1], stop) + + # test_nan_interior + with np.errstate(invalid='ignore'): + y = cfunc3(-3, 3, num=4) + + self.assertPreciseEqual(y[0], -3.0) + 
self.assertTrue(np.isnan(y[1:-1]).all()) + self.assertPreciseEqual(y[3], 3.0) + + # test_complex + # Purely imaginary + y = cfunc3(1j, 16j, num=5) + self.assertPreciseEqual(y, pfunc3(1j, 16j, num=5), abs_tol=1e-14) + self.assertTrue([x == 0 for x in y.real]) + + y = cfunc3(-4j, -324j, num=5) + self.assertPreciseEqual(y, pfunc3(-4j, -324j, num=5), abs_tol=1e-13) + self.assertTrue([x == 0 for x in y.real]) + + y = cfunc3(1 + 1j, 1000 + 1000j, num=4) + self.assertPreciseEqual(y, + pfunc3(1 + 1j, 1000 + 1000j, num=4), + abs_tol=1e-13) + + y = cfunc3(-1 + 1j, -1000 + 1000j, num=4) + self.assertPreciseEqual(y, + pfunc3(-1 + 1j, -1000 + 1000j, num=4), + abs_tol=1e-13) + + # Logarithmic spirals + if numpy_version < (2, 0): + y = cfunc3(-1 + 0j, 1 + 0j, num=3) + self.assertPreciseEqual(y, pfunc3(-1 + 0j, 1 + 0j, num=3)) + + y = cfunc3(0 + 3j, -3 + 0j, 3) + self.assertPreciseEqual(y, pfunc3(0 + 3j, -3 + 0j, 3), abs_tol=1e-15) + y = cfunc3(0 + 3j, 3 + 0j, 3) + self.assertPreciseEqual(y, pfunc3(0 + 3j, 3 + 0j, 3), abs_tol=1e-15) + y = cfunc3(-3 + 0j, 0 - 3j, 3) + self.assertPreciseEqual(y, pfunc3(-3 + 0j, 0 - 3j, 3), abs_tol=1e-15) + y = cfunc3(0 + 3j, -3 + 0j, 3) + self.assertPreciseEqual(y, pfunc3(0 + 3j, -3 + 0j, 3), abs_tol=1e-15) + y = cfunc3(-2 - 3j, 5 + 7j, 7) + self.assertPreciseEqual(y, pfunc3(-2 - 3j, 5 + 7j, 7), abs_tol=1e-14) + + y = cfunc3(3j, -5, 2) + self.assertPreciseEqual(y, pfunc3(3j, -5, 2)) + y = cfunc3(-5, 3j, 2) + self.assertPreciseEqual(y, pfunc3(-5, 3j, 2)) + + def test_rot90_basic(self): + pyfunc = rot90 + cfunc = jit(nopython=True)(pyfunc) + + def a_variations(): + yield np.arange(10).reshape(5, 2) + yield np.arange(20).reshape(5, 2, 2) + yield np.arange(64).reshape(2, 2, 2, 2, 2, 2) + + for a in a_variations(): + expected = pyfunc(a) + got = cfunc(a) + self.assertPreciseEqual(expected, got) + + def test_rot90_with_k_basic(self): + pyfunc = rot90_k + cfunc = jit(nopython=True)(pyfunc) + + def a_variations(): + yield np.arange(10).reshape(5, 2) + yield 
np.arange(20).reshape(5, 2, 2) + yield np.arange(64).reshape(2, 2, 2, 2, 2, 2) + + for a in a_variations(): + for k in range(-5, 6): + expected = pyfunc(a, k) + got = cfunc(a, k) + self.assertPreciseEqual(expected, got) + + def test_rot90_exception(self): + pyfunc = rot90_k + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + with self.assertRaises(TypingError) as raises: + cfunc("abc") + + self.assertIn('The first argument "m" must be an array', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc(np.arange(4).reshape(2, 2), k="abc") + + self.assertIn('The second argument "k" must be an integer', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc(np.arange(3)) + + self.assertIn("Input must be >= 2-d.", str(raises.exception)) + + def _check_split(self, func): + # Since np.split and np.array_split are very similar + pyfunc = func + cfunc = jit(nopython=True)(pyfunc) + + def args_variations(): + a = np.arange(100) + yield a, 2 + yield a, 2, 0 + yield a, [1, 4, 72] + yield list(a), [1, 4, 72] + yield tuple(a), [1, 4, 72] + yield a, [1, 4, 72], 0 + yield list(a), [1, 4, 72], 0 + yield tuple(a), [1, 4, 72], 0 + + a = np.arange(64).reshape(4, 4, 4) + yield a, 2 + yield a, 2, 0 + yield a, 2, 1 + yield a, [2, 1, 5] + yield a, [2, 1, 5], 1 + yield a, [2, 1, 5], 2 + yield a, [1, 3] + yield a, [1, 3], 1 + yield a, [1, 3], 2 + yield a, [1], -1 + yield a, [1], -2 + yield a, [1], -3 + yield a, np.array([], dtype=np.int64), 0 + + a = np.arange(100).reshape(2, -1) + yield a, 1 + yield a, 1, 0 + yield a, [1], 0 + yield a, 50, 1 + yield a, np.arange(10, 50, 10), 1 + yield a, (1,) + yield a, (np.int32(4), 10) + + a = np.array([]) + yield a, 1 + yield a, 2 + yield a, (2, 3), 0 + yield a, 1, 0 + + a = np.array([[]]) + yield a, 1 + yield a, (2, 3), 1 + yield a, 1, 0 + yield a, 1, 1 + + for args in args_variations(): + expected = pyfunc(*args) + got = cfunc(*args) + + 
np.testing.assert_equal(expected, list(got)) + + def _check_array_split(self, func): + # array_split specific checks, mainly dealing with `int`s + pyfunc = func + cfunc = jit(nopython=True)(pyfunc) + + def args_variations(): + yield np.arange(8), 3 + yield list(np.arange(8)), 3 + yield tuple(np.arange(8)), 3 + yield np.arange(24).reshape(12, 2), 5 + + for args in args_variations(): + expected = pyfunc(*args) + got = cfunc(*args) + + np.testing.assert_equal(expected, list(got)) + + def test_array_split_basic(self): + self._check_split(array_split) + self._check_array_split(array_split) + + def test_split_basic(self): + self._check_split(split) + + self.disable_leak_check() # The exception leaks + with self.assertRaises(ValueError) as raises: + njit(split)(np.ones(5), 2) + self.assertIn( + "array split does not result in an equal division", + str(raises.exception) + ) + + with self.assertRaises(ValueError) as raises: + njit(split)(np.ones(5), [3], axis=-3) + self.assertIn("np.split: Argument axis out of bounds", + str(raises.exception)) + + def test_vhdsplit_basic(self): + # split and array_split have more comprehensive tests of splitting. 
+ # only do simple tests on vsplit, hsplit and dsplit + # Based on tests from https://github.com/numpy/numpy/blob/f0befec40376fc46fdaceac2c49c7349ad671bde/numpy/lib/tests/test_shape_base.py#L538-L624 # noqa: E501 + def inputs1D(): + # test_1D_array + yield np.array([1, 2, 3, 4]), 2 + yield np.array([1., 2., 3., 4.]), 2 + + def inputs2D(): + # test_2D_array + yield np.array([[1, 2, 3, 4], [1, 2, 3, 4]]), 2 + yield np.array([[1., 2., 3., 4.], [1., 2., 3., 4.]]), 2 + yield np.arange(16.0).reshape(4, 4), 2 + yield np.arange(16.0).reshape(4, 4), np.array([3, 6]) + yield np.arange(16.0).reshape(4, 4), [3, 6] + yield np.arange(16.0).reshape(4, 4), (3, 6) + yield np.arange(8.0).reshape(2, 2, 2), 2 + + def inputs3D(): + # test_3D_array + np.array([[[1, 2, 3, 4], + [1, 2, 3, 4]], + [[1, 2, 3, 4], + [1, 2, 3, 4]]]), 2 + yield np.arange(16.0).reshape(2, 2, 4), 2 + yield np.arange(16.0).reshape(2, 2, 4), np.array([3, 6]) + yield np.arange(16.0).reshape(2, 2, 4), [3, 6] + yield np.arange(16.0).reshape(2, 2, 4), (3, 6) + yield np.arange(8.0).reshape(2, 2, 2), 2 + + inputs = [inputs1D(), inputs2D(), inputs3D()] + for (f, mindim, name) in [(vsplit, 2, "vsplit"), + (hsplit, 1, "hsplit"), + (dsplit, 3, "dsplit")]: + pyfunc = f + cfunc = njit(pyfunc) + for i in range(mindim, 4): + for a, ind_or_sec in inputs[i - 1]: + self.assertPreciseEqual(pyfunc(a, ind_or_sec), + cfunc(a, ind_or_sec)) + + def test_vhdsplit_exception(self): + # Single test method for vsplit, hsplit and dsplit exceptions + for (f, mindim, name) in [(vsplit, 2, "vsplit"), + (hsplit, 1, "hsplit"), + (dsplit, 3, "dsplit")]: + cfunc = jit(nopython=True)(f) + self.disable_leak_check() + with self.assertRaises(TypingError) as raises: + cfunc(1, 2) + self.assertIn('The argument "ary" must be an array', + str(raises.exception)) + with self.assertRaises(TypingError) as raises: + cfunc("abc", 2) + self.assertIn('The argument "ary" must be an array', + str(raises.exception)) + with self.assertRaises(TypingError) as raises: + 
cfunc(np.array([[1, 2, 3, 4], [1, 2, 3, 4]]), "abc") + self.assertIn(('The argument "indices_or_sections" must be int or ' + '1d-array'), + str(raises.exception)) + with self.assertRaises(ValueError) as raises: + cfunc(np.array(1), 2) + self.assertIn(name + ' only works on arrays of ' + str(mindim) + + ' or more dimensions', + str(raises.exception)) + + def test_roll_basic(self): + pyfunc = roll + cfunc = jit(nopython=True)(pyfunc) + + def a_variations(): + yield np.arange(7) + yield np.arange(3 * 4 * 5).reshape(3, 4, 5) + yield [1.1, 2.2, 3.3] + yield (True, False, True) + yield False + yield 4 + yield (9,) + yield np.asfortranarray(np.array([[1.1, np.nan], [np.inf, 7.8]])) + yield np.array([]) + yield () + + def shift_variations(): + return itertools.chain.from_iterable(((True, False), + range(-10, 10))) + + for a in a_variations(): + for shift in shift_variations(): + expected = pyfunc(a, shift) + got = cfunc(a, shift) + self.assertPreciseEqual(expected, got) + + def test_roll_exceptions(self): + pyfunc = roll + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + for shift in 1.1, (1, 2): + with self.assertTypingError() as e: + cfunc(np.arange(10), shift) + + msg = "shift must be an integer" + assert msg in str(e.exception) + + def test_extract_basic(self): + pyfunc = extract + cfunc = jit(nopython=True)(pyfunc) + _check = partial(self._check_output, pyfunc, cfunc) + + a = np.arange(10) + self.rnd.shuffle(a) + for threshold in range(-3, 13): + cond = a > threshold + _check({'condition': cond, 'arr': a}) + + a = np.arange(60).reshape(4, 5, 3) + cond = a > 11.2 + _check({'condition': cond, 'arr': a}) + + a = ((1, 2, 3), (3, 4, 5), (4, 5, 6)) + cond = np.eye(3).flatten() + _check({'condition': cond, 'arr': a}) + + a = [1.1, 2.2, 3.3, 4.4] + cond = [1, 1, 0, 1] + _check({'condition': cond, 'arr': a}) + + a = np.linspace(-2, 10, 6) + element_pool = (True, False, np.nan, -1, -1.0, -1.2, 1, 1.0, 1.5j) + for cond in 
itertools.combinations_with_replacement(element_pool, 4): + _check({'condition': cond, 'arr': a}) + _check({'condition': np.array(cond).reshape(2, 2), 'arr': a}) + + a = np.array([1, 2, 3]) + cond = np.array([]) + _check({'condition': cond, 'arr': a}) + + a = np.array([1, 2, 3]) + cond = np.array([1, 0, 1, 0]) # but [1, 0, 1, 0, 1] raises + _check({'condition': cond, 'arr': a}) + + a = np.array([[1, 2, 3], [4, 5, 6]]) + cond = [1, 0, 1, 0, 1, 0] # but [1, 0, 1, 0, 1, 0, 1] raises + _check({'condition': cond, 'arr': a}) + + a = np.array([[1, 2, 3], [4, 5, 6]]) + cond = np.array([1, 0, 1, 0, 1, 0, 0, 0]).reshape(2, 2, 2) + _check({'condition': cond, 'arr': a}) + + a = np.asfortranarray(np.arange(60).reshape(3, 4, 5)) + cond = np.repeat((0, 1), 30) + _check({'condition': cond, 'arr': a}) + _check({'condition': cond, 'arr': a[::-1]}) + + a = np.array(4) + for cond in 0, 1: + _check({'condition': cond, 'arr': a}) + + a = 1 + cond = 1 + _check({'condition': cond, 'arr': a}) + + a = np.array(1) + cond = np.array([True, False]) + _check({'condition': cond, 'arr': a}) + + a = np.arange(4) + cond = np.array([1, 0, 1, 0, 0, 0]).reshape(2, 3) * 1j + _check({'condition': cond, 'arr': a}) + + def test_extract_exceptions(self): + pyfunc = extract + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + a = np.array([]) + cond = np.array([1, 2, 3]) + + with self.assertRaises(ValueError) as e: + cfunc(cond, a) + self.assertIn('Cannot extract from an empty array', str(e.exception)) + + def _check(cond, a): + msg = 'condition shape inconsistent with arr shape' + with self.assertRaises(ValueError) as e: + cfunc(cond, a) + self.assertIn(msg, str(e.exception)) + + a = np.array([[1, 2, 3], [1, 2, 3]]) + cond = [1, 0, 1, 0, 1, 0, 1] + _check(cond, a) + + a = np.array([1, 2, 3]) + cond = np.array([1, 0, 1, 0, 1]) + _check(cond, a) + + a = np.array(60) # note, this is 0D + cond = 0, 1 + _check(cond, a) + + a = np.arange(4) + cond = 
np.array([True, False, False, False, True]) + _check(cond, a) + + a = np.arange(4) + cond = np.array([True, False, True, False, False, True, False]) + _check(cond, a) + + @unittest.skipUnless(IS_NUMPY_2, "New in numpy 2.0+") + def test_np_trapezoid_basic(self): + self.test_np_trapz_basic(pyfunc=np_trapezoid) + + def test_np_trapz_basic(self, pyfunc=np_trapz): + cfunc = jit(nopython=True)(pyfunc) + _check = partial(self._check_output, pyfunc, cfunc) + + y = [1, 2, 3] + _check({'y': y}) + + y = (3, 1, 2, 2, 2) + _check({'y': y}) + + y = np.arange(15).reshape(3, 5) + _check({'y': y}) + + y = np.linspace(-10, 10, 60).reshape(4, 3, 5) + _check({'y': y}, abs_tol=1e-13) + + self.rnd.shuffle(y) + _check({'y': y}, abs_tol=1e-13) + + y = np.array([]) + _check({'y': y}) + + y = np.array([3.142, np.nan, np.inf, -np.inf, 5]) + _check({'y': y}) + + y = np.arange(20) + np.linspace(0, 10, 20) * 1j + _check({'y': y}) + + y = np.array([], dtype=np.complex128) + _check({'y': y}) + + y = (True, False, True) + _check({'y': y}) + + @unittest.skipUnless(IS_NUMPY_2, "New in numpy 2.0+") + def test_np_trapezoid_x_basic(self): + self.test_np_trapz_x_basic(pyfunc=np_trapezoid_x) + + def test_np_trapz_x_basic(self, pyfunc=np_trapz_x): + cfunc = jit(nopython=True)(pyfunc) + _check = partial(self._check_output, pyfunc, cfunc) + + y = [1, 2, 3] + x = [4, 6, 8] + _check({'y': y, 'x': x}) + + y = [1, 2, 3, 4, 5] + x = (4, 6) + _check({'y': y, 'x': x}) + + y = (1, 2, 3, 4, 5) + x = [4, 5, 6, 7, 8] + _check({'y': y, 'x': x}) + + y = np.array([1, 2, 3, 4, 5]) + x = [4, 4] + _check({'y': y, 'x': x}) + + y = np.array([]) + x = np.array([2, 3]) + _check({'y': y, 'x': x}) + + y = (1, 2, 3, 4, 5) + x = None + _check({'y': y, 'x': x}) + + y = np.arange(20).reshape(5, 4) + x = np.array([4, 5]) + _check({'y': y, 'x': x}) + + y = np.arange(20).reshape(5, 4) + x = np.array([4, 5, 6, 7]) + _check({'y': y, 'x': x}) + + y = np.arange(60).reshape(5, 4, 3) + x = np.array([4, 5]) + _check({'y': y, 'x': x}) + + y = 
np.arange(60).reshape(5, 4, 3) + x = np.array([4, 5, 7]) + _check({'y': y, 'x': x}) + + y = np.arange(60).reshape(5, 4, 3) + self.rnd.shuffle(y) + x = y + 1.1 + self.rnd.shuffle(x) + _check({'y': y, 'x': x}) + + y = np.arange(20) + x = y + np.linspace(0, 10, 20) * 1j + _check({'y': y, 'x': x}) + + y = np.array([1, 2, 3]) + x = np.array([1 + 1j, 1 + 2j]) + _check({'y': y, 'x': x}) + + @unittest.skipUnless(IS_NUMPY_2, "New in numpy 2.0+") + def test_trapezoid_numpy_questionable(self): + self.test_trapz_numpy_questionable(pyfunc=np_trapezoid) + + @unittest.skip('NumPy behaviour questionable') + def test_trapz_numpy_questionable(self, pyfunc=np_trapz): + # https://github.com/numpy/numpy/issues/12858 + cfunc = jit(nopython=True)(pyfunc) + _check = partial(self._check_output, pyfunc, cfunc) + + # passes (NumPy and Numba return 2.0) + y = np.array([True, False, True, True]).astype(int) + _check({'y': y}) + + # fails (NumPy returns 1.5; Numba returns 2.0) + y = np.array([True, False, True, True]) + _check({'y': y}) + + @unittest.skipUnless(IS_NUMPY_2, "New in numpy 2.0+") + def test_np_trapezoid_dx_basic(self): + self.test_np_trapz_dx_basic(pyfunc=np_trapezoid_dx) + + def test_np_trapz_dx_basic(self, pyfunc=np_trapz_dx): + cfunc = jit(nopython=True)(pyfunc) + _check = partial(self._check_output, pyfunc, cfunc) + + y = [1, 2, 3] + dx = 2 + _check({'y': y, 'dx': dx}) + + y = [1, 2, 3, 4, 5] + dx = [1, 4, 5, 6] + _check({'y': y, 'dx': dx}) + + y = [1, 2, 3, 4, 5] + dx = [1, 4, 5, 6] + _check({'y': y, 'dx': dx}) + + y = np.linspace(-2, 5, 10) + dx = np.nan + _check({'y': y, 'dx': dx}) + + y = np.linspace(-2, 5, 10) + dx = np.inf + _check({'y': y, 'dx': dx}) + + y = np.linspace(-2, 5, 10) + dx = np.linspace(-2, 5, 9) + _check({'y': y, 'dx': dx}, abs_tol=1e-13) + + y = np.arange(60).reshape(4, 5, 3) * 1j + dx = np.arange(40).reshape(4, 5, 2) + _check({'y': y, 'dx': dx}) + + x = np.arange(-10, 10, .1) + r = cfunc(np.exp(-.5 * x ** 2) / np.sqrt(2 * np.pi), dx=0.1) + # check 
integral of normal equals 1 + np.testing.assert_almost_equal(r, 1, 7) + + y = np.arange(20) + dx = 1j + _check({'y': y, 'dx': dx}) + + y = np.arange(20) + dx = np.array([5]) + _check({'y': y, 'dx': dx}) + + @unittest.skipUnless(IS_NUMPY_2, "New in numpy 2.0+") + def test_np_trapezoid_x_dx_basic(self): + self.test_np_trapz_x_dx_basic(pyfunc=np_trapezoid_x_dx) + + def test_np_trapz_x_dx_basic(self, pyfunc=np_trapz_x_dx): + cfunc = jit(nopython=True)(pyfunc) + _check = partial(self._check_output, pyfunc, cfunc) + + # dx should be ignored + for dx in (None, 2, np.array([1, 2, 3, 4, 5])): + y = [1, 2, 3] + x = [4, 6, 8] + _check({'y': y, 'x': x, 'dx': dx}) + + y = [1, 2, 3, 4, 5] + x = [4, 6] + _check({'y': y, 'x': x, 'dx': dx}) + + y = [1, 2, 3, 4, 5] + x = [4, 5, 6, 7, 8] + _check({'y': y, 'x': x, 'dx': dx}) + + y = np.arange(60).reshape(4, 5, 3) + self.rnd.shuffle(y) + x = y * 1.1 + x[2, 2, 2] = np.nan + _check({'y': y, 'x': x, 'dx': dx}) + + @unittest.skipUnless(IS_NUMPY_2, "New in numpy 2.0+") + def test_np_trapezoid_x_dx_exceptions(self): + self.test_np_trapz_x_dx_exceptions(pyfunc=np_trapezoid_x_dx) + + def test_np_trapz_x_dx_exceptions(self, pyfunc=np_trapz_x_dx): + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + def check_not_ok(params): + with self.assertRaises(ValueError) as e: + cfunc(*params) + + self.assertIn('unable to broadcast', str(e.exception)) + + y = [1, 2, 3, 4, 5] + for x in [4, 5, 6, 7, 8, 9], [4, 5, 6]: + check_not_ok((y, x, 1.0)) + + y = np.arange(60).reshape(3, 4, 5) + x = np.arange(36).reshape(3, 4, 3) + check_not_ok((y, x, 1.0)) + + y = np.arange(60).reshape(3, 4, 5) + x = np.array([4, 5, 6, 7]) + check_not_ok((y, x, 1.0)) + + y = [1, 2, 3, 4, 5] + dx = np.array([1.0, 2.0]) + check_not_ok((y, None, dx)) + + y = np.arange(60).reshape(3, 4, 5) + dx = np.arange(60).reshape(3, 4, 5) + check_not_ok((y, None, dx)) + + with self.assertTypingError() as e: + y = np.array(4) + check_not_ok((y, None, 
1.0)) + + self.assertIn('y cannot be 0D', str(e.exception)) + + for y in 5, False, np.nan: + with self.assertTypingError() as e: + cfunc(y, None, 1.0) + + self.assertIn('y cannot be a scalar', str(e.exception)) + + def test_average(self): + + #array of random numbers + N = 100 + a = np.random.ranf(N) * 100 + w = np.random.ranf(N) * 100 + w0 = np.zeros(N) + + #boolean array and weights + a_bool = np.random.ranf(N) > 0.5 + w_bool = np.random.ranf(N) > 0.5 + + #array of random ints + a_int = np.random.randint(101, size=N) + w_int = np.random.randint(101, size=N) + + #3D array of random numbers + d0 = 100 + d1 = 50 + d2 = 25 + a_3d = np.random.rand(d0,d1,d2) * 100 + w_3d = np.random.rand(d0,d1,d2) * 100 + + pyfunc = np_average + cfunc = jit(nopython=True)(pyfunc) + + #test case for average with weights + #(number of elements in array and weight array are equal) + self.assertAlmostEqual( pyfunc(a,weights=w), + cfunc(a,weights=w), places=10) + self.assertAlmostEqual( pyfunc(a_3d,weights=w_3d), + cfunc(a_3d,weights=w_3d), places=10) + + #test case for average with array and weights with + #int datatype (number of elements in array and weight array are equal) + self.assertAlmostEqual( pyfunc(a_int,weights=w_int), + cfunc(a_int,weights=w_int), places=10) + + #test case for average with boolean weights + self.assertAlmostEqual( pyfunc(a,weights=w_bool), + cfunc(a,weights=w_bool), places=10) + self.assertAlmostEqual( pyfunc(a_bool,weights=w), + cfunc(a_bool,weights=w), places=10) + self.assertAlmostEqual( pyfunc(a_bool, weights=w_bool), + cfunc(a_bool, weights=w_bool), places=10) + + #test case for average without weights + self.assertAlmostEqual(pyfunc(a), cfunc(a), places=10) + self.assertAlmostEqual(pyfunc(a_3d), cfunc(a_3d), places=10) + + def test_weights_zero_sum(data, weights): + with self.assertRaises(ZeroDivisionError) as e: + cfunc(data, weights=weights) + err = e.exception + self.assertEqual(str(err), + "Weights sum to zero, can't be normalized.") + + #test case 
when sum of weights is zero + test_weights_zero_sum(a, weights=w0) + + def test_1D_weights(data, weights): + with self.assertRaises(TypeError) as e: + cfunc(data, weights=weights) + err = e.exception + self.assertEqual(str(err), + "Numba does not support average when shapes of " + "a and weights differ.") + + def test_1D_weights_axis(data, axis, weights): + with self.assertRaises(TypeError) as e: + cfunc(data,axis=axis, weights=weights) + err = e.exception + self.assertEqual(str(err), + "Numba does not support average with axis.") + + #small case to test exceptions for 2D array and 1D weights + data = np.arange(6).reshape((3,2,1)) + w = np.asarray([1. / 4, 3. / 4]) + + #test without axis argument + test_1D_weights(data, weights=w) + + #test with axis argument + test_1D_weights_axis(data, axis=1, weights=w) + + def test_allclose(self): + + pyfunc = np_allclose + cfunc = jit(nopython=True)(pyfunc) + + min_int = np.iinfo(np.int_).min + a = np.array([min_int], dtype=np.int_) + + simple_data = [ + (np.asarray([1e10, 1e-7]), np.asarray([1.00001e10, 1e-8])), + (np.asarray([1e10, 1e-8]), np.asarray([1.00001e10, 1e-9])), + (np.asarray([1e10, 1e-8]), np.asarray([1.0001e10, 1e-9])), + (np.asarray([1e10]), np.asarray([1.0001e10, 1e-9])), + (1.0, 1.0), + (np.array([np.inf, 1]), np.array([0, np.inf])), + (a, a) + ] + + for a, b in simple_data: + py_result = pyfunc(a, b) + c_result = cfunc(a, b) + self.assertEqual(py_result, c_result) + + a = np.asarray([1.0, np.nan]) + b = np.asarray([1.0, np.nan]) + self.assertFalse(cfunc(a, b)) + self.assertEqual(pyfunc(a, b, equal_nan=True), + cfunc(a, b, equal_nan=True)) + + b = np.asarray([np.nan, 1.0]) + self.assertEqual(pyfunc(a, b), cfunc(a, b)) + + noise_levels = [1.0, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 0.0] + zero_array = np.zeros((25, 4)) + a = np.random.ranf((25, 4)) + for noise in noise_levels: + for rtol in noise_levels: + for atol in noise_levels: + py_result = pyfunc(zero_array, noise, + atol=atol, rtol=rtol) + c_result = 
cfunc(zero_array, noise, + atol=atol, rtol=rtol) + self.assertEqual(py_result, c_result) + + py_result = pyfunc(noise, zero_array, + atol=atol, rtol=rtol) + c_result = cfunc(noise, zero_array, + atol=atol, rtol=rtol) + self.assertEqual(py_result, c_result) + + py_result = pyfunc(np.asarray([noise]), zero_array, + atol=atol, rtol=rtol) + c_result = cfunc(np.asarray([noise]), zero_array, + atol=atol, rtol=rtol) + self.assertEqual(py_result, c_result) + + py_result = pyfunc(a, a + noise, atol=atol, rtol=rtol) + c_result = cfunc(a, a + noise, atol=atol, rtol=rtol) + self.assertEqual(py_result, c_result) + + py_result = pyfunc(a + noise, a, atol=atol, rtol=rtol) + c_result = cfunc(a + noise, a, atol=atol, rtol=rtol) + self.assertEqual(py_result, c_result) + + def test_ip_allclose_numpy(self): + # https://github.com/numpy/numpy/blob/4adc87dff15a247e417d50f10cc4def8e1c17a03/numpy/core/tests/test_numeric.py#L2402-L2420 # noqa: E501 + pyfunc = np_allclose + cfunc = jit(nopython=True)(pyfunc) + + arr = np.array([100.0, 1000.0]) + aran = np.arange(125).astype(dtype=np.float64).reshape((5, 5, 5)) + + atol = 1e-8 + rtol = 1e-5 + + numpy_data = [ + (np.asarray([1, 0]), np.asarray([1, 0])), + (np.asarray([atol]), np.asarray([0.0])), + (np.asarray([1.0]), np.asarray([1 + rtol + atol])), + (arr, arr + arr * rtol), + (arr, arr + arr * rtol + atol * 2), + (aran, aran + aran * rtol), + (np.inf, np.inf), + (np.inf, np.asarray([np.inf])) + ] + + for (x, y) in numpy_data: + self.assertEqual(pyfunc(x, y), cfunc(x, y)) + + def test_ip_not_allclose_numpy(self): + # https://github.com/numpy/numpy/blob/4adc87dff15a247e417d50f10cc4def8e1c17a03/numpy/core/tests/test_numeric.py#L2422-L2441 # noqa: E501 + + pyfunc = np_allclose + cfunc = jit(nopython=True)(pyfunc) + + aran = np.arange(125).astype(dtype=np.float64).reshape((5, 5, 5)) + + atol = 1e-8 + rtol = 1e-5 + + numpy_data = [ + (np.asarray([np.inf, 0]), np.asarray([1.0, np.inf])), + (np.asarray([np.inf, 0]), np.asarray([1.0, 0])), + 
(np.asarray([np.inf, np.inf]), np.asarray([1.0, np.inf])), + (np.asarray([np.inf, np.inf]), np.asarray([1.0, 0.0])), + (np.asarray([-np.inf, 0.0]), np.asarray([np.inf, 0.0])), + (np.asarray([np.nan, 0.0]), np.asarray([np.nan, 0.0])), + (np.asarray([atol * 2]), np.asarray([0.0])), + (np.asarray([1.0]), np.asarray([1 + rtol + atol * 2])), + (aran, aran + aran * atol + atol * 2), + (np.array([np.inf, 1.0]), np.array([0.0, np.inf])) + ] + + for (x, y) in numpy_data: + self.assertEqual(pyfunc(x, y), cfunc(x, y)) + + def test_return_class_is_ndarray_numpy(self): + # https://github.com/numpy/numpy/blob/4adc87dff15a247e417d50f10cc4def8e1c17a03/numpy/core/tests/test_numeric.py#L2460-L2468 # noqa: E501 + + pyfunc = np_allclose + cfunc = jit(nopython=True)(pyfunc) + + class Foo(np.ndarray): + def __new__(cls, *args, **kwargs): + return np.array(*args, **kwargs).view(cls) + + a = Foo([1]) + self.assertTrue(type(cfunc(a, a)) is bool) + + def test_equalnan_numpy(self): + # https://github.com/numpy/numpy/blob/4adc87dff15a247e417d50f10cc4def8e1c17a03/numpy/core/tests/test_numeric.py#L2456-L2458 # noqa: E501 + pyfunc = np_allclose + cfunc = jit(nopython=True)(pyfunc) + + x = np.array([1.0, np.nan]) + + self.assertEqual(pyfunc(x, x, equal_nan=True), + cfunc(x, x, equal_nan=True)) + + def test_no_parameter_modification_numpy(self): + # https://github.com/numpy/numpy/blob/4adc87dff15a247e417d50f10cc4def8e1c17a03/numpy/core/tests/test_numeric.py#L2443-L2448 # noqa: E501 + + pyfunc = np_allclose + cfunc = jit(nopython=True)(pyfunc) + + x = np.array([np.inf, 1]) + y = np.array([0, np.inf]) + + cfunc(x, y) + np.testing.assert_array_equal(x, np.array([np.inf, 1])) + np.testing.assert_array_equal(y, np.array([0, np.inf])) + + def test_min_int_numpy(self): + # https://github.com/numpy/numpy/blob/4adc87dff15a247e417d50f10cc4def8e1c17a03/numpy/core/tests/test_numeric.py#L2450-L2454 # noqa: E501 + + pyfunc = np_allclose + cfunc = jit(nopython=True)(pyfunc) + + min_int = np.iinfo(np.int_).min + 
a = np.array([min_int], dtype=np.int_) + + self.assertEqual(pyfunc(a, a), cfunc(a, a)) + + def test_allclose_exception(self): + self.disable_leak_check() + + pyfunc = np_allclose + cfunc = jit(nopython=True)(pyfunc) + + inps = [ + (np.asarray([1e10, 1e-9, np.nan]), + np.asarray([1.0001e10, 1e-9]), + 1e-05, 1e-08, False, + "shape mismatch: objects cannot be broadcast to a single shape", + ValueError), + ('hello', 3, False, 1e-08, False, + 'The first argument "a" must be array-like', + TypingError), + (3, 'hello', False, 1e-08, False, + 'The second argument "b" must be array-like', + TypingError), + (2, 3, False, 1e-08, False, + 'The third argument "rtol" must be a floating point', + TypingError), + (2, 3, 1e-05, False, False, + 'The fourth argument "atol" must be a floating point', + TypingError), + (2, 3, 1e-05, 1e-08, 1, + 'The fifth argument "equal_nan" must be a boolean', + TypingError), + ] + + for a, b, rtol, atol, equal_nan, exc_msg, exc in inps: + with self.assertRaisesRegex(exc, exc_msg): + cfunc(a, b, rtol, atol, equal_nan) + + def test_interp_basic(self): + pyfunc = interp + cfunc = jit(nopython=True)(pyfunc) + _check = partial(self._check_output, pyfunc, cfunc, abs_tol=1e-10) + + x = np.linspace(-5, 5, 25) + xp = np.arange(-4, 8) + fp = xp + 1.5 + _check(params={'x': x, 'xp': xp, 'fp': fp}) + self.rnd.shuffle(x) + _check(params={'x': x, 'xp': xp, 'fp': fp}) + self.rnd.shuffle(fp) + _check(params={'x': x, 'xp': xp, 'fp': fp}) + + x[:5] = np.nan + x[-5:] = np.inf + self.rnd.shuffle(x) + _check(params={'x': x, 'xp': xp, 'fp': fp}) + fp[:5] = np.nan + fp[-5:] = -np.inf + self.rnd.shuffle(fp) + _check(params={'x': x, 'xp': xp, 'fp': fp}) + + x = np.arange(-4, 8) + xp = x + 1 + fp = x + 2 + _check(params={'x': x, 'xp': xp, 'fp': fp}) + + x = (2.2, 3.3, -5.0) + xp = (2, 3, 4) + fp = (5, 6, 7) + _check(params={'x': x, 'xp': xp, 'fp': fp}) + + x = ((2.2, 3.3, -5.0), (1.2, 1.3, 4.0)) + xp = np.linspace(-4, 4, 10) + fp = np.arange(-5, 5) + _check(params={'x': x, 
'xp': xp, 'fp': fp}) + + x = np.array([1.4, np.nan, np.inf, -np.inf, 0.0, -9.1]) + x = x.reshape(3, 2, order='F') + xp = np.linspace(-4, 4, 10) + fp = np.arange(-5, 5) + _check(params={'x': x, 'xp': xp, 'fp': fp}) + + for x in range(-2, 4): + xp = [0, 1, 2] + fp = (3, 4, 5) + _check(params={'x': x, 'xp': xp, 'fp': fp}) + + x = np.array([]) + xp = [0, 1, 2] + fp = (3, 4, 5) + _check(params={'x': x, 'xp': xp, 'fp': fp}) + + x = np.linspace(0, 25, 60).reshape(3, 4, 5) + xp = np.arange(20) + fp = xp - 10 + _check(params={'x': x, 'xp': xp, 'fp': fp}) + + x = np.nan + xp = np.arange(5) + fp = np.full(5, np.nan) + _check(params={'x': x, 'xp': xp, 'fp': fp}) + + x = np.nan + xp = [3] + fp = [4] + _check(params={'x': x, 'xp': xp, 'fp': fp}) + + x = np.arange(-4, 8) + xp = x + fp = x + _check(params={'x': x, 'xp': xp, 'fp': fp}) + + x = [True, False] + xp = np.arange(-4, 8) + fp = xp + _check(params={'x': x, 'xp': xp, 'fp': fp}) + + x = [-np.inf, -1.0, 0.0, 1.0, np.inf] + xp = np.arange(-4, 8) + fp = xp * 2.2 + _check(params={'x': x, 'xp': xp, 'fp': fp}) + + x = np.linspace(-10, 10, 10) + xp = np.array([-np.inf, -1.0, 0.0, 1.0, np.inf]) + fp = xp * 2.2 + _check(params={'x': x, 'xp': xp, 'fp': fp}) + + x = self.rnd.randn(100) + xp = np.linspace(-3, 3, 100) + fp = np.full(100, fill_value=3.142) + _check(params={'x': x, 'xp': xp, 'fp': fp}) + + for factor in 1, -1: + x = np.array([5, 6, 7]) * factor + xp = [1, 2] + fp = [3, 4] + _check(params={'x': x, 'xp': xp, 'fp': fp}) + + x = 1 + xp = [1] + fp = [True] + _check(params={'x': x, 'xp': xp, 'fp': fp}) + + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = np.linspace(0, 1, 50) + out = cfunc(x0, x, y) + np.testing.assert_almost_equal(out, x0) + + x = np.array([1, 2, 3, 4]) + xp = np.array([1, 2, 3, 4]) + fp = np.array([1, 2, 3.01, 4]) + _check(params={'x': x, 'xp': xp, 'fp': fp}) + + xp = [1] + fp = [np.inf] + _check(params={'x': 1, 'xp': xp, 'fp': fp}) + + x = np.array([1, 2, 2.5, 3, 4]) + xp = np.array([1, 2, 3, 4]) + 
fp = np.array([1, 2, np.nan, 4]) + _check({'x': x, 'xp': xp, 'fp': fp}) + + x = np.array([1, 1.5, 2, 2.5, 3, 4, 4.5, 5, 5.5]) + xp = np.array([1, 2, 3, 4, 5]) + fp = np.array([np.nan, 2, np.nan, 4, np.nan]) + _check({'x': x, 'xp': xp, 'fp': fp}) + + x = np.array([1, 2, 2.5, 3, 4]) + xp = np.array([1, 2, 3, 4]) + fp = np.array([1, 2, np.inf, 4]) + _check({'x': x, 'xp': xp, 'fp': fp}) + + x = np.array([1, 1.5, np.nan, 2.5, -np.inf, 4, 4.5, 5, np.inf, 0, 7]) + xp = np.array([1, 2, 3, 4, 5, 6]) + fp = np.array([1, 2, np.nan, 4, 3, np.inf]) + _check({'x': x, 'xp': xp, 'fp': fp}) + + x = np.array([3.10034867, 3.0999066, 3.10001529]) + xp = np.linspace(0, 10, 1 + 20000) + fp = np.sin(xp / 2.0) + _check({'x': x, 'xp': xp, 'fp': fp}) + + x = self.rnd.uniform(0, 2 * np.pi, (100,)) + xp = np.linspace(0, 2 * np.pi, 1000) + fp = np.cos(xp) + exact = np.cos(x) + got = cfunc(x, xp, fp) + np.testing.assert_allclose(exact, got, atol=1e-5) + + # very dense calibration + x = self.rnd.randn(10) + xp = np.linspace(-10, 10, 1000) + fp = np.ones_like(xp) + _check({'x': x, 'xp': xp, 'fp': fp}) + + # very sparse calibration + x = self.rnd.randn(1000) + xp = np.linspace(-10, 10, 10) + fp = np.ones_like(xp) + _check({'x': x, 'xp': xp, 'fp': fp}) + + def _make_some_values_non_finite(self, a): + p = a.size // 100 + np.put(a, self.rnd.choice(range(a.size), p, replace=False), np.nan) + np.put(a, self.rnd.choice(range(a.size), p, replace=False), -np.inf) + np.put(a, self.rnd.choice(range(a.size), p, replace=False), np.inf) + + def arrays(self, ndata): + # much_finer_grid + yield np.linspace(2.0, 7.0, 1 + ndata * 5) + # finer_grid + yield np.linspace(2.0, 7.0, 1 + ndata) + # similar_grid + yield np.linspace(2.1, 6.8, 1 + ndata // 2) + # coarser_grid + yield np.linspace(2.1, 7.5, 1 + ndata // 2) + # much_coarser_grid + yield np.linspace(1.1, 9.5, 1 + ndata // 5) + # finer_stretched_grid + yield np.linspace(3.1, 5.3, 1 + ndata) * 1.09 + # similar_stretched_grid + yield np.linspace(3.1, 8.3, 1 + 
ndata // 2) * 1.09 + # finer_compressed_grid + yield np.linspace(3.1, 5.3, 1 + ndata) * 0.91 + # similar_compressed_grid + yield np.linspace(3.1, 8.3, 1 + ndata // 2) * 0.91 + # warped_grid + yield np.linspace(3.1, 5.3, 1 + ndata // 2) + 0.3 * np.sin( + np.arange(1 + ndata / 2) * np.pi / (1 + ndata / 2)) + # very_low_noise_grid + yield np.linspace(3.1, 5.3, 1 + ndata) + self.rnd.normal( + size=1 + ndata, scale=0.5 / ndata) + # low_noise_grid + yield np.linspace(3.1, 5.3, 1 + ndata) + self.rnd.normal( + size=1 + ndata, scale=2.0 / ndata) + # med_noise_grid + yield np.linspace(3.1, 5.3, 1 + ndata) + self.rnd.normal( + size=1 + ndata, scale=5.0 / ndata) + # high_noise_grid + yield np.linspace(3.1, 5.3, 1 + ndata) + self.rnd.normal( + size=1 + ndata, scale=20.0 / ndata) + # very_high_noise_grid + yield np.linspace(3.1, 5.3, 1 + ndata) + self.rnd.normal( + size=1 + ndata, scale=50.0 / ndata) + # extreme_noise_grid + yield np.linspace(3.1, 5.3, 1 + ndata) + self.rnd.normal( + size=1 + ndata, scale=200.0 / ndata) + # random_fine_grid + yield self.rnd.rand(1 + ndata) * 9.0 + 0.6 + # random_grid + yield self.rnd.rand(1 + ndata * 2) * 4.0 + 1.3 + + def test_interp_stress_tests(self): + pyfunc = interp + cfunc = jit(nopython=True)(pyfunc) + + ndata = 20000 + xp = np.linspace(0, 10, 1 + ndata) + fp = np.sin(xp / 2.0) + + for x in self.arrays(ndata): + atol = 1e-14 # using abs_tol as otherwise fails on 32bit builds + + expected = pyfunc(x, xp, fp) + got = cfunc(x, xp, fp) + self.assertPreciseEqual(expected, got, abs_tol=atol) + + # no longer require xp to be monotonically increasing + # (in keeping with numpy) even if the output might not + # be meaningful; shuffle all inputs + self.rnd.shuffle(x) + expected = pyfunc(x, xp, fp) + got = cfunc(x, xp, fp) + self.assertPreciseEqual(expected, got, abs_tol=atol) + + self.rnd.shuffle(xp) + expected = pyfunc(x, xp, fp) + got = cfunc(x, xp, fp) + self.assertPreciseEqual(expected, got, abs_tol=atol) + + self.rnd.shuffle(fp) + expected = 
pyfunc(x, xp, fp) + got = cfunc(x, xp, fp) + self.assertPreciseEqual(expected, got, abs_tol=atol) + + # add some values non finite + self._make_some_values_non_finite(x) + expected = pyfunc(x, xp, fp) + got = cfunc(x, xp, fp) + self.assertPreciseEqual(expected, got, abs_tol=atol) + + self._make_some_values_non_finite(xp) + expected = pyfunc(x, xp, fp) + got = cfunc(x, xp, fp) + self.assertPreciseEqual(expected, got, abs_tol=atol) + + self._make_some_values_non_finite(fp) + expected = pyfunc(x, xp, fp) + got = cfunc(x, xp, fp) + self.assertPreciseEqual(expected, got, abs_tol=atol) + + @unittest.skipIf(IS_NUMPY_2 and IS_MACOS_ARM64, "NEP 50 interaction issue.") + def test_interp_complex_stress_tests(self): + pyfunc = interp + cfunc = jit(nopython=True)(pyfunc) + + ndata = 2000 + xp = np.linspace(0, 10, 1 + ndata) + + real = np.sin(xp / 2.0) + real[:200] = self.rnd.choice([np.inf, -np.inf, np.nan], 200) + self.rnd.shuffle(real) + + imag = np.cos(xp / 2.0) + imag[:200] = self.rnd.choice([np.inf, -np.inf, np.nan], 200) + self.rnd.shuffle(imag) + + fp = real + 1j * imag + + for x in self.arrays(ndata): + expected = pyfunc(x, xp, fp) + got = cfunc(x, xp, fp) + np.testing.assert_allclose(expected, got, equal_nan=True) + + self.rnd.shuffle(x) + self.rnd.shuffle(xp) + self.rnd.shuffle(fp) + np.testing.assert_allclose(expected, got, equal_nan=True) + + def test_interp_exceptions(self): + pyfunc = interp + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + x = np.array([1, 2, 3]) + xp = np.array([]) + fp = np.array([]) + + with self.assertRaises(ValueError) as e: + cfunc(x, xp, fp) + + msg = "array of sample points is empty" + self.assertIn(msg, str(e.exception)) + + x = 1 + xp = np.array([1, 2, 3]) + fp = np.array([1, 2]) + + with self.assertRaises(ValueError) as e: + cfunc(x, xp, fp) + + msg = "fp and xp are not of the same size." 
+ self.assertIn(msg, str(e.exception)) + + x = 1 + xp = np.arange(6).reshape(3, 2) + fp = np.arange(6) + + with self.assertTypingError() as e: + cfunc(x, xp, fp) + + msg = "xp must be 1D" + self.assertIn(msg, str(e.exception)) + + x = 1 + xp = np.arange(6) + fp = np.arange(6).reshape(3, 2) + + with self.assertTypingError() as e: + cfunc(x, xp, fp) + + msg = "fp must be 1D" + self.assertIn(msg, str(e.exception)) + + x = 1 + 1j + xp = np.arange(6) + fp = np.arange(6) + + with self.assertTypingError() as e: + cfunc(x, xp, fp) + + complex_dtype_msg = ( + "Cannot cast array data from complex dtype " + "to float64 dtype" + ) + self.assertIn(complex_dtype_msg, str(e.exception)) + + x = 1 + xp = (np.arange(6) + 1j).astype(np.complex64) + fp = np.arange(6) + + with self.assertTypingError() as e: + cfunc(x, xp, fp) + + self.assertIn(complex_dtype_msg, str(e.exception)) + + def test_interp_non_finite_calibration(self): + # examples from + # https://github.com/numpy/numpy/issues/12951 + pyfunc = interp + cfunc = jit(nopython=True)(pyfunc) + _check = partial(self._check_output, pyfunc, cfunc) + + xp = np.array([0, 1, 9, 10]) + fp = np.array([-np.inf, 0.1, 0.9, np.inf]) + x = np.array([0.2, 9.5]) + params = {'x': x, 'xp': xp, 'fp': fp} + _check(params) + + xp = np.array([-np.inf, 1, 9, np.inf]) + fp = np.array([0, 0.1, 0.9, 1]) + x = np.array([0.2, 9.5]) + params = {'x': x, 'xp': xp, 'fp': fp} + _check(params) + + def test_interp_supplemental_tests(self): + # inspired by class TestInterp + # https://github.com/numpy/numpy/blob/f5b6850f231/numpy/lib/tests/test_function_base.py # noqa: E501 + pyfunc = interp + cfunc = jit(nopython=True)(pyfunc) + + for size in range(1, 10): + xp = np.arange(size, dtype=np.double) + yp = np.ones(size, dtype=np.double) + incpts = np.array([-1, 0, size - 1, size], dtype=np.double) + decpts = incpts[::-1] + + incres = cfunc(incpts, xp, yp) + decres = cfunc(decpts, xp, yp) + inctgt = np.array([1, 1, 1, 1], dtype=float) + dectgt = inctgt[::-1] + 
np.testing.assert_almost_equal(incres, inctgt) + np.testing.assert_almost_equal(decres, dectgt) + + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = 0 + np.testing.assert_almost_equal(cfunc(x0, x, y), x0) + x0 = 0.3 + np.testing.assert_almost_equal(cfunc(x0, x, y), x0) + x0 = np.float32(0.3) + np.testing.assert_almost_equal(cfunc(x0, x, y), x0) + x0 = np.float64(0.3) + np.testing.assert_almost_equal(cfunc(x0, x, y), x0) + x0 = np.nan + np.testing.assert_almost_equal(cfunc(x0, x, y), x0) + + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = np.array(0.3) + np.testing.assert_almost_equal(cfunc(x0, x, y), x0) + + xp = np.arange(0, 10, 0.0001) + fp = np.sin(xp) + np.testing.assert_almost_equal(cfunc(np.pi, xp, fp), 0.0) + + def test_interp_supplemental_complex_tests(self): + # inspired by class TestInterp + # https://github.com/numpy/numpy/blob/f5b6850f231/numpy/lib/tests/test_function_base.py # noqa: E501 + pyfunc = interp + cfunc = jit(nopython=True)(pyfunc) + + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + (1 + np.linspace(0, 1, 5)) * 1.0j + x0 = 0.3 + y0 = x0 + (1 + x0) * 1.0j + np.testing.assert_almost_equal(cfunc(x0, x, y), y0) + + def test_interp_float_precision_handled_per_numpy(self): + # test cases from https://github.com/numba/numba/issues/4890 + pyfunc = interp + cfunc = jit(nopython=True)(pyfunc) + dtypes = [np.float32, np.float64, np.int32, np.int64] + + for combo in itertools.combinations_with_replacement(dtypes, 3): + xp_dtype, fp_dtype, x_dtype = combo + xp = np.arange(10, dtype=xp_dtype) + fp = (xp ** 2).astype(fp_dtype) + x = np.linspace(2, 3, 10, dtype=x_dtype) + + expected = pyfunc(x, xp, fp) + got = cfunc(x, xp, fp) + self.assertPreciseEqual(expected, got) + + def test_isnat(self): + def values(): + yield np.datetime64("2016-01-01") + yield np.datetime64("NaT") + yield np.datetime64('NaT', 'ms') + yield np.datetime64('NaT', 'ns') + yield np.datetime64('2038-01-19T03:14:07') + + yield np.timedelta64('NaT', "ms") + yield 
np.timedelta64(34, "ms") + + for unit in ['Y', 'M', 'W', 'D', + 'h', 'm', 's', 'ms', 'us', + 'ns', 'ps', 'fs', 'as']: + yield np.array([123, -321, "NaT"], + dtype=' 5], [x, x ** 2], 0)) + # test with two tuples + test_cases.append(((x < 3, x > 5), (x, x ** 2), 0)) + # test with one list and one tuple + test_cases.append(([x < 3, x > 5], (x, x ** 2), 0)) + # test with one tuple and one list + test_cases.append(((x < 3, x > 5), [x, x ** 2], 0)) + + for condlist, choicelist, default in test_cases: + self.assertPreciseEqual(np_pyfunc(condlist, choicelist, default), + np_nbfunc(condlist, choicelist, default)) + + np_pyfunc_defaults = np_select_defaults + np_nbfunc_defaults = njit(np_select_defaults) + # check the defaults work, using whatever the last input was + self.assertPreciseEqual(np_pyfunc_defaults(condlist, choicelist), + np_nbfunc_defaults(condlist, choicelist)) + + def test_select_exception(self): + np_nbfunc = njit(np_select) + x = np.arange(10) + self.disable_leak_check() + for condlist, choicelist, default, expected_error, expected_text in [ + # Each test case below is one tuple. 
+ # Each tuple is separated by the description of the intended error + + # passing condlist of dim zero + ([np.array(True), np.array([False, True, False])], + [np.array(1), np.arange(12).reshape(4, 3)], 0, + TypingError, "condlist arrays must be of at least dimension 1"), + # condlist and choicelist with different dimensions + ([np.array(True), np.array(False)], [np.array([1]), np.array([2])], + 0, TypingError, "condlist and choicelist elements must have the " + "same number of dimensions"), + # condlist and choicelist with different dimensions + ([np.array([True]), np.array([False])], + [np.array([[1]]), np.array([[2]])], 0, TypingError, + "condlist and choicelist elements must have the " + "same number of dimensions"), + # passing choicelist of dim zero + ([np.array(True), np.array(False)], [np.array(1), np.array(2)], 0, + TypingError, "condlist arrays must be of at least dimension 1"), + # passing an array as condlist instead of a list or tuple + (np.isnan(np.array([1, 2, 3, np.nan, 5, 7])), + np.array([1, 2, 3, np.nan, 5, 7]), 0, TypingError, + "condlist must be a List or a Tuple"), + # default is a list + ([True], [0], [0], TypingError, + "default must be a scalar"), + # condlist with ints instead of booleans + ([(x < 3).astype(int), (x > 5).astype(int)], [x, x ** 2], 0, + TypingError, "condlist arrays must contain booleans"), + # condlist and choicelist of different length + ([x > 9, x > 8, x > 7, x > 6], [x, x**2, x], 0, ValueError, + "list of cases must be same length as list of conditions"), + + # condlist contains tuples instead of arrays + # if in the future numba's np.where accepts tuples, the + # implementation of np.select should also accept them and + # the following two test cases should be normal tests + # instead of negative tests + + # test with lists of length 100 of tuples of length 1 for condlist + ([(False,)] * 100, [np.array([1])] * 100, 0, TypingError, + 'items of condlist must be arrays'), + # test with lists of length 100 of tuples of 
length 1 for choicelist + ([np.array([False])] * 100, [(1,)] * 100, 0, TypingError, + 'items of choicelist must be arrays'), + ]: + with self.assertRaises(expected_error) as e: + np_nbfunc(condlist, choicelist, default) + self.assertIn(expected_text, str(e.exception)) + + def test_windowing(self): + def check_window(func): + np_pyfunc = func + np_nbfunc = njit(func) + + for M in [0, 1, 5, 12]: + expected = np_pyfunc(M) + got = np_nbfunc(M) + self.assertPreciseEqual(expected, got, prec='double') + + for M in ['a', 1.1, 1j]: + with self.assertRaises(TypingError) as raises: + np_nbfunc(1.1) + self.assertIn("M must be an integer", str(raises.exception)) + + check_window(np_bartlett) + check_window(np_blackman) + check_window(np_hamming) + check_window(np_hanning) + + # Test np.kaiser separately + np_pyfunc = np_kaiser + np_nbfunc = njit(np_kaiser) + + for M in [0, 1, 5, 12]: + for beta in [0.0, 5.0, 14.0]: + expected = np_pyfunc(M, beta) + got = np_nbfunc(M, beta) + + if IS_32BITS or platform.machine() in ['ppc64le', 'aarch64']: + self.assertPreciseEqual(expected, + got, prec='double', ulps=2) + else: + self.assertPreciseEqual(expected, got, prec='double', + ulps=2) + + for M in ['a', 1.1, 1j]: + with self.assertRaises(TypingError) as raises: + np_nbfunc(M, 1.0) + self.assertIn("M must be an integer", str(raises.exception)) + + for beta in ['a', 1j]: + with self.assertRaises(TypingError) as raises: + np_nbfunc(5, beta) + self.assertIn("beta must be an integer or float", + str(raises.exception)) + + def test_cross(self): + pyfunc = np_cross + cfunc = jit(nopython=True)(pyfunc) + pairs = [ + # 3x3 (n-dims) + ( + np.array([[1, 2, 3], [4, 5, 6]]), + np.array([[4, 5, 6], [1, 2, 3]]) + ), + # 2x3 array-like (n-dims) + ( + np.array([[1, 2, 3], [4, 5, 6]]), + ((4, 5), (1, 2)) + ), + # 3x3 (1-dim) with type promotion + ( + np.array([1, 2, 3], dtype=np.int64), + np.array([4, 5, 6], dtype=np.float64) + ), + # 3x3 array-like (1-dim) + ( + (1, 2, 3), + (4, 5, 6) + ), + # 2x3 
(1-dim) + ( + np.array([1, 2]), + np.array([4, 5, 6]) + ), + # 3x3 (with broadcasting 1d x 2d) + ( + np.array([1, 2, 3]), + np.array([[4, 5, 6], [1, 2, 3]]) + ), + # 3x3 (with broadcasting 2d x 1d) + ( + np.array([[1, 2, 3], [4, 5, 6]]), + np.array([1, 2, 3]) + ), + # 3x2 (with higher order broadcasting) + ( + np.arange(36).reshape(6, 2, 3), + np.arange(4).reshape(2, 2) + ) + ] + + for x, y in pairs: + expected = pyfunc(x, y) + got = cfunc(x, y) + self.assertPreciseEqual(expected, got) + + def test_cross_exceptions(self): + pyfunc = np_cross + cfunc = jit(nopython=True)(pyfunc) + self.disable_leak_check() + + # test incompatible dimensions for ndim == 1 + with self.assertRaises(ValueError) as raises: + cfunc( + np.arange(4), + np.arange(3) + ) + self.assertIn( + 'Incompatible dimensions for cross product', + str(raises.exception) + ) + + # test 2d cross product error for ndim == 1 + with self.assertRaises(ValueError) as raises: + cfunc( + np.array((1, 2)), + np.array((3, 4)) + ) + self.assertIn( + 'Dimensions for both inputs is 2.', + str(raises.exception) + ) + + self.assertIn( + '`cross2d(a, b)` from `numba.np.extensions`.', + str(raises.exception) + ) + + # test incompatible dimensions for ndim > 1 + with self.assertRaises(ValueError) as raises: + cfunc( + np.arange(8).reshape((2, 4)), + np.arange(6)[::-1].reshape((2, 3)) + ) + self.assertIn( + 'Incompatible dimensions for cross product', + str(raises.exception) + ) + + # test 2d cross product error for ndim == 1 + with self.assertRaises(ValueError) as raises: + cfunc( + np.arange(8).reshape((4, 2)), + np.arange(8)[::-1].reshape((4, 2)) + ) + self.assertIn( + 'Dimensions for both inputs is 2', + str(raises.exception) + ) + + # test non-array-like input + with self.assertRaises(TypingError) as raises: + cfunc( + set([1, 2, 3]), + set([4, 5, 6]) + ) + self.assertIn( + 'Inputs must be array-like.', + str(raises.exception) + ) + + def test_cross2d(self): + pyfunc = np_cross + cfunc = njit(nb_cross2d) + pairs = [ + # 
2x2 (n-dims) + ( + np.array([[1, 2], [4, 5]]), + np.array([[4, 5], [1, 2]]) + ), + # 2x2 array-like (n-dims) + ( + np.array([[1, 2], [4, 5]]), + ((4, 5), (1, 2)) + ), + # 2x2 (1-dim) with type promotion + ( + np.array([1, 2], dtype=np.int64), + np.array([4, 5], dtype=np.float64) + ), + # 2x2 array-like (1-dim) + ( + (1, 2), + (4, 5) + ), + # 2x2 (with broadcasting 1d x 2d) + ( + np.array([1, 2]), + np.array([[4, 5], [1, 2]]) + ), + # 2x2 (with broadcasting 2d x 1d) + ( + np.array([[1, 2], [4, 5]]), + np.array([1, 2]) + ), + # 2x2 (with higher order broadcasting) + ( + np.arange(36).reshape(6, 3, 2), + np.arange(6).reshape(3, 2) + ) + ] + + for x, y in pairs: + expected = pyfunc(x, y) + got = cfunc(x, y) + self.assertPreciseEqual(expected, got) + + def test_cross2d_exceptions(self): + cfunc = njit(nb_cross2d) + self.disable_leak_check() + + # test incompatible dimensions for ndim == 1 + with self.assertRaises(ValueError) as raises: + cfunc( + np.array((1, 2, 3)), + np.array((4, 5, 6)) + ) + self.assertIn( + 'Incompatible dimensions for 2D cross product', + str(raises.exception) + ) + + # test incompatible dimensions for ndim > 1 + with self.assertRaises(ValueError) as raises: + cfunc( + np.arange(6).reshape((2, 3)), + np.arange(6)[::-1].reshape((2, 3)) + ) + self.assertIn( + 'Incompatible dimensions for 2D cross product', + str(raises.exception) + ) + + # test non-array-like input + with self.assertRaises(TypingError) as raises: + cfunc( + set([1, 2]), + set([4, 5]) + ) + self.assertIn( + 'Inputs must be array-like.', + str(raises.exception) + ) + + def test_trim_zeros(self): + + def arrays(): + yield np.array([]) + yield np.zeros(5) + yield np.zeros(1) + yield np.array([1, 2, 3]) + yield np.array([0, 1, 2, 3]) + yield np.array([0., 1., 2., np.nan, 0.]) + yield np.array(['0', 'Hello', 'world']) + + def explicit_trim(): + yield np.array([0, 1, 2, 0, 0]), 'FB' + yield np.array([0, 1, 2]), 'B' + yield np.array([np.nan, 0., 1.2, 2.3, 0.]), 'b' + yield np.array([0, 0, 1, 
2, 5]), 'f' + if numpy_version < (2, 2): + # abf and d are not supported in numpy >= 2.2 + yield np.array([0, 1, 2, 0]), 'abf' + yield np.array([0, 4, 0]), 'd' + + yield np.array(['\0', '1', '2']), 'f' + + pyfunc = np_trim_zeros + cfunc = jit(nopython=True)(pyfunc) + + for arr in arrays(): + expected = pyfunc(arr) + got = cfunc(arr) + self.assertPreciseEqual(expected, got) + + for arr, trim in explicit_trim(): + expected = pyfunc(arr, trim) + got = cfunc(arr, trim) + self.assertPreciseEqual(expected, got) + + def test_trim_zeros_numpy(self): + # https://github.com/numpy/numpy/blob/9d8d46ad615a7e13256b930146ac369f651016c0/numpy/lib/tests/test_function_base.py#L1251-L1313 + a = np.array([0, 0, 1, 0, 2, 3, 4, 0]) + b = a.astype(float) + c = a.astype(complex) + # d = a.astype(object) + values = [a, b, c] + + # test_basic + slc = np.s_[2:-1] + for arr in values: + res = np_trim_zeros(arr) + self.assertPreciseEqual(res, arr[slc]) + + # test_leading_skip + slc = np.s_[:-1] + for arr in values: + res = np_trim_zeros(arr, trim='b') + self.assertPreciseEqual(res, arr[slc]) + + # test_trailing_skip + slc = np.s_[2:] + for arr in values: + res = np_trim_zeros(arr, trim='F') + self.assertPreciseEqual(res, arr[slc]) + + # test_all_zero + for _arr in values: + arr = np.zeros_like(_arr, dtype=_arr.dtype) + + res1 = np_trim_zeros(arr, trim='B') + assert len(res1) == 0 + + res2 = np_trim_zeros(arr, trim='f') + assert len(res2) == 0 + + # test_size_zero + arr = np.zeros(0) + res = np_trim_zeros(arr) + self.assertPreciseEqual(arr, res) + + # test_overflow + for arr in [np.array([0, 2**62, 0]), np.array([0, 2**63, 0]), + np.array([0, 2**64, 0])]: + slc = np.s_[1:2] + res = np_trim_zeros(arr) + self.assertPreciseEqual(res, arr[slc]) + + # test_no_trim + arr = np.array([None, 1, None]) + res = np_trim_zeros(arr) + self.assertPreciseEqual(arr, res) + + # test_list_to_list + res = np_trim_zeros(a.tolist()) + assert isinstance(res, list) + + def test_trim_zeros_exceptions(self): + 
self.disable_leak_check() + cfunc = jit(nopython=True)(np_trim_zeros) + + with self.assertRaises(TypingError) as raises: + cfunc(np.array([[1, 2, 3], [4, 5, 6]])) + self.assertIn( + 'array must be 1D', + str(raises.exception) + ) + + with self.assertRaises(TypingError) as raises: + cfunc(3) + self.assertIn( + 'The first argument must be an array', + str(raises.exception) + ) + + with self.assertRaises(TypingError) as raises: + cfunc({0, 1, 2}) + self.assertIn( + 'The first argument must be an array', + str(raises.exception) + ) + + with self.assertRaises(TypingError) as raises: + cfunc(np.array([0, 1, 2]), 1) + self.assertIn( + 'The second argument must be a string', + str(raises.exception) + ) + + def test_union1d(self): + pyfunc = np_union1d + cfunc = jit(nopython=True)(pyfunc) + arrays = [ + # Test 1d arrays + ( + np.array([1, 2, 3]), + np.array([2, 3, 4]) + ), + # Test 2d with 1d array + ( + np.array([[1, 2, 3], [2, 3, 4]]), + np.array([2, 5, 6]) + ), + # Test 3d with 1d array + ( + np.arange(0, 20).reshape(2,2,5), + np.array([1, 20, 21]) + ), + # Test 2d with 3d array + ( + np.arange(0, 10).reshape(2,5), + np.arange(0, 20).reshape(2,5,2) + ), + # Test other array-like + ( + np.array([False, True, 7]), + np.array([1, 2, 3]) + ) + ] + + for a, b in arrays: + expected = pyfunc(a,b) + got = cfunc(a,b) + self.assertPreciseEqual(expected, got) + + def test_union1d_exceptions(self): + cfunc = jit(nopython=True)(np_union1d) + self.disable_leak_check() + + # Test inputs not array-like + with self.assertRaises(TypingError) as raises: + cfunc("Hello", np.array([1,2])) + self.assertIn( + "The arguments to np.union1d must be array-like", + str(raises.exception) + ) + with self.assertRaises(TypingError) as raises: + cfunc(np.array([1,2]), "Hello") + self.assertIn( + "The arguments to np.union1d must be array-like", + str(raises.exception) + ) + with self.assertRaises(TypingError) as raises: + cfunc("Hello", "World") + self.assertIn( + "The arguments to np.union1d must be 
array-like", + str(raises.exception) + ) + + # Test Unicode array exceptions + with self.assertRaises(TypingError) as raises: + cfunc(np.array(['hello', 'world']), np.array(['a', 'b'])) + self.assertIn( + "For Unicode arrays, arrays must have same dtype", + str(raises.exception) + ) + with self.assertRaises(TypingError) as raises: + cfunc(np.array(['c', 'd']), np.array(['foo', 'bar'])) + self.assertIn( + "For Unicode arrays, arrays must have same dtype", + str(raises.exception) + ) + with self.assertRaises(TypingError) as raises: + cfunc(np.array(['c', 'd']), np.array([1, 2])) + self.assertIn( + "For Unicode arrays, arrays must have same dtype", + str(raises.exception) + ) + with self.assertRaises(TypingError) as raises: + cfunc(np.array(['c', 'd']), np.array([1.1, 2.5])) + self.assertIn( + "For Unicode arrays, arrays must have same dtype", + str(raises.exception) + ) + + def test_asarray_chkfinite(self): + pyfunc = np_asarray_chkfinite + cfunc = jit(nopython=True)(pyfunc) + self.disable_leak_check() + + pairs = [ + #1D array with all args + ( + np.array([1, 2, 3]), + np.float32, + ), + #1D array + ( + np.array([1, 2, 3]), + ), + #1D array-like + ( + [1, 2, 3, 4], + ), + # 2x2 (n-dims) + ( + np.array([[1, 2], [3, 4]]), + np.float32, + ), + # 2x2 array-like (n-dims) + ( + ((1, 2), (3, 4)), + np.int64 + ), + # 2x2 (1-dim) with type promotion + ( + np.array([1, 2], dtype=np.int64), + ), + # 3x2 (with higher order broadcasting) + ( + np.arange(36).reshape(6, 2, 3), + ), + ] + + for pair in pairs: + expected = pyfunc(*pair) + got = cfunc(*pair) + self.assertPreciseEqual(expected, got) + + def test_asarray_chkfinite_exceptions(self): + cfunc = jit(nopython=True)(np_asarray_chkfinite) + self.disable_leak_check() + + #test for single value + with self.assertRaises(TypingError) as e: + cfunc(2) + msg = "The argument to np.asarray_chkfinite must be array-like" + self.assertIn(msg, str(e.exception)) + + #test for NaNs + with self.assertRaises(ValueError) as e: + 
cfunc(np.array([2, 4, np.nan, 5])) + self.assertIn("array must not contain infs or NaNs", str(e.exception)) + + #test for infs + with self.assertRaises(ValueError) as e: + cfunc(np.array([1, 2, np.inf, 4])) + self.assertIn("array must not contain infs or NaNs", str(e.exception)) + + #test for dtype + with self.assertRaises(TypingError) as e: + cfunc(np.array([1, 2, 3, 4]), 'float32') + self.assertIn("dtype must be a valid Numpy dtype", str(e.exception)) + + def test_unwrap_basic(self): + pyfunc = unwrap + cfunc = njit(pyfunc) + + pyfunc1 = unwrap1 + cfunc1 = njit(pyfunc1) + + pyfunc13 = unwrap13 + cfunc13 = njit(pyfunc13) + + pyfunc123 = unwrap123 + cfunc123 = njit(pyfunc123) + # Based on tests from https://github.com/numpy/numpy/blob/3032e84ff34f20def2ef4ebf9f8695947af3fd24/numpy/lib/tests/test_function_base.py#L1979-L2003 # noqa: E501 + # Additional tests are included to ensure proper support for + # higher dimensional arrays + + # p only + def inputs1(): + yield np.array([1, 1 + 2 * np.pi]) + phase = np.linspace(0, np.pi, num=5) + phase[3:] += np.pi + yield phase + yield np.arange(16).reshape((4,4)) + yield np.arange(160, step=10).reshape((4,4)) + yield np.arange(240, step=10).reshape((2,3,4)) + + for p in inputs1(): + self.assertPreciseEqual(pyfunc1(p), cfunc1(p)) + + uneven_seq = np.array([0, 75, 150, 225, 300, 430]) + wrap_uneven = np.mod(uneven_seq, 250) + + # p and period only + def inputs13(): + yield np.array([1, 1 + 256]), 255 + yield np.array([0, 75, 150, 225, 300]), 255 + yield np.array([0, 1, 2, -1, 0]), 4 + yield np.array([2, 3, 4, 5, 2, 3, 4, 5]), 4 + yield wrap_uneven, 250 + + # check that you can set axis=-1 without errors + self.assertPreciseEqual(pyfunc(wrap_uneven, axis=-1, period=250), + cfunc(wrap_uneven, axis=-1, period=250)) + + for p, period in inputs13(): + self.assertPreciseEqual(pyfunc13(p, period=period), + cfunc13(p, period=period)) + + # p, period and discont + def inputs123(): + yield wrap_uneven, 250, 140 + + for p, period, discont 
in inputs123(): + self.assertPreciseEqual(pyfunc123(p, period=period, + discont=discont), + cfunc123(p, period=period, + discont=discont)) + + def test_unwrap_exception(self): + cfunc = njit(unwrap) + self.disable_leak_check() + + with self.assertRaises(TypingError) as e: + cfunc('abc') + self.assertIn('The argument "p" must be array-like', + str(e.exception)) + + with self.assertRaises(TypingError) as e: + cfunc(np.array([1, 2]), 'abc') + self.assertIn('The argument "discont" must be a scalar', + str(e.exception)) + + with self.assertRaises(TypingError) as e: + cfunc(np.array([1, 2]), 3, period='abc') + self.assertIn('The argument "period" must be a scalar', + str(e.exception)) + + with self.assertRaises(TypingError) as e: + cfunc(np.array([1, 2]), 3, axis='abc') + self.assertIn('The argument "axis" must be an integer', + str(e.exception)) + + with self.assertRaises(ValueError) as e: + cfunc(np.array([1, 2]), 3, axis=2) + self.assertIn('Value for argument "axis" is not supported', + str(e.exception)) + + def test_swapaxes_basic(self): + pyfunc = swapaxes + cfunc = jit(nopython=True)(pyfunc) + + def a_variations(): + yield np.arange(10) + yield np.arange(10).reshape(2, 5) + yield np.arange(60).reshape(5, 4, 3) + + for a in a_variations(): + for a1 in range(-a.ndim, a.ndim): + for a2 in range(-a.ndim, a.ndim): + expected = pyfunc(a, a1, a2) + got = cfunc(a, a1, a2) + self.assertPreciseEqual(expected, got) + + def test_swapaxes_exception(self): + pyfunc = swapaxes + cfunc = jit(nopython=True)(pyfunc) + + # Exceptions leak references + self.disable_leak_check() + + with self.assertRaises(TypingError) as raises: + cfunc('abc', 0, 0) + + self.assertIn('The first argument "a" must be an array', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc(np.arange(4), 'abc', 0) + + self.assertIn('The second argument "axis1" must be an integer', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc(np.arange(4), 0, 
'abc') + + self.assertIn('The third argument "axis2" must be an integer', + str(raises.exception)) + + with self.assertRaises(ValueError) as raises: + cfunc(np.arange(4), 1, 0) + + self.assertIn('np.swapaxes: Argument axis1 out of bounds', + str(raises.exception)) + + with self.assertRaises(ValueError) as raises: + cfunc(np.arange(8).reshape(2, 4), 0, -3) + + self.assertIn('np.swapaxes: Argument axis2 out of bounds', + str(raises.exception)) + + def test_take_along_axis(self): + a = np.arange(24).reshape((3, 1, 4, 2)) + + # For now axis must be literal, test explicitly defined implementations + @njit + def axis_none(a, i): + return np.take_along_axis(a, i, axis=None) + + indices = np.array([1, 2], dtype=np.uint64) + self.assertPreciseEqual(axis_none(a, indices), + axis_none.py_func(a, indices)) + + def gen(axis): + @njit + def impl(a, i): + return np.take_along_axis(a, i, axis) + return impl + + for i in range(-1, a.ndim): + jfunc = gen(i) + ai = np.argsort(a, axis=i) + self.assertPreciseEqual(jfunc(a, ai), jfunc.py_func(a, ai)) + + def test_take_along_axis_broadcasting(self): + # Based on + # https://github.com/numpy/numpy/blob/v1.21.0/numpy/lib/tests/test_shape_base.py#L74-L79 + # This demonstrates that arrays are broadcast before the algorithm is + # applied. 
+ arr = np.ones((3, 4, 1)) + ai = np.ones((1, 2, 5), dtype=np.intp) + + def gen(axis): + @njit + def impl(a, i): + return np.take_along_axis(a, i, axis) + return impl + + # Check same axis but expressed as positive/negative value + for i in (1, -2): + check = gen(i) + expected = check.py_func(arr, ai) + actual = check(arr, ai) + self.assertPreciseEqual(expected, actual) + self.assertEqual(actual.shape, (3, 2, 5)) + + def test_take_along_axis_exceptions(self): + arr2d = np.arange(8).reshape(2, 4) + # Valid indices when axis=None is passed to take_along_axis: + indices_none = np.array([0, 1], dtype=np.uint64) + indices = np.ones((2, 4), dtype=np.uint64) + + # For now axis must be literal, so we need to construct functions with + # explicit axis: + def gen(axis): + @njit + def impl(a, i): + return np.take_along_axis(a, i, axis) + return impl + + with self.assertRaises(TypingError) as raises: + gen("a")(arr2d, indices) + self.assertIn("axis must be an integer", str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + gen(-3)(arr2d, indices) + self.assertIn("axis is out of bounds", str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + gen(2)(arr2d, indices) + self.assertIn("axis is out of bounds", str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + gen(None)(12, indices_none) + self.assertIn('"arr" must be an array', str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + gen(None)(arr2d, 5) + self.assertIn('"indices" must be an array', str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + gen(None)(arr2d, np.array([0.0, 1.0])) + self.assertIn( + 'indices array must contain integers', + str(raises.exception) + ) + + @njit + def not_literal_axis(a, i, axis): + return np.take_along_axis(a, i, axis) + + with self.assertRaises(TypingError) as raises: + not_literal_axis(arr2d, indices, 0) + self.assertIn("axis must be a literal value", str(raises.exception)) + + with 
self.assertRaises(TypingError) as raises: + gen(0)(arr2d, np.array([0, 1], dtype=np.uint64)) + self.assertIn("must have the same number of dimensions", + str(raises.exception)) + + # With axis None, array's ndim is implicitly 1. + with self.assertRaises(TypingError) as raises: + gen(None)(arr2d, arr2d) + self.assertIn("must have the same number of dimensions", + str(raises.exception)) + + with self.assertRaises(ValueError) as raises: + gen(0)(arr2d, np.ones((2, 3), dtype=np.uint64)) + self.assertIn("dimensions don't match", str(raises.exception)) + # Exceptions leak references + self.disable_leak_check() + + def test_nan_to_num(self): + # Test cases are from + # https://github.com/numpy/numpy/blob/8ff45c5bb520db04af8720bf1d34a392a8d2561a/numpy/lib/tests/test_type_check.py#L350-L452 + values = [ + np.nan, + 1, + 1.1, + 1 + 1j, + complex(-np.inf, np.nan), + complex(np.nan, np.nan), + np.array([1], dtype=int), + np.array([complex(-np.inf, np.inf), complex(1, np.nan), + complex(np.nan, 1), complex(np.inf, -np.inf)]), + np.array([0.1, 1.0, 0.4]), + np.array([1, 2, 3]), + np.array([[0.1, 1.0, 0.4], [0.4, 1.2, 4.0]]), + np.array([0.1, np.nan, 0.4]), + np.array([[0.1, np.nan, 0.4], [np.nan, 1.2, 4.0]]), + np.array([-np.inf, np.nan, np.inf]), + np.array([-np.inf, np.nan, np.inf], dtype=np.float32) + ] + nans = [0.0, 10] + + pyfunc = nan_to_num + cfunc = njit(nan_to_num) + + for value, nan in product(values, nans): + expected = pyfunc(value, nan=nan) + got = cfunc(value, nan=nan) + self.assertPreciseEqual(expected, got) + + def test_nan_to_num_copy_false(self): + # Check that copy=False operates in-place. + cfunc = njit(nan_to_num) + + x = np.array([0.1, 0.4, np.nan]) + expected = 1.0 + cfunc(x, copy=False, nan=expected) + self.assertPreciseEqual(x[-1], expected) + + x_complex = np.array([0.1, 0.4, complex(np.nan, np.nan)]) + cfunc(x_complex, copy=False, nan=expected) + self.assertPreciseEqual(x_complex[-1], 1. 
+ 1.j) + + def test_nan_to_num_invalid_argument(self): + cfunc = njit(nan_to_num) + + with self.assertTypingError() as raises: + cfunc("invalid_input") + self.assertIn("The first argument must be a scalar or an array-like", + str(raises.exception)) + + def test_diagflat_basic(self): + pyfunc1 = diagflat1 + cfunc1 = njit(pyfunc1) + pyfunc2 = diagflat2 + cfunc2 = njit(pyfunc2) + + def inputs(): + yield np.array([1,2]), 1 + yield np.array([[1,2],[3,4]]), -2 + yield np.arange(8).reshape((2,2,2)), 2 + yield [1, 2], 1 + yield np.array([]), 1 + + for v, k in inputs(): + self.assertPreciseEqual(pyfunc1(v), cfunc1(v)) + self.assertPreciseEqual(pyfunc2(v, k), cfunc2(v, k)) + + def test_diagflat1_exception(self): + pyfunc = diagflat1 + cfunc = njit(pyfunc) + + self.disable_leak_check() + + with self.assertRaises(TypingError) as raises: + cfunc("abc") + self.assertIn('The argument "v" must be array-like', + str(raises.exception)) + + def test_diagflat2_exception(self): + pyfunc = diagflat2 + cfunc = njit(pyfunc) + + self.disable_leak_check() + + with self.assertRaises(TypingError) as raises: + cfunc("abc", 2) + self.assertIn('The argument "v" must be array-like', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc([1, 2], "abc") + self.assertIn('The argument "k" must be an integer', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc([1, 2], 3.0) + self.assertIn('The argument "k" must be an integer', + str(raises.exception)) + + @staticmethod + def _setxor_arrays(): + yield (List.empty_list(types.float64), + List.empty_list(types.float64)) # two empty arrays + yield [1], List.empty_list(types.float64) # empty right + yield List.empty_list(types.float64), [1] # empty left + yield [1], [2] # singletons - xor == union + yield [1], [1] # singletons - xor == nothing + yield [1, 2], [1] + yield [1, 2, 2], [2, 2] + yield [1, 2, 2], [2, 2, 3] + yield [1, 2], [2, 1] + yield [1, 2, 3], [1, 2, 3] + yield [2, 3, 4, 0], [1, 3] 
+ # from numpy: + # https://github.com/numpy/numpy/blob/b0371ef240560e78b651a5d7c9407ae3212a3d56/numpy/lib/tests/test_arraysetops.py#L86 # noqa: E501 + yield [5, 7, 1, 2], [2, 4, 3, 1, 5] + yield [1, 2, 3], [6, 5, 4] + yield [1, 8, 2, 3], [1, 2, 3, 4, 5, 6] + + def test_setxor1d_2(self): + np_pyfunc = np_setxor1d_2 + np_nbfunc = njit(np_pyfunc) + + def check(ar1, ar2): + if isinstance(ar1, list): + ar1 = List(ar1) + if isinstance(ar2, list): + ar2 = List(ar2) + expected = np_pyfunc(ar1, ar2) + got = np_nbfunc(ar1, ar2) + self.assertPreciseEqual(expected, got, msg=f"ar1={ar1}, ar2={ar2}") + + for a, b in self._setxor_arrays(): + check(a, b) + + def test_setxor1d_3(self): + np_pyfunc = np_setxor1d_3 + np_nbfunc = njit(np_pyfunc) + + def check(ar1, ar2, assume_unique=False): + if isinstance(ar1, list): + ar1 = List(ar1) + if isinstance(ar2, list): + ar2 = List(ar2) + expected = np_pyfunc(ar1, ar2, assume_unique) + got = np_nbfunc(ar1, ar2, assume_unique) + self.assertPreciseEqual(expected, got, msg=f"ar1={ar1}, ar2={ar2}") + + for a, b in self._setxor_arrays(): + check(a, b) + if len(np.unique(a)) == len(a) and len(np.unique(b)) == len(b): + check(a, b, True) + + def test_setxor1d_errors(self): + np_pyfunc = np_setxor1d_3 + np_nbfunc = njit(np_pyfunc) + + a = np.array([1]) + b = np.array([2]) + self.disable_leak_check() + with self.assertRaises(TypingError): + np_nbfunc(a, b, "foo") + with self.assertRaises(TypingError): + np_nbfunc("foo", b, True) + with self.assertRaises(TypingError): + np_nbfunc(a, "foo", True) + + @staticmethod + def _setdiff_arrays(): + yield (List.empty_list(types.float64), + List.empty_list(types.float64)) # two empty arrays + yield [1], List.empty_list(types.float64) # empty right + yield List.empty_list(types.float64), [1] # empty left + yield [1], [2] # singletons - diff == [1] + yield [1], [1] # singletons - diff == nothing + yield [1, 2], [1] + yield [1, 2, 2], [2, 2] + yield [1, 2, 2], [2, 2, 3] + yield [1, 2], [2, 1] + yield [1, 2, 3], 
[1, 2, 3] + yield [2, 3, 4, 0], [1, 3] + + # https://github.com/numpy/numpy/blob/b0371ef240560e78b651a5d7c9407ae3212a3d56/numpy/lib/tests/test_arraysetops.py#L558 # noqa: E501 + yield (np.array([6, 5, 4, 7, 1, 2, 7, 4]), + np.array([2, 4, 3, 3, 2, 1, 5])) + yield np.arange(21), np.arange(19) + yield np.array([3, 2, 1]), np.array([7, 5, 2]) + + def test_setdiff1d_2(self): + np_pyfunc = np_setdiff1d_2 + np_nbfunc = njit(np_pyfunc) + + def check(ar1, ar2): + if isinstance(ar1, list): + ar1 = List(ar1) + if isinstance(ar2, list): + ar2 = List(ar2) + expected = np_pyfunc(ar1, ar2) + got = np_nbfunc(ar1, ar2) + self.assertPreciseEqual(expected, got, msg=f"ar1={ar1}, ar2={ar2}") + + for a, b in self._setdiff_arrays(): + check(a, b) + + def test_setdiff1d_3(self): + np_pyfunc = np_setdiff1d_3 + np_nbfunc = njit(np_pyfunc) + + def check(ar1, ar2, assume_unique=False): + if isinstance(ar1, list): + ar1 = List(ar1) + if isinstance(ar2, list): + ar2 = List(ar2) + expected = np_pyfunc(ar1, ar2, assume_unique) + got = np_nbfunc(ar1, ar2, assume_unique) + self.assertPreciseEqual(expected, got, msg=f"ar1={ar1}, ar2={ar2}") + + for a, b in self._setdiff_arrays(): + check(a, b) + if len(np.unique(a)) == len(a) and len(np.unique(b)) == len(b): + check(a, b, True) + + def test_setdiff1d_errors(self): + np_pyfunc = np_setdiff1d_3 + np_nbfunc = njit(np_pyfunc) + + a = np.array([1]) + b = np.array([2]) + self.disable_leak_check() + with self.assertRaises(TypingError): + np_nbfunc(a, b, "foo") + with self.assertRaises(TypingError): + np_nbfunc("foo", b, True) + with self.assertRaises(TypingError): + np_nbfunc(a, "foo", True) + + @staticmethod + def _in1d_arrays(): + yield (List.empty_list(types.float64), + List.empty_list(types.float64)) # two empty arrays + yield [1], List.empty_list(types.float64) # empty right + yield List.empty_list(types.float64), [1] # empty left + yield [1], [2] # singletons - False + yield [1], [1] # singletons - True + yield [1, 2], [1] + yield [1, 2, 2], [2, 2] 
+ yield [1, 2, 2], [2, 2, 3] + yield [1, 2], [2, 1] + yield [1, 2, 3], [1, 2, 3] + yield [2, 3, 4, 0], [3, 1] + yield [2, 3], np.arange(20) # Test the "sorting" method. + yield [2, 3], np.tile(np.arange(5), 4) + + def test_in1d_2(self): + np_pyfunc = np_in1d_2 + np_nbfunc = njit(np_pyfunc) + + def check(ar1, ar2): + if isinstance(ar1, list): + ar1 = List(ar1) + if isinstance(ar2, list): + ar2 = List(ar2) + expected = np_pyfunc(ar1, ar2) + got = np_nbfunc(ar1, ar2) + self.assertPreciseEqual(expected, got, msg=f"ar1={ar1}, ar2={ar2}") + + for a, b in self._in1d_arrays(): + check(a, b) + + def test_in1d_3a(self): + np_pyfunc = np_in1d_3a + np_nbfunc = njit(np_pyfunc) + + def check(ar1, ar2, assume_unique=False): + if isinstance(ar1, list): + ar1 = List(ar1) + if isinstance(ar2, list): + ar2 = List(ar2) + expected = np_pyfunc(ar1, ar2, assume_unique) + got = np_nbfunc(ar1, ar2, assume_unique) + self.assertPreciseEqual(expected, got, msg=f"ar1={ar1}, ar2={ar2}") + + for a, b in self._in1d_arrays(): + check(a, b) + if len(np.unique(a)) == len(a) and len(np.unique(b)) == len(b): + check(a, b, assume_unique=True) + + def test_in1d_3b(self): + np_pyfunc = np_in1d_3b + np_nbfunc = njit(np_pyfunc) + + def check(ar1, ar2, invert=False): + if isinstance(ar1, list): + ar1 = List(ar1) + if isinstance(ar2, list): + ar2 = List(ar2) + expected = np_pyfunc(ar1, ar2, invert) + got = np_nbfunc(ar1, ar2, invert) + self.assertPreciseEqual(expected, got, msg=f"ar1={ar1}, ar2={ar2}") + + for a, b in self._in1d_arrays(): + check(a, b, invert=False) + check(a, b, invert=True) + + def test_in1d_4(self): + np_pyfunc = np_in1d_4 + np_nbfunc = njit(np_pyfunc) + + def check(ar1, ar2, assume_unique=False, invert=False): + if isinstance(ar1, list): + ar1 = List(ar1) + if isinstance(ar2, list): + ar2 = List(ar2) + expected = np_pyfunc(ar1, ar2, assume_unique, invert) + got = np_nbfunc(ar1, ar2, assume_unique, invert) + self.assertPreciseEqual(expected, got, msg=f"ar1={ar1}, ar2={ar2}") + + for a, b 
in self._in1d_arrays(): + check(a, b, invert=False) + check(a, b, invert=True) + if len(np.unique(a)) == len(a) and len(np.unique(b)) == len(b): + check(a, b, assume_unique=True, invert=False) + check(a, b, assume_unique=True, invert=True) + + def test_in1d_errors(self): + np_pyfunc = np_in1d_4 + np_nbfunc = njit(np_pyfunc) + + a = np.array([1]) + b = np.array([2]) + x = np_nbfunc(a, b) + self.assertPreciseEqual(x, np.array([False])) + + self.disable_leak_check() + with self.assertRaises(TypingError): + np_nbfunc(a, b, "foo", False) + with self.assertRaises(TypingError): + np_nbfunc(a, b, False, "foo") + with self.assertRaises(TypingError): + np_nbfunc("foo", b, True, False) + with self.assertRaises(TypingError): + np_nbfunc(a, "foo", True, False) + + @njit() + def np_in1d_kind(a, b, kind): + return np.in1d(a, b, kind=kind) + + with self.assertRaises(TypingError): + np_in1d_kind(a, b, kind=None) + with self.assertRaises(TypingError): + np_in1d_kind(a, b, kind="table") + + @staticmethod + def _isin_arrays(): + yield (List.empty_list(types.float64), + List.empty_list(types.float64)) # two empty arrays + yield (np.zeros((1, 0), dtype=np.int64), + List.empty_list(types.int64)) # two-dim array - shape (1, 0) + yield (np.zeros((0, 0), dtype=np.int64), + List.empty_list(types.int64)) + yield (np.zeros((0, 1), dtype=np.int64), + List.empty_list(types.int64)) + yield [1], List.empty_list(types.float64) # empty right + yield List.empty_list(types.float64), [1] # empty left + yield [1], [2] # singletons - False + yield [1], [1] # singletons - True + yield [1, 2], [1] + yield [1, 2, 2], [2, 2] + yield [1, 2, 2], [2, 2, 3] + yield [1, 2], [2, 1] + yield [2, 3], np.arange(20) # Test the "sorting" method. 
+ yield [2, 3], np.tile(np.arange(5), 4) + yield np.arange(30).reshape(2, 3, 5), [5, 7, 10, 15] # 3d + + # from numpy + # https://github.com/numpy/numpy/blob/b0371ef240560e78b651a5d7c9407ae3212a3d56/numpy/lib/tests/test_arraysetops.py#L200 # noqa: E501 + a = np.arange(24).reshape([2, 3, 4]) + b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]]) + yield a, b + yield np.array(3), b + yield a, np.array(3) + yield np.array(3), np.array(3) + yield 5, b + yield a, 6 + yield 5, 6 + yield List.empty_list(types.int64), b + yield a, List.empty_list(types.int64) + + for dtype in [bool, np.int64, np.float64]: + if dtype in {np.int64, np.float64}: + ar = np.array([10, 20, 30], dtype=dtype) + elif dtype in {bool}: + ar = np.array([True, False, False]) + + empty_array = np.array([], dtype=dtype) + + yield empty_array, ar + yield ar, empty_array + yield empty_array, empty_array + + for mult in (1, 10): + yield [5, 7, 1, 2], [2, 4, 3, 1, 5] * mult + yield [8, 7, 1, 2], [2, 4, 3, 1, 5] * mult + yield [4, 7, 1, 8], [2, 4, 3, 1, 5] * mult + a = [5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5] + yield a, [2, 3, 4] * mult + yield a, [2, 3, 4] * mult + [5, 5, 4] * mult + yield np.array([5, 7, 1, 2]), np.array([2, 4, 3, 1, 5] * mult) + yield np.array([5, 7, 1, 1, 2]), np.array([2, 4, 3, 3, 1, 5] * mult) + yield np.array([5, 5]), np.array([2, 2] * mult) + + yield np.array([5]), np.array([2]) + yield np.array([True, False]), np.array([False, False, False]) + + for dtype1, dtype2 in [ + (np.int8, np.int16), + (np.int16, np.int8), + (np.uint8, np.uint16), + (np.uint16, np.uint8), + (np.uint8, np.int16), + (np.int16, np.uint8), + ]: + is_dtype2_signed = np.issubdtype(dtype2, np.signedinteger) + ar1 = np.array([0, 0, 1, 1], dtype=dtype1) + + if is_dtype2_signed: + ar2 = np.array([-128, 0, 127], dtype=dtype2) + else: + ar2 = np.array([127, 0, 255], dtype=dtype2) + + yield ar1, ar2 + + for dtype in np.typecodes["AllInteger"]: + a = np.array([True, False, False], dtype=bool) + b = np.array([0, 0, 0, 
0], dtype=dtype) + yield a, b + yield b, a + + def test_isin_2(self): + np_pyfunc = np_isin_2 + np_nbfunc = njit(np_pyfunc) + + def check(ar1, ar2): + expected = np_pyfunc(ar1, ar2) + if isinstance(ar1, list): + ar1 = List(ar1) + if isinstance(ar2, list): + ar2 = List(ar2) + got = np_nbfunc(ar1, ar2) + self.assertPreciseEqual(expected, got, msg=f"ar1={ar1}, ar2={ar2}") + + for a, b in self._isin_arrays(): + check(a, b) + + def test_isin_3a(self): + np_pyfunc = np_isin_3a + np_nbfunc = njit(np_pyfunc) + + def check(ar1, ar2, assume_unique=False): + expected = np_pyfunc(ar1, ar2, assume_unique) + if isinstance(ar1, list): + ar1 = List(ar1) + if isinstance(ar2, list): + ar2 = List(ar2) + got = np_nbfunc(ar1, ar2, assume_unique) + self.assertPreciseEqual(expected, got, msg=f"ar1={ar1}, ar2={ar2}") + + for a, b in self._isin_arrays(): + check(a, b) + + try: + len_a = len(a) + except TypeError: + len_a = 1 + try: + len_b = len(b) + except TypeError: + len_b = 1 + if len(np.unique(a)) == len_a and len(np.unique(b)) == len_b: + check(a, b, assume_unique=True) + + def test_isin_3b(self): + np_pyfunc = np_isin_3b + np_nbfunc = njit(np_pyfunc) + + def check(ar1, ar2, invert=False): + expected = np_pyfunc(ar1, ar2, invert) + if isinstance(ar1, list): + ar1 = List(ar1) + if isinstance(ar2, list): + ar2 = List(ar2) + got = np_nbfunc(ar1, ar2, invert) + self.assertPreciseEqual(expected, got, msg=f"ar1={ar1}, ar2={ar2}") + + for a, b in self._isin_arrays(): + check(a, b, invert=False) + check(a, b, invert=True) + + def test_isin_4(self): + np_pyfunc = np_isin_4 + np_nbfunc = njit(np_pyfunc) + + def check(ar1, ar2, assume_unique=False, invert=False): + expected = np_pyfunc(ar1, ar2, assume_unique, invert) + if isinstance(ar1, list): + ar1 = List(ar1) + if isinstance(ar2, list): + ar2 = List(ar2) + got = np_nbfunc(ar1, ar2, assume_unique, invert) + self.assertPreciseEqual(expected, got, msg=f"ar1={ar1}, ar2={ar2}") + + for a, b in self._isin_arrays(): + check(a, b, invert=False) + 
check(a, b, invert=True) + + try: + len_a = len(a) + except TypeError: + len_a = 1 + try: + len_b = len(b) + except TypeError: + len_b = 1 + if len(np.unique(a)) == len_a and len(np.unique(b)) == len_b: + check(a, b, assume_unique=True, invert=False) + check(a, b, assume_unique=True, invert=True) + + def test_isin_errors(self): + np_pyfunc = np_isin_4 + np_nbfunc = njit(np_pyfunc) + + a = np.array([1]) + b = np.array([2]) + x = np_nbfunc(a, b) + self.assertPreciseEqual(x, np.array([False])) + + self.disable_leak_check() + with self.assertRaises(TypingError): + np_nbfunc(a, b, "foo", False) + with self.assertRaises(TypingError): + np_nbfunc(a, b, False, "foo") + with self.assertRaises(TypingError): + np_nbfunc("foo", b, True, False) + with self.assertRaises(TypingError): + np_nbfunc(a, "foo", True, False) + + @njit() + def np_isin_kind(a, b, kind): + return np.isin(a, b, kind=kind) + + with self.assertRaises(TypingError): + np_isin_kind(a, b, kind=None) + with self.assertRaises(TypingError): + np_isin_kind(a, b, kind="table") + + def test_setops_manyways(self): + # https://github.com/numpy/numpy/blob/b0371ef240560e78b651a5d7c9407ae3212a3d56/numpy/lib/tests/test_arraysetops.py#L588 # noqa: E501 + nb_setxor1d = njit(np_setxor1d_2) + nb_intersect1d = njit(intersect1d_2) + nb_union1d = njit(np_union1d) + nb_setdiff1d = njit(np_setdiff1d_2) + + a = np.array([5, 7, 1, 2, 8]) + b = np.array([9, 8, 2, 4, 3, 1, 5]) + + c1 = nb_setxor1d(a, b) + aux1 = nb_intersect1d(a, b) + aux2 = nb_union1d(a, b) + c2 = nb_setdiff1d(aux2, aux1) + self.assertPreciseEqual(c1, c2) + + +class TestNPMachineParameters(TestCase): + # tests np.finfo, np.iinfo, np.MachAr + + template = ''' +def foo(): + ty = np.%s + return np.%s(ty) +''' + + def check(self, func, attrs, *args): + pyfunc = func + cfunc = jit(nopython=True)(pyfunc) + + expected = pyfunc(*args) + got = cfunc(*args) + + # check result + for attr in attrs: + self.assertPreciseEqual(getattr(expected, attr), + getattr(got, attr)) + + def 
create_harcoded_variant(self, basefunc, ty): + #create an instance of using the function with a hardcoded type + #and eval it into existence, return the function for use + tystr = ty.__name__ + basestr = basefunc.__name__ + funcstr = self.template % (tystr, basestr) + dct = {} + exec(compile(funcstr, '', 'exec'), globals(), dct) + return dct['foo'] + + def test_finfo(self): + types = [np.float32, np.float64, np.complex64, np.complex128] + attrs = ('eps', 'epsneg', 'iexp', 'machep', 'max', 'maxexp', 'negep', + 'nexp', 'nmant', 'precision', 'resolution', 'tiny', 'bits',) + for ty in types: + self.check(finfo, attrs, ty(1)) + hc_func = self.create_harcoded_variant(np.finfo, ty) + self.check(hc_func, attrs) + + # check unsupported attr raises + with self.assertRaises(TypingError) as raises: + cfunc = jit(nopython=True)(finfo_machar) + cfunc(7.) + msg = "Unknown attribute 'machar' of type finfo" + self.assertIn(msg, str(raises.exception)) + + # check invalid type raises + with self.assertTypingError(): + cfunc = jit(nopython=True)(finfo) + cfunc(np.int32(7)) + + def test_iinfo(self): + # check types and instances of types + types = [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, + np.uint32, np.uint64] + attrs = ('min', 'max', 'bits',) + for ty in types: + self.check(iinfo, attrs, ty(1)) + hc_func = self.create_harcoded_variant(np.iinfo, ty) + self.check(hc_func, attrs) + + # check invalid type raises + with self.assertTypingError(): + cfunc = jit(nopython=True)(iinfo) + cfunc(np.float64(7)) + + +class TestRegistryImports(TestCase): + + def test_unsafe_import_in_registry(self): + # See 8940 + # This should not fail + code = dedent(""" + import numba + import numpy as np + @numba.njit + def foo(): + np.array([1 for _ in range(1)]) + foo() + print("OK") + """) + result, error = run_in_subprocess(code) + # Assert that the bytestring "OK" was printed to stdout + self.assertEqual(b"OK", result.strip()) + self.assertEqual(b"", error.strip(), 
msg=f"--ERROR--\n{error}\n") + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_np_randomgen.py b/venv/lib/python3.10/site-packages/numba/tests/test_np_randomgen.py new file mode 100644 index 0000000000000000000000000000000000000000..1f66590eefaa6c00cb86ca4b6c1bc5def5077687 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_np_randomgen.py @@ -0,0 +1,1273 @@ +import numba +import numpy as np +import sys +import itertools +import gc +import re + +from numba import types +from numba.tests.support import TestCase, MemoryLeakMixin +from numba.np.random.generator_methods import _get_proper_func +from numba.np.random.generator_core import next_uint32, next_uint64, next_double +from numpy.random import MT19937, Generator +from numba.core.errors import TypingError +from numba.tests.support import run_in_new_process_caching, SerialMixin + + +# TODO: Following testing tolerance adjustments should be reduced +# once NumPy Generator's fmadd issue described below is resolved: +# https://github.com/numba/numba/pull/8038#issuecomment-1165571368 +# The progress is being tracked as one of the tasks in: +# https://github.com/numba/numba/issues/8519 +adjusted_ulp_prec = 2048 + + +class TestHelperFuncs(TestCase): + def test_proper_func_provider(self): + def test_32bit_func(): + return 32 + + def test_64bit_func(): + return 64 + + self.assertEqual(_get_proper_func(test_32bit_func, test_64bit_func, + np.float64)[0](), 64) + self.assertEqual(_get_proper_func(test_32bit_func, test_64bit_func, + np.float32)[0](), 32) + + # With any other datatype it should return a TypingError + with self.assertRaises(TypingError) as raises: + _get_proper_func(test_32bit_func, test_64bit_func, np.int32) + self.assertIn( + 'Argument dtype is not one of the expected type(s)', + str(raises.exception) + ) + with self.assertRaises(TypingError) as raises: + _get_proper_func(test_32bit_func, test_64bit_func, types.float64) + 
self.assertIn( + 'Argument dtype is not one of the expected type(s)', + str(raises.exception) + ) + + def test_check_types(self): + rng = np.random.default_rng(1) + py_func = lambda x: x.normal(loc=(0,)) + numba_func = numba.njit(cache=True)(py_func) + with self.assertRaises(TypingError) as raises: + numba_func(rng) + + expected_pattern = ( + r"Argument loc is not one of the expected type\(s\): " + r"\[, " + r", " + r", \]" + ) + + self.assertTrue( + re.search(expected_pattern, str(raises.exception)) is not None, + "Expected pattern not found in exception message." + + f" Found {str(raises.exception)}" + ) + + def test_integers_arg_check(self): + rng = np.random.default_rng(1) + py_func = lambda x, low, high, dtype: \ + x.integers(low=low, high=high, dtype=dtype, endpoint=True) + numba_func = numba.njit()(py_func) + numba_func_low = numba.njit()(py_func) + + py_func = lambda x, low, high, dtype: \ + x.integers(low=low, high=high, dtype=dtype, endpoint=False) + numba_func_endpoint_false = numba.njit()(py_func) + + cases = [ + # low, high, dtype + (np.iinfo(np.uint8).min, np.iinfo(np.uint8).max, np.uint8), + (np.iinfo(np.int8).min, np.iinfo(np.int8).max, np.int8), + (np.iinfo(np.uint16).min, np.iinfo(np.uint16).max, np.uint16), + (np.iinfo(np.int16).min, np.iinfo(np.int16).max, np.int16), + (np.iinfo(np.uint32).min, np.iinfo(np.uint32).max, np.uint32), + (np.iinfo(np.int32).min, np.iinfo(np.int32).max, np.int32), + ] + for low, high, dtype in cases: + with self.subTest(low=low, high=high, dtype=dtype): + with self.assertRaises(ValueError) as raises: + # min - 1 + numba_func_low(rng, low - 1, high, dtype) + self.assertIn( + 'low is out of bounds', + str(raises.exception) + ) + + with self.assertRaises(ValueError) as raises: + # max + 1, endpoint=True + numba_func(rng, low, high + 1, dtype) + self.assertIn( + 'high is out of bounds', + str(raises.exception) + ) + + with self.assertRaises(ValueError) as raises: + # max + 2, endpoint=False + 
numba_func_endpoint_false(rng, low, high + 2, dtype) + self.assertIn( + 'high is out of bounds', + str(raises.exception) + ) + + low, high, dtype = (np.iinfo(np.uint64).min, + np.iinfo(np.uint64).max, np.uint64) + with self.assertRaises(ValueError) as raises: + # min - 1 + numba_func_low(rng, low - 1, high, dtype) + self.assertIn( + 'low is out of bounds', + str(raises.exception) + ) + + low, high, dtype = (np.iinfo(np.int64).min, + np.iinfo(np.int64).max, np.int64) + with self.assertRaises(ValueError) as raises: + # max + 1, endpoint=True + numba_func(rng, low, high + 1, dtype) + self.assertIn( + 'high is out of bounds', + str(raises.exception) + ) + + with self.assertRaises(ValueError) as raises: + # max + 2, endpoint=False + numba_func_endpoint_false(rng, low, high + 2, dtype) + self.assertIn( + 'high is out of bounds', + str(raises.exception) + ) + + with self.assertRaises(ValueError) as raises: + numba_func(rng, 105, 100, np.uint32) + self.assertIn( + 'low is greater than high in given interval', + str(raises.exception) + ) + + +def test_generator_caching(): + nb_rng = np.random.default_rng(1) + np_rng = np.random.default_rng(1) + py_func = lambda x: x.random(10) + numba_func = numba.njit(cache=True)(py_func) + assert np.allclose(np_rng.random(10), numba_func(nb_rng)) + + +class TestRandomGenerators(MemoryLeakMixin, TestCase): + def check_numpy_parity(self, distribution_func, + bitgen_type=None, seed=None, + test_size=None, test_dtype=None, + ulp_prec=5): + + distribution_func = numba.njit(distribution_func) + if seed is None: + seed = 1 + if bitgen_type is None: + numba_rng_instance = np.random.default_rng(seed=seed) + numpy_rng_instance = np.random.default_rng(seed=seed) + else: + numba_rng_instance = Generator(bitgen_type(seed)) + numpy_rng_instance = Generator(bitgen_type(seed)) + + # Check parity for different size cases + numba_res = distribution_func(numba_rng_instance, + test_size, test_dtype) + numpy_res = distribution_func.py_func(numpy_rng_instance, 
+ test_size, test_dtype) + + if (isinstance(numba_res, np.ndarray) and + np.issubdtype(numba_res.dtype, np.floating)) \ + or isinstance(numba_res, float): + # Float scalars and arrays + np.testing.assert_array_max_ulp(numpy_res, numba_res, + maxulp=ulp_prec, dtype=test_dtype) + else: + # Bool/int scalars and arrays + np.testing.assert_equal(numba_res, numpy_res) + + # Check if the end state of both BitGenerators is same + # after drawing the distributions + numba_gen_state = numba_rng_instance.bit_generator.state['state'] + numpy_gen_state = numpy_rng_instance.bit_generator.state['state'] + + for _state_key in numpy_gen_state: + self.assertPreciseEqual(numba_gen_state[_state_key], + numpy_gen_state[_state_key]) + + def _test_bitgen_func_parity(self, func_name, bitgen_func, seed=1): + numba_rng_instance = np.random.default_rng(seed=seed) + numpy_rng_instance = np.random.default_rng(seed=seed) + + numpy_func = getattr(numpy_rng_instance.bit_generator.ctypes, func_name) + numpy_res = numpy_func(numpy_rng_instance.bit_generator.ctypes.state) + + numba_func = numba.njit(lambda x: bitgen_func(x.bit_generator)) + numba_res = numba_func(numba_rng_instance) + + self.assertPreciseEqual(numba_res, numpy_res) + + def _check_invalid_types(self, dist_func, arg_list, + valid_args, invalid_args): + rng = np.random.default_rng() + for idx, _arg in enumerate(arg_list): + curr_args = valid_args.copy() + curr_args[idx] = invalid_args[idx] + curr_args = [rng] + curr_args + nb_dist_func = numba.njit(dist_func) + with self.assertRaises(TypingError) as raises: + nb_dist_func(*curr_args) + self.assertIn( + f'Argument {_arg} is not one of the expected type(s):', + str(raises.exception) + ) + + def test_npgen_boxing_unboxing(self): + rng_instance = np.random.default_rng() + numba_func = numba.njit(lambda x: x) + self.assertEqual(rng_instance, numba_func(rng_instance)) + self.assertEqual(id(rng_instance), id(numba_func(rng_instance))) + + def test_npgen_boxing_refcount(self): + rng_instance = 
np.random.default_rng() + no_box = numba.njit(lambda x:x.random()) + do_box = numba.njit(lambda x:x) + + y = do_box(rng_instance) + gc.collect() + ref_1 = sys.getrefcount(rng_instance) + del y + no_box(rng_instance) + gc.collect() + ref_2 = sys.getrefcount(rng_instance) + + self.assertEqual(ref_1, ref_2 + 1) + + def test_bitgen_funcs(self): + func_names = ["next_uint32", "next_uint64", "next_double"] + funcs = [next_uint32, next_uint64, next_double] + + for _func, _func_name in zip(funcs, func_names): + with self.subTest(_func=_func, _func_name=_func_name): + self._test_bitgen_func_parity(_func_name, _func) + + def test_integers(self): + test_sizes = [None, (), (100,), (10, 20, 30)] + test_dtypes = [np.int64, np.int32, np.int16, np.int8, + np.uint64, np.uint32, np.uint16, np.uint8] + bitgen_types = [None, MT19937] + + # Test with no arguments + dist_func = lambda x, size, dtype:x.integers(0, 100) + with self.subTest(): + self.check_numpy_parity(dist_func, test_size=None, + test_dtype=None, ulp_prec=0) + + dist_func = lambda x, size, dtype:\ + x.integers(5, 10, size=size, dtype=dtype) + for _size in test_sizes: + for _dtype in test_dtypes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _dtype=_dtype, + _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, _dtype, 0) + + # Checking dtype = bool seperately + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + dist_func = lambda x, size, dtype:\ + x.integers(False, True, size=size, dtype=np.bool_) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, + _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, np.bool_, 0) + + # Test dtype casting for high and low + dist_func = lambda x, size, dtype: \ + x.integers(np.uint8(0), np.int64(100)) + with self.subTest(): + self.check_numpy_parity(dist_func, test_size=None, + test_dtype=None) + + dist_func = lambda x, low, high, size, dtype, 
endpoint:\ + x.integers(low=low, high=high, size=size, + dtype=dtype, endpoint=endpoint) + self._check_invalid_types(dist_func, + ['low', 'high', 'size', 'dtype', 'endpoint'], + [1, 5, (1,), np.int64, True], + ['x', 'x', ('x',), np.float64, 'x']) + + # Testing .integers() dtype wise + def test_integers_cases(self): + cases = [ + # low, high, dtype + (5, 6, np.uint64), # rng == 0 (rng stands for range) + (5, 100, np.uint64), # rng <= 0xFFFFFFFF + (0, 0xFFFFFFFFFF, np.uint64), # rng > 0xFFFFFFFF + (0, 0xFFFFFFFFFFFFFFFF - 1, np.uint64),# rng == 0xFFFFFFFFFFFFFFFF-1 + (0, 0xFFFFFFFFFFFFFFFF, np.uint64), # rng == 0xFFFFFFFFFFFFFFFF + + (5, 6, np.int64), # rng == 0 + (5, 100, np.int64), # rng <= 0xFFFFFFFF + (0, 0xFFFFFFFFFF, np.int64), # rng > 0xFFFFFFFF + (0, 0xFFFFFFFFFFFFFFF - 1, np.int64), # rng == 0xFFFFFFFFFFFFFFF - 1 + (0, 0xFFFFFFFFFFFFFFF, np.int64), # rng == 0xFFFFFFFFFFFFFFF + (-0xFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFF, np.int64), # min/max + + (5, 6, np.uint32), # rng == 0 + (5, 100, np.uint32), # rng < 0xFFFFFFFF + (0, 0xFFFFFFFF - 1, np.uint32), # rng == 0xFFFFFFFF - 1 + (0, 0xFFFFFFFF, np.uint32), # rng == 0xFFFFFFFF + + (5, 6, np.int32), # rng == 0 + (5, 100, np.int32), # rng < 0xFFFFFFFF + (0, 0xFFFFFFF - 1, np.int32), # rng == 0xFFFFFFF - 1 + (0, 0xFFFFFFF, np.int32), # rng == 0xFFFFFFF + (-0xFFFFFFF, 0xFFFFFFF, np.int32), + + (5, 6, np.uint16), # rng == 0 + (5, 100, np.uint16), # rng < 0xFFFF + (0, 0xFFFF - 1, np.uint16), # rng == 0xFFFF - 1 + (0, 0xFFFF, np.uint16), # rng == 0xFFFF + + (5, 6, np.int16), # rng == 0 + (5, 10, np.int16), # rng < 0xFFF + (0, 0xFFF - 1, np.int16), # rng == 0xFFF - 1 + (0, 0xFFF, np.int16), # rng == 0xFFF + (-0xFFF, 0xFFF, np.int16), + + (5, 6, np.uint8), # rng == 0 + (5, 10, np.uint8), # rng < 0xFF + (0, 0xFF - 1, np.uint8), # rng == 0xFF - 1 + (0, 0xFF, np.uint8), # rng == 0xFF + + (5, 6, np.int8), # rng == 0 + (5, 10, np.int8), # rng < 0xF + (0, 0xF - 1, np.int8), # rng == 0xF-1 + (0, 0xF, np.int8), # rng == 0xF + (-0xF, 
0xF, np.int8), + ] + size = (2, 3) + + for low, high, dtype in cases: + with self.subTest(low=low, high=high, dtype=dtype): + dist_func = lambda x, size, dtype:\ + x.integers(low, high, size=size, dtype=dtype) + self.check_numpy_parity(dist_func, None, + None, size, dtype, 0) + + def test_random(self): + test_sizes = [None, (), (100,), (10, 20, 30)] + test_dtypes = [np.float32, np.float64] + bitgen_types = [None, MT19937] + + # Test with no arguments + dist_func = lambda x, size, dtype:x.random() + with self.subTest(): + self.check_numpy_parity(dist_func, test_size=None, + test_dtype=None) + + dist_func = lambda x, size, dtype:x.random(size=size, dtype=dtype) + + for _size in test_sizes: + for _dtype in test_dtypes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _dtype=_dtype, + _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, _dtype) + dist_func = lambda x, size, dtype:\ + x.random(size=size, dtype=dtype) + self._check_invalid_types(dist_func, ['size', 'dtype'], + [(1,), np.float64], [('x',), 0.]) + + def test_standard_normal(self): + test_sizes = [None, (), (100,), (10, 20, 30)] + test_dtypes = [np.float32, np.float64] + bitgen_types = [None, MT19937] + + # Test with no arguments + dist_func = lambda x, size, dtype:x.standard_normal() + with self.subTest(): + self.check_numpy_parity(dist_func, test_size=None, + test_dtype=None) + + dist_func = lambda x, size, dtype:\ + x.standard_normal(size=size, dtype=dtype) + + for _size in test_sizes: + for _dtype in test_dtypes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _dtype=_dtype, + _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, _dtype) + dist_func = lambda x, size, dtype:\ + x.standard_normal(size=size, dtype=dtype) + self._check_invalid_types(dist_func, ['size', 'dtype'], + [(1,), np.float32], [('x',), 0]) + + def test_standard_exponential(self): + test_sizes = [None, (), (100,), (10, 20, 30)] + test_dtypes = 
[np.float32, np.float64] + bitgen_types = [None, MT19937] + + # Test with no arguments + dist_func = lambda x, size, dtype:x.standard_exponential() + with self.subTest(): + self.check_numpy_parity(dist_func, test_size=None, + test_dtype=None) + + dist_func = lambda x, size, dtype:\ + x.standard_exponential(size=size, dtype=dtype) + + for _size in test_sizes: + for _dtype in test_dtypes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _dtype=_dtype, + _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, _dtype) + + dist_func = lambda x, method, size, dtype:\ + x.standard_exponential(method=method, size=size, dtype=dtype) + self._check_invalid_types(dist_func, ['method', 'size', 'dtype'], + ['zig', (1,), np.float32], [0, ('x',), 0]) + + def test_standard_exponential_inv(self): + test_sizes = [None, (), (100,), (10, 20, 30)] + test_dtypes = [np.float32, np.float64] + bitgen_types = [None, MT19937] + + dist_func = lambda x, size, dtype:\ + x.standard_exponential(size=size, dtype=dtype, method='inv') + for _size in test_sizes: + for _dtype in test_dtypes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _dtype=_dtype, + _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, _dtype) + + def test_standard_gamma(self): + test_sizes = [None, (), (100,), (10, 20, 30)] + test_dtypes = [np.float32, np.float64] + bitgen_types = [None, MT19937] + + dist_func = lambda x, size, dtype: \ + x.standard_gamma(shape=5.0, size=size, dtype=dtype) + for _size in test_sizes: + for _dtype in test_dtypes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _dtype=_dtype, + _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, _dtype, + adjusted_ulp_prec) + dist_func = lambda x, shape, size, dtype:\ + x.standard_gamma(shape=shape, size=size, dtype=dtype) + self._check_invalid_types(dist_func, ['shape', 'size', 'dtype'], + [5.0, (1,), np.float32], ['x', ('x',), 0]) + + def 
test_normal(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. + + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + # Test with no arguments + dist_func = lambda x, size, dtype:x.normal() + with self.subTest(): + self.check_numpy_parity(dist_func, test_size=None, + test_dtype=None, + ulp_prec=adjusted_ulp_prec) + + dist_func = lambda x, size, dtype:x.normal(loc=1.5, scale=3, size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None, + adjusted_ulp_prec) + dist_func = lambda x, loc, scale, size:\ + x.normal(loc=loc, scale=scale, size=size) + self._check_invalid_types(dist_func, ['loc', 'scale', 'size'], + [1.5, 3, (1,)], ['x', 'x', ('x',)]) + + def test_uniform(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. + + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + # Test with no arguments + dist_func = lambda x, size, dtype:x.uniform() + with self.subTest(): + self.check_numpy_parity(dist_func, test_size=None, + test_dtype=None, + ulp_prec=adjusted_ulp_prec) + + dist_func = lambda x, size, dtype:x.uniform(low=1.5, high=3, size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None, + adjusted_ulp_prec) + dist_func = lambda x, low, high, size:\ + x.uniform(low=low, high=high, size=size) + self._check_invalid_types(dist_func, ['low', 'high', 'size'], + [1.5, 3, (1,)], ['x', 'x', ('x',)]) + + def test_exponential(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. 
+ + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + # Test with no arguments + dist_func = lambda x, size, dtype:x.exponential() + with self.subTest(): + self.check_numpy_parity(dist_func, test_size=None, + test_dtype=None) + + dist_func = lambda x, size, dtype:x.exponential(scale=1.5, size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None) + dist_func = lambda x, scale, size:\ + x.exponential(scale=scale, size=size) + self._check_invalid_types(dist_func, ['scale', 'size'], + [1.5, (1,)], ['x', ('x',)]) + + def test_gamma(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. + + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + dist_func = lambda x, size, dtype:x.gamma(shape=5.0, scale=1.5, + size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None, + adjusted_ulp_prec) + dist_func = lambda x, shape, scale, size:\ + x.gamma(shape=shape, scale=scale, size=size) + self._check_invalid_types(dist_func, ['shape', 'scale', 'size'], + [5.0, 1.5, (1,)], ['x', 'x', ('x',)]) + + def test_beta(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. 
+ + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + dist_func = lambda x, size, dtype:x.beta(a=1.5, b=2.5, size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None, + adjusted_ulp_prec) + + dist_func = lambda x, a, b, size:x.beta(a=a, b=b, size=size) + self._check_invalid_types(dist_func, ['a', 'b', 'size'], + [5.0, 1.5, (1,)], ['x', 'x', ('x',)]) + + def test_f(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. + + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + dist_func = lambda x, size, dtype:x.f(dfnum=2, dfden=3, size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None, + adjusted_ulp_prec) + + dist_func = lambda x, dfnum, dfden, size:\ + x.f(dfnum=dfnum, dfden=dfden, size=size) + self._check_invalid_types(dist_func, ['dfnum', 'dfden', 'size'], + [5, 1, (1,)], ['x', 'x', ('x',)]) + + def test_chisquare(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. 
+ + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + dist_func = lambda x, size, dtype:x.chisquare(df=2, size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None, + adjusted_ulp_prec) + + dist_func = lambda x, df, size:\ + x.chisquare(df=df, size=size) + self._check_invalid_types(dist_func, ['df', 'size'], + [2, (1,)], ['x', ('x',)]) + + def test_standard_cauchy(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. + + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + # Test with no arguments + dist_func = lambda x, size, dtype:x.standard_cauchy() + with self.subTest(): + self.check_numpy_parity(dist_func, test_size=None, + test_dtype=None) + + dist_func = lambda x, size, dtype:x.standard_cauchy(size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None) + + dist_func = lambda x, size:x.standard_cauchy(size=size) + self._check_invalid_types(dist_func, ['size'], + [(1,)], [('x',)]) + + def test_pareto(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. 
+ + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + dist_func = lambda x, size, dtype:x.pareto(a=1.0, size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None) + + dist_func = lambda x, a, size:x.pareto(a=a, size=size) + self._check_invalid_types(dist_func, ['a', 'size'], + [1, (1,)], ['x', ('x',)]) + + def test_weibull(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. + + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + dist_func = lambda x, size, dtype:x.weibull(a=1.0, size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None) + + dist_func = lambda x, a, size:x.weibull(a=a, size=size) + self._check_invalid_types(dist_func, ['a', 'size'], + [1, (1,)], ['x', ('x',)]) + + def test_power(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. + + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + dist_func = lambda x, size, dtype:x.power(a=0.75, size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None) + + dist_func = lambda x, a, size:x.power(a=a, size=size) + self._check_invalid_types(dist_func, ['a', 'size'], + [0.75, (1,)], ['x', ('x',)]) + + def test_laplace(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. 
+ + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + # Test with no arguments + dist_func = lambda x, size, dtype:x.laplace() + with self.subTest(): + self.check_numpy_parity(dist_func, test_size=None, + test_dtype=None, + ulp_prec=adjusted_ulp_prec) + + dist_func = lambda x, size, dtype:\ + x.laplace(loc=1.0, scale=1.5, size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None, + adjusted_ulp_prec) + + dist_func = lambda x, loc, scale, size:\ + x.laplace(loc=loc, scale=scale, size=size) + self._check_invalid_types(dist_func, ['loc', 'scale', 'size'], + [1.0, 1.5, (1,)], ['x', 'x', ('x',)]) + + def test_logistic(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. + + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + # Test with no arguments + dist_func = lambda x, size, dtype:x.logistic() + with self.subTest(): + self.check_numpy_parity(dist_func, test_size=None, + test_dtype=None, + ulp_prec=adjusted_ulp_prec) + + dist_func = lambda x, size, dtype:\ + x.logistic(loc=1.0,scale=1.5, size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None, + adjusted_ulp_prec) + + dist_func = lambda x, loc, scale, size:\ + x.logistic(loc=loc, scale=scale, size=size) + self._check_invalid_types(dist_func, ['loc', 'scale', 'size'], + [1.0, 1.5, (1,)], ['x', 'x', ('x',)]) + + def test_lognormal(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. 
+ + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + # Test with no arguments + dist_func = lambda x, size, dtype:x.lognormal() + with self.subTest(): + self.check_numpy_parity(dist_func, test_size=None, + test_dtype=None, + ulp_prec=adjusted_ulp_prec) + + dist_func = lambda x, size, dtype:\ + x.lognormal(mean=5.0, sigma=1.5, size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None, + adjusted_ulp_prec) + + dist_func = lambda x, mean, sigma, size:\ + x.lognormal(mean=mean, sigma=sigma, size=size) + self._check_invalid_types(dist_func, ['mean', 'sigma', 'size'], + [1.0, 1.5, (1,)], ['x', 'x', ('x',)]) + + def test_rayleigh(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. + + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + # Test with no arguments + dist_func = lambda x, size, dtype:x.rayleigh() + with self.subTest(): + self.check_numpy_parity(dist_func, test_size=None, + test_dtype=None) + + dist_func = lambda x, size, dtype:x.rayleigh(scale=1.5, size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None) + + dist_func = lambda x, scale, size:x.rayleigh(scale=scale, size=size) + self._check_invalid_types(dist_func, ['scale', 'size'], + [1.5, (1,)], ['x', ('x',)]) + + def test_standard_t(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. 
+ + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + dist_func = lambda x, size, dtype:x.standard_t(df=2, size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None, + adjusted_ulp_prec) + + dist_func = lambda x, df, size:x.standard_t(df=df, size=size) + self._check_invalid_types(dist_func, ['df', 'size'], + [2, (1,)], ['x', ('x',)]) + + def test_wald(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. + + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + dist_func = lambda x, size, dtype:x.wald(mean=5.0, scale=1.5, size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None, + adjusted_ulp_prec) + + dist_func = lambda x, mean, scale, size:\ + x.wald(mean=mean, scale=scale, size=size) + self._check_invalid_types(dist_func, ['mean', 'scale', 'size'], + [1.0, 1.5, (1,)], ['x', 'x', ('x',)]) + + def test_geometric(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. 
+ + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + dist_func = lambda x, size, dtype:x.geometric(p=0.75, size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None, + adjusted_ulp_prec) + + dist_func = lambda x, p, size:x.geometric(p=p, size=size) + self._check_invalid_types(dist_func, ['p', 'size'], + [0.75, (1,)], ['x', ('x',)]) + + def test_zipf(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. + + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + dist_func = lambda x, size, dtype:x.zipf(a=1.5, size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None) + + dist_func = lambda x, a, size:x.zipf(a=a, size=size) + self._check_invalid_types(dist_func, ['a', 'size'], + [1, (1,)], ['x', ('x',)]) + + def test_triangular(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. 
+ + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + dist_func = lambda x, size, dtype:\ + x.triangular(left=0, mode=3, right=5, size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None) + + dist_func = lambda x, left, mode, right, size:\ + x.triangular(left=left, mode=mode, right=right, size=size) + self._check_invalid_types(dist_func, ['left', 'mode', 'right', 'size'], + [0, 3, 5, (1,)], ['x', 'x', 'x', ('x',)]) + + def test_poisson(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. + + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + dist_func = lambda x, size, dtype:x.poisson(lam=15, size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None, + adjusted_ulp_prec) + + dist_func = lambda x, lam, size:x.poisson(lam=lam, size=size) + self._check_invalid_types(dist_func, ['lam', 'size'], + [15, (1,)], ['x', ('x',)]) + + def test_negative_binomial(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. 
+ + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + dist_func = lambda x, size, dtype:\ + x.negative_binomial(n=1, p=0.1, size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None, + adjusted_ulp_prec) + + dist_func = lambda x, n, p, size:\ + x.negative_binomial(n=n, p=p, size=size) + self._check_invalid_types(dist_func, ['n', 'p', 'size'], + [1, 0.75, (1,)], ['x', 'x', ('x',)]) + + # NumPy tests at: + # https://github.com/numpy/numpy/blob/95e3e7f445407e4f355b23d6a9991d8774f0eb0c/numpy/random/tests/test_generator_mt19937.py#L936 + # Written in following format for semblance with existing Generator tests. + def test_shuffle(self): + test_sizes = [(10, 20, 30)] + bitgen_types = [None, MT19937] + axes = [0, 1, 2] + + for _size, _bitgen, _axis in itertools.product(test_sizes, + bitgen_types, + axes): + with self.subTest(_size=_size, _bitgen=_bitgen, _axis=_axis): + def dist_func(x, size, dtype): + arr = x.random(size=size) + x.shuffle(arr, axis=_axis) + return arr + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None, + 0) + + def test_shuffle_empty(self): + a = np.array([]) + b = np.array([]) + + def dist_func(x, arr): + x.shuffle(arr) + return arr + + nb_func = numba.njit(dist_func) + rng = lambda: np.random.default_rng(1) + + self.assertPreciseEqual(dist_func(rng(), a), nb_func(rng(), b)) + + def test_shuffle_check(self): + self.disable_leak_check() + + def dist_func(x, arr, axis): + x.shuffle(arr, axis=axis) + return arr + + self._check_invalid_types(dist_func, ['x', 'axis'], + [np.array([3,4,5]), 0], ['x', 'x']) + + rng = np.random.default_rng(1) + with self.assertRaises(IndexError) as raises: + numba.njit(dist_func)(rng, np.array([3,4,5]), 2) + self.assertIn( + 'Axis is out of bounds for the given array', + str(raises.exception) + ) + + # NumPy tests at: + # 
https://github.com/numpy/numpy/blob/95e3e7f445407e4f355b23d6a9991d8774f0eb0c/numpy/random/tests/test_generator_mt19937.py#L1030 + # Written in following format for semblance with existing Generator tests. + def test_permutation(self): + test_sizes = [(10, 20, 30)] + bitgen_types = [None, MT19937] + axes = [0, 1, 2, -1, -2] + + for _size, _bitgen, _axis in itertools.product(test_sizes, + bitgen_types, + axes): + with self.subTest(_size=_size, _bitgen=_bitgen, _axis=_axis): + def dist_func(x, size, dtype): + arr = x.random(size=size) + return x.permutation(arr, axis=1) + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None, + 0) + + # Test that permutation is actually done on a copy of the array + dist_func = numba.njit(lambda rng, arr: rng.permutation(arr)) + rng = np.random.default_rng() + arr = rng.random(size=(10, 20)) + arr_cpy = arr.copy() + dist_func(rng, arr) + self.assertPreciseEqual(arr, arr_cpy) + + def test_permutation_exception(self): + self.disable_leak_check() + + def dist_func(x, arr, axis): + return x.permutation(arr, axis=axis) + + self._check_invalid_types(dist_func, ['x', 'axis'], + [np.array([3,4,5]), 0], ['x', 'x']) + + rng = np.random.default_rng(1) + with self.assertRaises(IndexError) as raises: + numba.njit(dist_func)(rng, np.array([3,4,5]), 2) + self.assertIn( + 'Axis is out of bounds for the given array', + str(raises.exception) + ) + with self.assertRaises(IndexError) as raises: + numba.njit(dist_func)(rng, np.array([3,4,5]), -2) + self.assertIn( + 'Axis is out of bounds for the given array', + str(raises.exception) + ) + + def test_permutation_empty(self): + a = np.array([]) + b = np.array([]) + + def dist_func(x, arr): + return x.permutation(arr) + + nb_func = numba.njit(dist_func) + rng = lambda: np.random.default_rng(1) + + self.assertPreciseEqual(dist_func(rng(), a), nb_func(rng(), b)) + + def test_noncentral_chisquare(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs 
only once with default system type. + + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + dist_func = lambda x, size, dtype:\ + x.noncentral_chisquare(3.0, 20.0, size=size) + for _size, _bitgen in itertools.product(test_sizes, bitgen_types): + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None) + + dist_func = lambda x, df, nonc, size:\ + x.noncentral_chisquare(df=df, nonc=nonc, size=size) + valid_args = [3.0, 5.0, (1,)] + self._check_invalid_types(dist_func, ['df', 'nonc', 'size'], + valid_args, ['x', 'x', ('x',)]) + + # Test argument bounds + rng = np.random.default_rng() + valid_args = [rng] + valid_args + nb_dist_func = numba.njit(dist_func) + with self.assertRaises(ValueError) as raises: + curr_args = valid_args.copy() + # Change df to an invalid value + curr_args[1] = 0 + nb_dist_func(*curr_args) + self.assertIn('df <= 0', str(raises.exception)) + with self.assertRaises(ValueError) as raises: + curr_args = valid_args.copy() + # Change nonc to an invalid value + curr_args[2] = -1 + nb_dist_func(*curr_args) + self.assertIn('nonc < 0', str(raises.exception)) + # Exceptions leak references + self.disable_leak_check() + + def test_noncentral_f(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. 
+ + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + dist_func = lambda x, size, dtype:\ + x.noncentral_f(3.0, 20.0, 3.0, size=size) + for _size, _bitgen in itertools.product(test_sizes, bitgen_types): + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None, + adjusted_ulp_prec) + + dist_func = lambda x, dfnum, dfden, nonc, size:\ + x.noncentral_f(dfnum=dfnum, dfden=dfden, nonc=nonc, size=size) + valid_args = [3.0, 5.0, 3.0, (1,)] + self._check_invalid_types(dist_func, ['dfnum', 'dfden', 'nonc', 'size'], + valid_args, ['x', 'x', 'x', ('x',)]) + + # Test argument bounds + rng = np.random.default_rng() + valid_args = [rng] + valid_args + nb_dist_func = numba.njit(dist_func) + with self.assertRaises(ValueError) as raises: + curr_args = valid_args.copy() + # Change dfnum to an invalid value + curr_args[1] = 0 + nb_dist_func(*curr_args) + self.assertIn('dfnum <= 0', str(raises.exception)) + with self.assertRaises(ValueError) as raises: + curr_args = valid_args.copy() + # Change dfden to an invalid value + curr_args[2] = 0 + nb_dist_func(*curr_args) + self.assertIn('dfden <= 0', str(raises.exception)) + with self.assertRaises(ValueError) as raises: + curr_args = valid_args.copy() + # Change nonc to an invalid value + curr_args[3] = -1 + nb_dist_func(*curr_args) + self.assertIn('nonc < 0', str(raises.exception)) + # Exceptions leak references + self.disable_leak_check() + + def test_logseries(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. 
+ + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + dist_func = lambda x, size, dtype:\ + x.logseries(0.3, size=size) + for _size, _bitgen in itertools.product(test_sizes, bitgen_types): + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None) + + dist_func = lambda x, p, size:\ + x.logseries(p=p, size=size) + valid_args = [0.3, (1,)] + self._check_invalid_types(dist_func, ['p', 'size'], + valid_args, ['x', ('x',)]) + + # Test argument bounds + rng = np.random.default_rng(1) + valid_args = [rng] + valid_args + nb_dist_func = numba.njit(dist_func) + for _p in [-0.1, 1, np.nan]: + with self.assertRaises(ValueError) as raises: + curr_args = valid_args.copy() + # Change p to an invalid negative, positive and nan value + curr_args[1] = _p + nb_dist_func(*curr_args) + self.assertIn('p < 0, p >= 1 or p is NaN', str(raises.exception)) + # Exceptions leak references + self.disable_leak_check() + + def test_binomial(self): + # For this test dtype argument is never used, so we pass [None] as dtype + # to make sure it runs only once with default system type. 
+ + test_sizes = [None, (), (100,), (10, 20, 30)] + bitgen_types = [None, MT19937] + + dist_func = lambda x, size, dtype:\ + x.binomial(n=1, p=0.1, size=size) + for _size in test_sizes: + for _bitgen in bitgen_types: + with self.subTest(_size=_size, _bitgen=_bitgen): + self.check_numpy_parity(dist_func, _bitgen, + None, _size, None, + adjusted_ulp_prec) + + dist_func = lambda x, n, p, size:\ + x.binomial(n=n, p=p, size=size) + self._check_invalid_types(dist_func, ['n', 'p', 'size'], + [1, 0.75, (1,)], ['x', 'x', ('x',)]) + + def test_binomial_cases(self): + cases = [ + (1, 0.1), # p <= 0.5 && n * p <= 30 + (50, 0.9), # p > 0.5 && n * p <= 30 + (100, 0.4), # p <= 0.5 && n * p > 30 + (100, 0.9) # p > 0.5 && n * p > 30 + ] + size = None + + for n, p in cases: + with self.subTest(n=n, p=p): + dist_func = lambda x, size, dtype:\ + x.binomial(n, p, size=size) + self.check_numpy_parity(dist_func, None, + None, size, None, 0) + + def test_binomial_specific_issues(self): + # The algorithm for "binomial" is quite involved. This test contains + # subtests for specific issues reported on the issue tracker. + + # testing specific bugs found in binomial. + with self.subTest("infinite loop issue #9493"): + # This specific generator state caused a "hang" as noted in #9493 + + gen1 = np.random.default_rng(0) + gen2 = np.random.default_rng(0) + + @numba.jit + def foo(gen): + return gen.binomial(700, 0.1, 100) + + got = foo(gen1) + expected = foo.py_func(gen2) + self.assertPreciseEqual(got, expected) + + with self.subTest("issue with midrange value branch #9493/#9734"): + # The use of 301 is specific to trigger use of random_binomial_btpe + # with an input state that caused incorrect values to be computed. 
+ + gen1 = np.random.default_rng(0) + gen2 = np.random.default_rng(0) + + @numba.jit + def foo(gen): + return gen.binomial(301, 0.1, 100) + + got = foo(gen1) + expected = foo.py_func(gen2) + self.assertPreciseEqual(got, expected) + + +class TestGeneratorCaching(TestCase, SerialMixin): + def test_randomgen_caching(self): + nb_rng = np.random.default_rng(1) + np_rng = np.random.default_rng(1) + + numba_func = numba.njit(lambda x: x.random(10), cache=True) + self.assertPreciseEqual(np_rng.random(10), numba_func(nb_rng)) + # Run the function twice to make sure caching doesn't break anything. + self.assertPreciseEqual(np_rng.random(10), numba_func(nb_rng)) + # Check that the function can be retrieved successfully from the cache. + res = run_in_new_process_caching(test_generator_caching) + self.assertEqual(res['exitcode'], 0) diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_npdatetime.py b/venv/lib/python3.10/site-packages/numba/tests/test_npdatetime.py new file mode 100644 index 0000000000000000000000000000000000000000..794e2340b323928ebd8eb02213e30aaba24e2d57 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_npdatetime.py @@ -0,0 +1,1201 @@ +""" +Test np.datetime64 and np.timedelta64 support. +""" + +# NOTE: datetime64 and timedelta64 ufuncs are tested in test_ufuncs. 
+ + +import contextlib +import itertools +import re +import unittest +import warnings + +import numpy as np + +from numba import jit, vectorize, njit +from numba.np.numpy_support import numpy_version +from numba.core import types, config +from numba.core.errors import TypingError +from numba.tests.support import TestCase, tag, skip_parfors_unsupported +from numba.np import npdatetime_helpers, numpy_support + +TIMEDELTA_M = np.dtype('timedelta64[M]') +TIMEDELTA_Y = np.dtype('timedelta64[Y]') + +def value_unit(val): + ty = numpy_support.from_dtype(val.dtype) + return ty.unit + + +date_units = ('Y', 'M') +time_units = ('W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', 'ps', 'fs', 'as') +# All except generic ("") +all_units = date_units + time_units + + +def add_usecase(x, y): + return x + y + +def sub_usecase(x, y): + return x - y + +def mul_usecase(x, y): + return x * y + +def div_usecase(x, y): + return x / y + +def floordiv_usecase(x, y): + return x // y + +def eq_usecase(x, y): + return x == y + +def ne_usecase(x, y): + return x != y + +def lt_usecase(x, y): + return x < y + +def le_usecase(x, y): + return x <= y + +def gt_usecase(x, y): + return x > y + +def ge_usecase(x, y): + return x >= y + +def pos_usecase(x): + return +x + +def neg_usecase(x): + return -x + +def abs_usecase(x): + return abs(x) + +def hash_usecase(x): + return hash(x) + +def min_usecase(x, y): + return min(x, y) + +def max_usecase(x, y): + return max(x, y) + +def int_cast_usecase(x): + return int(x) + +def make_add_constant(const): + def add_constant(x): + return x + const + return add_constant + + +class TestModuleHelpers(TestCase): + """ + Test the various helpers in numba.npdatetime_helpers. 
+ """ + + def test_can_cast_timedelta(self): + f = npdatetime_helpers.can_cast_timedelta_units + for a, b in itertools.product(date_units, time_units): + self.assertFalse(f(a, b), (a, b)) + self.assertFalse(f(b, a), (a, b)) + for unit in all_units: + self.assertFalse(f(unit, '')) + self.assertTrue(f('', unit)) + for unit in all_units + ('',): + self.assertTrue(f(unit, unit)) + + def check_units_group(group): + for i, a in enumerate(group): + for b in group[:i]: + # large into smaller is ok + self.assertTrue(f(b, a)) + # small into larger is not + self.assertFalse(f(a, b)) + + check_units_group(date_units) + check_units_group(time_units) + + def test_timedelta_conversion(self): + f = npdatetime_helpers.get_timedelta_conversion_factor + for unit in all_units + ('',): + self.assertEqual(f(unit, unit), 1) + for unit in all_units: + self.assertEqual(f('', unit), 1) + for a, b in itertools.product(time_units, date_units): + self.assertIs(f(a, b), None) + self.assertIs(f(b, a), None) + + def check_units_group(group): + for i, a in enumerate(group): + for b in group[:i]: + self.assertGreater(f(b, a), 1, (b, a)) + self.assertIs(f(a, b), None) + + check_units_group(date_units) + check_units_group(time_units) + + # Check some hand-picked values + self.assertEqual(f('Y', 'M'), 12) + self.assertEqual(f('W', 'h'), 24 * 7) + self.assertEqual(f('W', 'm'), 24 * 7 * 60) + self.assertEqual(f('W', 'us'), 24 * 7 * 3600 * 1000 * 1000) + + def test_datetime_timedelta_scaling(self): + f = npdatetime_helpers.get_datetime_timedelta_conversion + def check_error(dt_unit, td_unit): + with self.assertRaises(RuntimeError): + f(dt_unit, td_unit) + # Cannot combine a Y or M timedelta64 with a finer-grained datetime64 + for dt_unit, td_unit in itertools.product(time_units, date_units): + check_error(dt_unit, td_unit) + # Sanity check that all other unit pairs can be converted, we'll + # check individual results below + for dt_unit, td_unit in itertools.product(time_units, time_units): + f(dt_unit, 
td_unit) + for dt_unit, td_unit in itertools.product(date_units, time_units): + f(dt_unit, td_unit) + for dt_unit, td_unit in itertools.product(date_units, date_units): + f(dt_unit, td_unit) + # No-op conversions + for unit in all_units: + self.assertEqual(f(unit, unit), (unit, 1, 1)) + self.assertEqual(f(unit, ''), (unit, 1, 1)) + self.assertEqual(f('', unit), ('', 1, 1)) + self.assertEqual(f('', ''), ('', 1, 1)) + # "Regular" values + self.assertEqual(f('Y', 'M'), ('M', 12, 1)) + self.assertEqual(f('M', 'Y'), ('M', 1, 12)) + self.assertEqual(f('W', 'D'), ('D', 7, 1)) + self.assertEqual(f('D', 'W'), ('D', 1, 7)) + self.assertEqual(f('W', 's'), ('s', 7 * 24 * 3600, 1)) + self.assertEqual(f('s', 'W'), ('s', 1, 7 * 24 * 3600)) + self.assertEqual(f('s', 'as'), ('as', 1000 ** 6, 1)) + self.assertEqual(f('as', 's'), ('as', 1, 1000 ** 6)) + # "Interesting" values + self.assertEqual(f('Y', 'D'), ('D', 97 + 400 * 365, 400)) + self.assertEqual(f('Y', 'W'), ('W', 97 + 400 * 365, 400 * 7)) + self.assertEqual(f('M', 'D'), ('D', 97 + 400 * 365, 400 * 12)) + self.assertEqual(f('M', 'W'), ('W', 97 + 400 * 365, 400 * 12 * 7)) + self.assertEqual(f('Y', 's'), ('s', (97 + 400 * 365) * 24 * 3600, 400)) + self.assertEqual(f('M', 's'), ('s', (97 + 400 * 365) * 24 * 3600, 400 * 12)) + + def test_combine_datetime_timedelta_units(self): + f = npdatetime_helpers.combine_datetime_timedelta_units + for unit in all_units: + self.assertEqual(f(unit, unit), unit) + self.assertEqual(f('', unit), unit) + self.assertEqual(f(unit, ''), unit) + self.assertEqual(f('', ''), '') + for dt_unit, td_unit in itertools.product(time_units, date_units): + self.assertIs(f(dt_unit, td_unit), None) + for dt_unit, td_unit in itertools.product(date_units, time_units): + self.assertEqual(f(dt_unit, td_unit), td_unit) + + def test_same_kind(self): + f = npdatetime_helpers.same_kind + for u in all_units: + self.assertTrue(f(u, u)) + A = ('Y', 'M', 'W', 'D') + B = ('h', 'm', 's', 'ms', 'us', 'ns', 'ps', 'fs', 'as') + 
for a, b in itertools.product(A, A): + self.assertTrue(f(a, b)) + for a, b in itertools.product(B, B): + self.assertTrue(f(a, b)) + for a, b in itertools.product(A, B): + self.assertFalse(f(a, b)) + self.assertFalse(f(b, a)) + + +TD = np.timedelta64 +DT = np.datetime64 + + +class TestMiscCompiling(TestCase): + + def test_jit_explicit_signature(self): + def _check_explicit_signature(sig): + f = jit(sig, nopython=True)(add_usecase) + # Just a sanity check + args = DT(1, 'ms'), TD(2, 'us') + expected = add_usecase(*args) + self.assertPreciseEqual(f(*args), expected) + + # Test passing the signature in object form + sig = types.NPDatetime('us')(types.NPDatetime('ms'), types.NPTimedelta('us')) + _check_explicit_signature(sig) + # Same with the signature in string form + sig = "NPDatetime('us')(NPDatetime('ms'), NPTimedelta('us'))" + _check_explicit_signature(sig) + + def test_vectorize_explicit_signature(self): + def _check_explicit_signature(sig): + f = vectorize([sig], nopython=True)(mul_usecase) + # This isn't really right but we can't do better than this, + # since Numpy's ufuncs don't store the metadata of return types. 
+ # Related to https://github.com/numpy/numpy/issues/5429 + self.assertPreciseEqual(f(TD(2), 3), TD(6)) + + # Test passing the signature in object form (issue #917) + sig = types.NPTimedelta('s')(types.NPTimedelta('s'), types.int64) + _check_explicit_signature(sig) + # Same with the signature in string form + sig = "NPTimedelta('s')(NPTimedelta('s'), int64)" + _check_explicit_signature(sig) + + def test_constant_datetime(self): + def check(const): + pyfunc = make_add_constant(const) + f = jit(nopython=True)(pyfunc) + x = TD(4, 'D') + expected = pyfunc(x) + self.assertPreciseEqual(f(x), expected) + check(DT('2001-01-01')) + check(DT('NaT', 'D')) + + def test_constant_timedelta(self): + def check(const): + pyfunc = make_add_constant(const) + f = jit(nopython=True)(pyfunc) + x = TD(4, 'D') + expected = pyfunc(x) + self.assertPreciseEqual(f(x), expected) + check(TD(4, 'D')) + check(TD(-4, 'D')) + check(TD('NaT', 'D')) + + +class TestTimedeltaArithmetic(TestCase): + + jitargs = dict(forceobj=True) + + def jit(self, pyfunc): + return jit(**self.jitargs)(pyfunc) + + def test_add(self): + f = self.jit(add_usecase) + def check(a, b, expected): + self.assertPreciseEqual(f(a, b), expected) + self.assertPreciseEqual(f(b, a), expected) + + check(TD(1), TD(2), TD(3)) + check(TD(1, 's'), TD(2, 's'), TD(3, 's')) + # Implicit unit promotion + check(TD(1, 's'), TD(2, 'us'), TD(1000002, 'us')) + check(TD(1, 'W'), TD(2, 'D'), TD(9, 'D')) + # NaTs + check(TD('NaT'), TD(1), TD('NaT')) + check(TD('NaT', 's'), TD(1, 'D'), TD('NaT', 's')) + check(TD('NaT', 's'), TD(1, 'ms'), TD('NaT', 'ms')) + # Cannot add days and months + with self.assertRaises((TypeError, TypingError)): + f(TD(1, 'M'), TD(1, 'D')) + + def test_sub(self): + f = self.jit(sub_usecase) + def check(a, b, expected): + self.assertPreciseEqual(f(a, b), expected) + self.assertPreciseEqual(f(b, a), -expected) + + check(TD(3), TD(2), TD(1)) + check(TD(3, 's'), TD(2, 's'), TD(1, 's')) + # Implicit unit promotion + check(TD(3, 's'), 
TD(2, 'us'), TD(2999998, 'us')) + check(TD(1, 'W'), TD(2, 'D'), TD(5, 'D')) + # NaTs + check(TD('NaT'), TD(1), TD('NaT')) + check(TD('NaT', 's'), TD(1, 'D'), TD('NaT', 's')) + check(TD('NaT', 's'), TD(1, 'ms'), TD('NaT', 'ms')) + # Cannot sub days to months + with self.assertRaises((TypeError, TypingError)): + f(TD(1, 'M'), TD(1, 'D')) + + def test_mul(self): + f = self.jit(mul_usecase) + def check(a, b, expected): + self.assertPreciseEqual(f(a, b), expected) + self.assertPreciseEqual(f(b, a), expected) + + # non-int64 int * timedelta64 + check(TD(3), np.uint32(2), TD(6)) + # int * timedelta64 + check(TD(3), 2, TD(6)) + check(TD(3, 'ps'), 2, TD(6, 'ps')) + check(TD('NaT', 'ps'), 2, TD('NaT', 'ps')) + # float * timedelta64 + check(TD(7), 1.5, TD(10)) + check(TD(-7), 1.5, TD(-10)) + check(TD(7, 'ps'), -1.5, TD(-10, 'ps')) + check(TD(-7), -1.5, TD(10)) + check(TD('NaT', 'ps'), -1.5, TD('NaT', 'ps')) + check(TD(7, 'ps'), float('nan'), TD('NaT', 'ps')) + # wraparound on overflow + check(TD(2**62, 'ps'), 16, TD(0, 'ps')) + + def test_div(self): + div = self.jit(div_usecase) + floordiv = self.jit(floordiv_usecase) + def check(a, b, expected): + self.assertPreciseEqual(div(a, b), expected) + self.assertPreciseEqual(floordiv(a, b), expected) + + # timedelta64 / non-int64 int + check(TD(-3, 'ps'), np.uint32(2), TD(-1, 'ps')) + # timedelta64 / int + check(TD(3), 2, TD(1)) + check(TD(-3, 'ps'), 2, TD(-1, 'ps')) + check(TD('NaT', 'ps'), 2, TD('NaT', 'ps')) + check(TD(3, 'ps'), 0, TD('NaT', 'ps')) + check(TD('NaT', 'ps'), 0, TD('NaT', 'ps')) + # timedelta64 / float + check(TD(7), 0.5, TD(14)) + check(TD(-7, 'ps'), 1.5, TD(-4, 'ps')) + check(TD('NaT', 'ps'), 2.5, TD('NaT', 'ps')) + check(TD(3, 'ps'), 0.0, TD('NaT', 'ps')) + check(TD('NaT', 'ps'), 0.0, TD('NaT', 'ps')) + check(TD(3, 'ps'), float('nan'), TD('NaT', 'ps')) + check(TD('NaT', 'ps'), float('nan'), TD('NaT', 'ps')) + + def test_homogeneous_div(self): + div = self.jit(div_usecase) + def check(a, b, expected): + 
self.assertPreciseEqual(div(a, b), expected) + + # timedelta64 / timedelta64 + check(TD(7), TD(3), 7. / 3.) + check(TD(7, 'us'), TD(3, 'ms'), 7. / 3000.) + check(TD(7, 'ms'), TD(3, 'us'), 7000. / 3.) + check(TD(7), TD(0), float('+inf')) + check(TD(-7), TD(0), float('-inf')) + check(TD(0), TD(0), float('nan')) + # NaTs + check(TD('nat'), TD(3), float('nan')) + check(TD(3), TD('nat'), float('nan')) + check(TD('nat'), TD(0), float('nan')) + # Cannot div months with days + with self.assertRaises((TypeError, TypingError)): + div(TD(1, 'M'), TD(1, 'D')) + + def test_eq_ne(self): + eq = self.jit(eq_usecase) + ne = self.jit(ne_usecase) + def check(a, b, expected): + expected_val = expected + not_expected_val = not expected + + # all NaT comparisons are False, including NaT==NaT, + # conversely != is True + if np.isnat(a) or np.isnat(a): + expected_val = False + not_expected_val = True + + self.assertPreciseEqual(eq(a, b), expected_val) + self.assertPreciseEqual(eq(b, a), expected_val) + self.assertPreciseEqual(ne(a, b), not_expected_val) + self.assertPreciseEqual(ne(b, a), not_expected_val) + + check(TD(1), TD(2), False) + check(TD(1), TD(1), True) + check(TD(1, 's'), TD(2, 's'), False) + check(TD(1, 's'), TD(1, 's'), True) + check(TD(2000, 's'), TD(2, 's'), False) + check(TD(2000, 'ms'), TD(2, 's'), True) + check(TD(1, 'Y'), TD(12, 'M'), True) + # NaTs + check(TD('Nat'), TD('Nat'), True) + check(TD('Nat', 'ms'), TD('Nat', 's'), True) + check(TD('Nat'), TD(1), False) + # Incompatible units => timedeltas compare unequal + if numpy_version < (1, 25): + check(TD(1, 'Y'), TD(365, 'D'), False) + check(TD(1, 'Y'), TD(366, 'D'), False) + # ... except when both are NaT! 
+ check(TD('NaT', 'W'), TD('NaT', 'D'), True) + else: + # incompatible units raise + # The exception is different depending on Python mode + with self.assertRaises((TypeError, TypingError)): + eq(TD(1, 'Y'), TD(365, 'D')) + with self.assertRaises((TypeError, TypingError)): + ne(TD(1, 'Y'), TD(365, 'D')) + + def test_lt_ge(self): + lt = self.jit(lt_usecase) + ge = self.jit(ge_usecase) + def check(a, b, expected): + expected_val = expected + not_expected_val = not expected + + # since np 1.16 all NaT magnitude comparisons including equality + # are False (as NaT == NaT is now False) + if np.isnat(a) or np.isnat(a): + expected_val = False + not_expected_val = False + + self.assertPreciseEqual(lt(a, b), expected_val) + self.assertPreciseEqual(ge(a, b), not_expected_val) + + check(TD(1), TD(2), True) + check(TD(1), TD(1), False) + check(TD(2), TD(1), False) + check(TD(1, 's'), TD(2, 's'), True) + check(TD(1, 's'), TD(1, 's'), False) + check(TD(2, 's'), TD(1, 's'), False) + check(TD(1, 'm'), TD(61, 's'), True) + check(TD(1, 'm'), TD(60, 's'), False) + # NaTs + check(TD('Nat'), TD('Nat'), False) + check(TD('Nat', 'ms'), TD('Nat', 's'), False) + check(TD('Nat'), TD(-(2**63)+1), True) + # Incompatible units => exception raised + with self.assertRaises((TypeError, TypingError)): + lt(TD(1, 'Y'), TD(365, 'D')) + with self.assertRaises((TypeError, TypingError)): + ge(TD(1, 'Y'), TD(365, 'D')) + # ... 
even when both are NaT + with self.assertRaises((TypeError, TypingError)): + lt(TD('NaT', 'Y'), TD('NaT', 'D')) + with self.assertRaises((TypeError, TypingError)): + ge(TD('NaT', 'Y'), TD('NaT', 'D')) + + def test_le_gt(self): + le = self.jit(le_usecase) + gt = self.jit(gt_usecase) + def check(a, b, expected): + expected_val = expected + not_expected_val = not expected + + # since np 1.16 all NaT magnitude comparisons including equality + # are False (as NaT == NaT is now False) + if np.isnat(a) or np.isnat(a): + expected_val = False + not_expected_val = False + self.assertPreciseEqual(le(a, b), expected_val) + self.assertPreciseEqual(gt(a, b), not_expected_val) + + check(TD(1), TD(2), True) + check(TD(1), TD(1), True) + check(TD(2), TD(1), False) + check(TD(1, 's'), TD(2, 's'), True) + check(TD(1, 's'), TD(1, 's'), True) + check(TD(2, 's'), TD(1, 's'), False) + check(TD(1, 'm'), TD(61, 's'), True) + check(TD(1, 'm'), TD(60, 's'), True) + check(TD(1, 'm'), TD(59, 's'), False) + # NaTs + check(TD('Nat'), TD('Nat'), True) + check(TD('Nat', 'ms'), TD('Nat', 's'), True) + check(TD('Nat'), TD(-(2**63)+1), True) + # Incompatible units => exception raised + with self.assertRaises((TypeError, TypingError)): + le(TD(1, 'Y'), TD(365, 'D')) + with self.assertRaises((TypeError, TypingError)): + gt(TD(1, 'Y'), TD(365, 'D')) + # ... 
even when both are NaT + with self.assertRaises((TypeError, TypingError)): + le(TD('NaT', 'Y'), TD('NaT', 'D')) + with self.assertRaises((TypeError, TypingError)): + gt(TD('NaT', 'Y'), TD('NaT', 'D')) + + def test_pos(self): + pos = self.jit(pos_usecase) + def check(a): + self.assertPreciseEqual(pos(a), +a) + + check(TD(3)) + check(TD(-4)) + check(TD(3, 'ms')) + check(TD(-4, 'ms')) + check(TD('NaT')) + check(TD('NaT', 'ms')) + + def test_neg(self): + neg = self.jit(neg_usecase) + def check(a): + self.assertPreciseEqual(neg(a), -a) + + check(TD(3)) + check(TD(-4)) + check(TD(3, 'ms')) + check(TD(-4, 'ms')) + check(TD('NaT')) + check(TD('NaT', 'ms')) + + def test_abs(self): + f = self.jit(abs_usecase) + def check(a): + self.assertPreciseEqual(f(a), abs(a)) + + check(TD(3)) + check(TD(-4)) + check(TD(3, 'ms')) + check(TD(-4, 'ms')) + check(TD('NaT')) + check(TD('NaT', 'ms')) + + def test_hash(self): + f = self.jit(hash_usecase) + def check(a): + if numpy_version >= (2, 2): + # Generic timedeltas (those without a unit) + # are no longer hashable beyond NumPy 2.2 + # Non-generic timedeltas will have dtype name + # as timedelta64[] + if a.dtype.name == 'timedelta64': + return + + # If the function is not being compiled in objmode + # then the hash should be equal to the hash of the + # integer representation of the timedelta + if self.jitargs.get('nopython', False): + self.assertPreciseEqual(f(a), a.astype(int)) + else: + self.assertPreciseEqual(f(a), hash(a)) + else: + self.assertPreciseEqual(f(a), hash(a)) + + TD_CASES = ((3,), (-4,), (3, 'ms'), (-4, 'ms'), (27, 'D'), + (2, 'D'), (2, 'W'), (2, 'Y'), (3, 'W'), + (365, 'D'), (10000, 'D'), (-10000, 'D'), + ('NaT',), ('NaT', 'ms'), ('NaT', 'D'), (-1,)) + DT_CASES = (('2014',), ('2016',), ('2000',), ('2014-02',), + ('2014-03',), ('2014-04',), ('2016-02',), ('2000-12-31',), + ('2014-01-16',), ('2014-01-05',), ('2014-01-07',), + ('2014-01-06',), ('2014-02-02',), ('2014-02-27',), + ('2014-02-16',), ('2014-03-01',), 
('2000-01-01T01:02:03.002Z',), + ('2000-01-01T01:02:03Z',), ('NaT',)) + + for case, typ in zip(TD_CASES + DT_CASES, + (TD,) * len(TD_CASES) + (DT,) * len(TD_CASES)): + check(typ(*case)) + + if numpy_version >= (2, 2): + with self.assertRaises(ValueError) as raises: + f(TD(3)) + self.assertIn("Can't hash generic timedelta64", str(raises.exception)) + + def _test_min_max(self, usecase): + f = self.jit(usecase) + def check(a, b): + self.assertPreciseEqual(f(a, b), usecase(a, b)) + + for cases in ( + (TD(0), TD(1), TD(2), TD('NaT')), + (TD(0, 's'), TD(1, 's'), TD(2, 's'), TD('NaT', 's')), + ): + for a, b in itertools.product(cases, cases): + check(a, b) + + def test_min(self): + self._test_min_max(min_usecase) + + def test_max(self): + self._test_min_max(max_usecase) + + +class TestTimedeltaArithmeticNoPython(TestTimedeltaArithmetic): + + jitargs = dict(nopython=True) + + def test_int_cast(self): + f = self.jit(int_cast_usecase) + def check(a): + self.assertPreciseEqual(f(a), int(a)) + + for (delta, unit) in ((3, 'ns'), (-4, 'ns'), (30000, 'ns'), + (-40000000, 'ns'), (1, 'Y')): + check(TD(delta, unit).astype('timedelta64[ns]')) + + for time in ('2014', '2016', '2000', '2014-02', '2014-03', '2014-04', + '2016-02', '2000-12-31', '2014-01-16', '2014-01-05', + '2014-01-07', '2014-01-06', '2014-02-02', '2014-02-27', + '2014-02-16', '2014-03-01', '2000-01-01T01:02:03.002Z', + '2000-01-01T01:02:03Z'): + check(DT(time).astype('datetime64[ns]')) + + with self.assertRaises(TypingError, msg=('Only datetime64[ns] can be ' + + 'converted, but got ' + + 'datetime64[y]')): + f(DT('2014')) + + +class TestDatetimeArithmetic(TestCase): + + jitargs = dict(forceobj=True) + + def jit(self, pyfunc): + return jit(**self.jitargs)(pyfunc) + + @contextlib.contextmanager + def silence_numpy_warnings(self): + # Numpy can raise warnings when combining e.g. a generic timedelta64 + # with a non-generic datetime64. 
+ with warnings.catch_warnings(): + warnings.filterwarnings('ignore', + message='Implicitly casting between incompatible kinds', + category=DeprecationWarning) + yield + + def test_add_sub_timedelta(self): + """ + Test `datetime64 + timedelta64` and `datetime64 - timedelta64`. + """ + add = self.jit(add_usecase) + sub = self.jit(sub_usecase) + def check(a, b, expected): + with self.silence_numpy_warnings(): + self.assertPreciseEqual(add(a, b), expected, (a, b)) + self.assertPreciseEqual(add(b, a), expected, (a, b)) + self.assertPreciseEqual(sub(a, -b), expected, (a, b)) + # Did we get it right? + self.assertPreciseEqual(a + b, expected) + + # Y + ... + check(DT('2014'), TD(2, 'Y'), DT('2016')) + check(DT('2014'), TD(2, 'M'), DT('2014-03')) + check(DT('2014'), TD(3, 'W'), DT('2014-01-16', 'W')) + check(DT('2014'), TD(4, 'D'), DT('2014-01-05')) + check(DT('2000'), TD(365, 'D'), DT('2000-12-31')) + # M + ... + check(DT('2014-02'), TD(2, 'Y'), DT('2016-02')) + check(DT('2014-02'), TD(2, 'M'), DT('2014-04')) + check(DT('2014-02'), TD(2, 'D'), DT('2014-02-03')) + # W + ... + check(DT('2014-01-07', 'W'), TD(2, 'W'), DT('2014-01-16', 'W')) + # D + ... + check(DT('2014-02-02'), TD(27, 'D'), DT('2014-03-01')) + check(DT('2012-02-02'), TD(27, 'D'), DT('2012-02-29')) + check(DT('2012-02-02'), TD(2, 'W'), DT('2012-02-16')) + # s + ... 
+ check(DT('2000-01-01T01:02:03Z'), TD(2, 'h'), DT('2000-01-01T03:02:03Z')) + check(DT('2000-01-01T01:02:03Z'), TD(2, 'ms'), DT('2000-01-01T01:02:03.002Z')) + # More thorough checking with leap years and faraway years + for dt_str in ('600', '601', '604', '801', + '1900', '1904', '2200', '2300', '2304', + '2400', '6001'): + for dt_suffix in ('', '-01', '-12'): + dt = DT(dt_str + dt_suffix) + for td in [TD(2, 'D'), TD(2, 'W'), + TD(100, 'D'), TD(10000, 'D'), + TD(-100, 'D'), TD(-10000, 'D'), + TD(100, 'W'), TD(10000, 'W'), + TD(-100, 'W'), TD(-10000, 'W'), + TD(100, 'M'), TD(10000, 'M'), + TD(-100, 'M'), TD(-10000, 'M')]: + self.assertEqual(add(dt, td), dt + td, (dt, td)) + self.assertEqual(add(td, dt), dt + td, (dt, td)) + self.assertEqual(sub(dt, -td), dt + td, (dt, td)) + + # NaTs + check(DT('NaT'), TD(2), DT('NaT')) + check(DT('NaT', 's'), TD(2, 'h'), DT('NaT', 's')) + check(DT('NaT', 's'), TD(2, 'ms'), DT('NaT', 'ms')) + check(DT('2014'), TD('NaT', 'W'), DT('NaT', 'W')) + check(DT('2014-01-01'), TD('NaT', 'W'), DT('NaT', 'D')) + check(DT('NaT', 's'), TD('NaT', 'ms'), DT('NaT', 'ms')) + + # Cannot add datetime days and timedelta months or years + for f in (add, sub): + with self.assertRaises((TypeError, TypingError)): + f(DT(1, '2014-01-01'), TD(1, 'Y')) + with self.assertRaises((TypeError, TypingError)): + f(DT(1, '2014-01-01'), TD(1, 'M')) + + def datetime_samples(self): + dt_years = ['600', '601', '604', '1968', '1969', '1973', + '2000', '2004', '2005', '2100', '2400', '2401'] + dt_suffixes = ['', '-01', '-12', '-02-28', '-12-31', + '-01-05T12:30:56Z', '-01-05T12:30:56.008Z'] + dts = [DT(a + b) for (a, b) in itertools.product(dt_years, dt_suffixes)] + dts += [DT(s, 'W') for s in dt_years] + return dts + + def test_datetime_difference(self): + """ + Test `datetime64 - datetime64`. 
+ """ + sub = self.jit(sub_usecase) + def check(a, b, expected=None): + with self.silence_numpy_warnings(): + self.assertPreciseEqual(sub(a, b), a - b, (a, b)) + self.assertPreciseEqual(sub(b, a), b - a, (a, b)) + # Did we get it right? + self.assertPreciseEqual(a - b, expected) + + check(DT('2014'), DT('2017'), TD(-3, 'Y')) + check(DT('2014-02'), DT('2017-01'), TD(-35, 'M')) + check(DT('2014-02-28'), DT('2015-03-01'), TD(-366, 'D')) + # NaTs + check(DT('NaT', 'M'), DT('2000'), TD('NaT', 'M')) + check(DT('NaT', 'M'), DT('2000-01-01'), TD('NaT', 'D')) + check(DT('NaT'), DT('NaT'), TD('NaT')) + # Test many more values + with self.silence_numpy_warnings(): + dts = self.datetime_samples() + for a, b in itertools.product(dts, dts): + if (not npdatetime_helpers.same_kind(value_unit(a), value_unit(b))): + continue + self.assertPreciseEqual(sub(a, b), a - b, (a, b)) + + def test_comparisons(self): + # Test all datetime comparisons all at once + eq = self.jit(eq_usecase) + ne = self.jit(ne_usecase) + lt = self.jit(lt_usecase) + le = self.jit(le_usecase) + gt = self.jit(gt_usecase) + ge = self.jit(ge_usecase) + + def check_eq(a, b, expected): + expected_val = expected + not_expected_val = not expected + + # since np 1.16 all NaT comparisons bar != are False, including + # NaT==NaT + if np.isnat(a) or np.isnat(b): + expected_val = False + not_expected_val = True + self.assertFalse(le(a, b), (a, b)) + self.assertFalse(ge(a, b), (a, b)) + self.assertFalse(le(b, a), (a, b)) + self.assertFalse(ge(b, a), (a, b)) + self.assertFalse(lt(a, b), (a, b)) + self.assertFalse(gt(a, b), (a, b)) + self.assertFalse(lt(b, a), (a, b)) + self.assertFalse(gt(b, a), (a, b)) + + with self.silence_numpy_warnings(): + self.assertPreciseEqual(eq(a, b), expected_val, (a, b, expected)) + self.assertPreciseEqual(eq(b, a), expected_val, (a, b, expected)) + self.assertPreciseEqual(ne(a, b), not_expected_val, (a, b, expected)) + self.assertPreciseEqual(ne(b, a), not_expected_val, (a, b, expected)) + if 
expected_val: + # If equal, then equal-ordered comparisons are true + self.assertTrue(le(a, b), (a, b)) + self.assertTrue(ge(a, b), (a, b)) + self.assertTrue(le(b, a), (a, b)) + self.assertTrue(ge(b, a), (a, b)) + # and strictly ordered comparisons are false + self.assertFalse(lt(a, b), (a, b)) + self.assertFalse(gt(a, b), (a, b)) + self.assertFalse(lt(b, a), (a, b)) + self.assertFalse(gt(b, a), (a, b)) + # Did we get it right? + self.assertPreciseEqual(a == b, expected_val) + + def check_lt(a, b, expected): + expected_val = expected + not_expected_val = not expected + + # since np 1.16 all NaT magnitude comparisons including equality + # are False (as NaT == NaT is now False) + if np.isnat(a) or np.isnat(b): + expected_val = False + not_expected_val = False + + with self.silence_numpy_warnings(): + lt = self.jit(lt_usecase) + self.assertPreciseEqual(lt(a, b), expected_val, (a, b, expected)) + self.assertPreciseEqual(gt(b, a), expected_val, (a, b, expected)) + self.assertPreciseEqual(ge(a, b), not_expected_val, (a, b, expected)) + self.assertPreciseEqual(le(b, a), not_expected_val, (a, b, expected)) + if expected_val: + # If true, then values are not equal + check_eq(a, b, False) + # Did we get it right? 
+ self.assertPreciseEqual(a < b, expected_val) + + check_eq(DT('2014'), DT('2017'), False) + check_eq(DT('2014'), DT('2014-01'), True) + check_eq(DT('2014'), DT('2014-01-01'), True) + check_eq(DT('2014'), DT('2014-01-01', 'W'), True) + check_eq(DT('2014-01'), DT('2014-01-01', 'W'), True) + # Yes, it's not transitive + check_eq(DT('2014-01-01'), DT('2014-01-01', 'W'), False) + check_eq(DT('2014-01-02'), DT('2014-01-06', 'W'), True) + # with times + check_eq(DT('2014-01-01T00:01:00Z', 's'), + DT('2014-01-01T00:01Z', 'm'), True) + check_eq(DT('2014-01-01T00:01:01Z', 's'), + DT('2014-01-01T00:01Z', 'm'), False) + # NaTs + check_lt(DT('NaT', 'Y'), DT('2017'), True) + check_eq(DT('NaT'), DT('NaT'), True) + + # Check comparison between various units + dts = self.datetime_samples() + for a in dts: + # Take a number of smaller units + a_unit = a.dtype.str.split('[')[1][:-1] + i = all_units.index(a_unit) + units = all_units[i:i+6] + for unit in units: + # Force conversion + b = a.astype('M8[%s]' % unit) + if (not npdatetime_helpers.same_kind(value_unit(a), + value_unit(b))): + continue + check_eq(a, b, True) + check_lt(a, b + np.timedelta64(1, unit), True) + check_lt(b - np.timedelta64(1, unit), a, True) + + def _test_min_max(self, usecase): + f = self.jit(usecase) + def check(a, b): + self.assertPreciseEqual(f(a, b), usecase(a, b)) + + for cases in ( + (DT(0, 'ns'), DT(1, 'ns'), DT(2, 'ns'), DT('NaT', 'ns')), + (DT(0, 's'), DT(1, 's'), DT(2, 's'), DT('NaT', 's')), + ): + for a, b in itertools.product(cases, cases): + check(a, b) + + def test_min(self): + self._test_min_max(min_usecase) + + def test_max(self): + self._test_min_max(max_usecase) + +class TestDatetimeArithmeticNoPython(TestDatetimeArithmetic): + + jitargs = dict(nopython=True) + + +class TestMetadataScalingFactor(TestCase): + """ + Tests than non-1 scaling factors are not supported in datetime64 + and timedelta64 dtypes. 
+ """ + + def test_datetime(self, jitargs={'forceobj':True}): + eq = jit(**jitargs)(eq_usecase) + self.assertTrue(eq(DT('2014', '10Y'), DT('2010'))) + + def test_datetime_npm(self): + with self.assertTypingError(): + self.test_datetime(jitargs={'nopython':True}) + + def test_timedelta(self, jitargs={'forceobj':True}): + eq = jit(**jitargs)(eq_usecase) + self.assertTrue(eq(TD(2, '10Y'), TD(20, 'Y'))) + + def test_timedelta_npm(self): + with self.assertTypingError(): + self.test_timedelta(jitargs={'nopython':True}) + + +class TestDatetimeDeltaOps(TestCase): + def test_div(self): + """ + Test the division of a timedelta by numeric types + """ + def arr_div(a, b): + return a / b + + py_func = arr_div + cfunc = njit(arr_div) + test_cases = [ + (np.ones(3, TIMEDELTA_M), np.ones(3, TIMEDELTA_M)), + (np.ones(3, TIMEDELTA_M), np.ones(3, TIMEDELTA_Y)), + (np.ones(3, TIMEDELTA_Y), np.ones(3, TIMEDELTA_M)), + (np.ones(3, TIMEDELTA_Y), np.ones(3, TIMEDELTA_Y)), + (np.ones(3, TIMEDELTA_M), 1), + (np.ones(3, TIMEDELTA_M), np.ones(3, np.int64)), + (np.ones(3, TIMEDELTA_M), np.ones(3, np.float64)), + ] + for a, b in test_cases: + self.assertTrue(np.array_equal(py_func(a, b), cfunc(a, b))) + + +class TestDatetimeArrayOps(TestCase): + + def _test_td_add_or_sub(self, operation, parallel): + """ + Test the addition/subtraction of a datetime array with a timedelta type + """ + def impl(a, b): + return operation(a, b) + + arr_one = np.array([ + np.datetime64("2011-01-01"), + np.datetime64("1971-02-02"), + np.datetime64("2021-03-03"), + np.datetime64("2004-12-07"), + ], dtype="datetime64[ns]") + arr_two = np.array([ + np.datetime64("2011-01-01"), + np.datetime64("1971-02-02"), + np.datetime64("2021-03-03"), + np.datetime64("2004-12-07"), + ], dtype="datetime64[D]") + py_func = impl + cfunc = njit(parallel=parallel)(impl) + test_cases = [ + (arr_one, np.timedelta64(1000)), + (arr_two, np.timedelta64(1000)), + (arr_one, np.timedelta64(-54557)), + (arr_two, np.timedelta64(-54557)), + ] + # 
np.add is commutative so test the reversed order + if operation is np.add: + test_cases.extend([ + (np.timedelta64(1000), arr_one), + (np.timedelta64(1000), arr_two), + (np.timedelta64(-54557), arr_one), + (np.timedelta64(-54557), arr_two), + ]) + for a, b in test_cases: + self.assertTrue(np.array_equal(py_func(a, b), cfunc(a, b))) + + def test_add_td(self): + self._test_td_add_or_sub(np.add, False) + + @skip_parfors_unsupported + def test_add_td_parallel(self): + self._test_td_add_or_sub(np.add, True) + + def test_sub_td(self): + self._test_td_add_or_sub(np.subtract, False) + + @skip_parfors_unsupported + def test_sub_td_parallel(self): + self._test_td_add_or_sub(np.subtract, True) + + def _test_add_sub_td_no_match(self, operation): + """ + Tests that attempting to add/sub a datetime64 and timedelta64 + with types that cannot be cast raises a reasonable exception. + """ + @njit + def impl(a, b): + return operation(a, b) + + fname = operation.__name__ + expected = re.escape((f"ufunc '{fname}' is not supported between " + "datetime64[ns] and timedelta64[M]")) + with self.assertRaisesRegex((TypingError, TypeError), expected): + impl( + np.array([np.datetime64("2011-01-01"),], + dtype="datetime64[ns]"), + np.timedelta64(1000,'M') + ) + + def test_add_td_no_match(self): + self._test_add_sub_td_no_match(np.add) + + def test_sub_td_no_match(self): + self._test_add_sub_td_no_match(np.subtract) + + def _get_testcases(self): + test_cases = [ + np.array([ + DT(0, "ns"), + DT(1, "ns"), + DT(2, "ns"), + DT(3, "ns"), + ]), + np.array([ + DT("2011-01-01", "ns"), + DT("1971-02-02", "ns"), + DT("1900-01-01", "ns"), + DT("2021-03-03", "ns"), + DT("2004-12-07", "ns"), + ]), + np.array([ + DT("2011-01-01", "D"), + DT("1971-02-02", "D"), + DT("1900-01-01", "D"), + DT("2021-03-03", "D"), + DT("2004-12-07", "D"), + ]), + np.array([ + DT("2011-01-01", "ns"), + DT("1971-02-02", "ns"), + DT("1900-01-01", "ns"), + DT("2021-03-03", "ns"), + DT("2004-12-07", "ns"), + DT("NaT", "ns"), + ]), + 
np.array([ + DT("NaT", "ns"), + DT("2011-01-01", "ns"), + DT("1971-02-02", "ns"), + DT("1900-01-01", "ns"), + DT("2021-03-03", "ns"), + DT("2004-12-07", "ns"), + ]), + np.array([ + DT("1971-02-02", "ns"), + DT("NaT", "ns"), + ]), + np.array([ + DT("NaT", "ns"), + DT("NaT", "ns"), + DT("NaT", "ns"), + ]), + np.array([ + TD(1, "ns"), + TD(2, "ns"), + TD(3, "ns"), + TD(4, "ns"), + ]), + np.array([ + TD(1, "D"), + TD(2, "D"), + TD(3, "D"), + TD(4, "D"), + ]), + np.array([ + TD("NaT", "ns"), + TD(1, "ns"), + TD(2, "ns"), + TD(3, "ns"), + TD(4, "ns"), + ]), + np.array([ + TD(1, "ns"), + TD(2, "ns"), + TD(3, "ns"), + TD(4, "ns"), + TD("NaT", "ns"), + ]), + np.array([ + TD("NaT", "ns"), + ]), + np.array([ + TD("NaT", "ns"), + TD("NaT", "ns"), + TD("NaT", "ns"), + ]), + ] + return test_cases + + def _test_min_max(self, operation, parallel, method): + if method: + if operation is np.min: + def impl(arr): + return arr.min() + else: + def impl(arr): + return arr.max() + else: + def impl(arr): + return operation(arr) + + py_func = impl + cfunc = njit(parallel=parallel)(impl) + + test_cases = self._get_testcases() + for arr in test_cases: + py_res = py_func(arr) + c_res = cfunc(arr) + if np.isnat(py_res) or np.isnat(c_res): + self.assertTrue(np.isnat(py_res)) + self.assertTrue(np.isnat(c_res)) + else: + self.assertEqual(py_res, c_res) + + def test_min_func(self): + self._test_min_max(min, False, False) + + def test_np_min_func(self): + self._test_min_max(np.min, False, False) + + def test_min_method(self): + self._test_min_max(np.min, False, True) + + def test_max_func(self): + self._test_min_max(max, False, False) + + def test_np_max_func(self): + self._test_min_max(np.max, False, False) + + def test_max_method(self): + self._test_min_max(np.max, False, True) + + @skip_parfors_unsupported + def test_min_func_parallel(self): + self._test_min_max(np.min, True, False) + + @skip_parfors_unsupported + def test_min_method_parallel(self): + self._test_min_max(np.min, True, True) + + 
@skip_parfors_unsupported + def test_max_func_parallel(self): + self._test_min_max(np.max, True, False) + + @skip_parfors_unsupported + def test_max_method_parallel(self): + self._test_min_max(np.max, True, True) + + def test_searchsorted_datetime(self): + from .test_np_functions import ( + searchsorted, searchsorted_left, searchsorted_right, + ) + pyfunc_list = [searchsorted, searchsorted_left, searchsorted_right] + cfunc_list = [jit(fn) for fn in pyfunc_list] + + def check(pyfunc, cfunc, a, v): + expected = pyfunc(a, v) + got = cfunc(a, v) + self.assertPreciseEqual(expected, got) + + cases = self._get_testcases() + for pyfunc, cfunc in zip(pyfunc_list, cfunc_list): + for arr in cases: + arr = np.sort(arr) + for n in range(1, min(3, arr.size) + 1): + idx = np.random.randint(0, arr.size, n) + vs = arr[idx] + if n == 1: + [v] = vs + check(pyfunc, cfunc, arr, v) + check(pyfunc, cfunc, arr, vs) + + + +class TestDatetimeTypeOps(TestCase): + def test_isinstance_datetime(self): + @njit + def is_complex(a): + return isinstance(a, complex) + @njit + def is_datetime(a): + return isinstance(a, np.datetime64) + @njit + def is_timedelta(a): + return isinstance(a, np.timedelta64) + + dt_a = np.datetime64(1, 'ns') + dt_b = np.datetime64(2, 'ns') + td_c = dt_b - dt_a + + def check(jit_func, x): + with self.subTest(f'{jit_func.__name__}({type(x).__name__})'): + got = jit_func(x) + expect = jit_func.py_func(x) + self.assertEqual(got, expect) + + fns = [ + is_complex, + is_datetime, + is_timedelta, + ] + args = [ + dt_a, + dt_b, + td_c, + ] + for fn, arg in itertools.product(fns, args): + check(fn, arg) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_nrt.py b/venv/lib/python3.10/site-packages/numba/tests/test_nrt.py new file mode 100644 index 0000000000000000000000000000000000000000..f02b48cb711caa7d8c3866c2813342f18d49252e --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_nrt.py @@ -0,0 +1,857 @@ 
+import math +import os +import platform +import sys +import re + +import numpy as np + +from numba import njit +from numba.core import types +from numba.core.runtime import ( + rtsys, + nrtopt, + _nrt_python, + nrt, +) +from numba.core.extending import intrinsic, include_path +from numba.core.typing import signature +from numba.core.imputils import impl_ret_untracked +from llvmlite import ir +import llvmlite.binding as llvm +from numba.core.unsafe.nrt import NRT_get_api + +from numba.tests.support import (EnableNRTStatsMixin, TestCase, temp_directory, + import_dynamic, skip_if_32bit, + skip_unless_cffi, run_in_subprocess) +from numba.core.registry import cpu_target +import unittest + + +linux_only = unittest.skipIf(not sys.platform.startswith('linux'), + 'linux only test') +x86_only = unittest.skipIf(platform.machine() not in ('i386', 'x86_64'), + 'x86 only test') + + +class Dummy(object): + alive = 0 + + def __init__(self): + type(self).alive += 1 + + def __del__(self): + type(self).alive -= 1 + + +class TestNrtMemInfoNotInitialized(unittest.TestCase): + """ + Unit test for checking the use of the NRT fails if the + initialization sequence has not been run. + """ + _numba_parallel_test_ = False + + def test_init_fail(self): + methods = {'library': (), + 'meminfo_new': ((), ()), + 'meminfo_alloc': ((),), + } + + for meth, args in methods.items(): + try: + with self.assertRaises(RuntimeError) as raises: + rtsys._init = False + fn = getattr(rtsys, meth) + fn(*args) + + msg = "Runtime must be initialized before use." 
+ self.assertIn(msg, str(raises.exception)) + finally: + rtsys._init = True + + +class TestNrtMemInfo(unittest.TestCase): + """ + Unit test for core MemInfo functionality + """ + + def setUp(self): + # Reset the Dummy class + Dummy.alive = 0 + # initialize the NRT (in case the tests are run in isolation) + rtsys.initialize(cpu_target.target_context) + super(TestNrtMemInfo, self).setUp() + + def test_meminfo_refct_1(self): + d = Dummy() + self.assertEqual(Dummy.alive, 1) + addr = 0xdeadcafe # some made up location + + mi = rtsys.meminfo_new(addr, d) + self.assertEqual(mi.refcount, 1) + del d + self.assertEqual(Dummy.alive, 1) + mi.acquire() + self.assertEqual(mi.refcount, 2) + self.assertEqual(Dummy.alive, 1) + mi.release() + self.assertEqual(mi.refcount, 1) + del mi + self.assertEqual(Dummy.alive, 0) + + def test_meminfo_refct_2(self): + d = Dummy() + self.assertEqual(Dummy.alive, 1) + addr = 0xdeadcafe # some made up location + + mi = rtsys.meminfo_new(addr, d) + self.assertEqual(mi.refcount, 1) + del d + self.assertEqual(Dummy.alive, 1) + for ct in range(100): + mi.acquire() + self.assertEqual(mi.refcount, 1 + 100) + self.assertEqual(Dummy.alive, 1) + for _ in range(100): + mi.release() + self.assertEqual(mi.refcount, 1) + del mi + self.assertEqual(Dummy.alive, 0) + + def test_fake_memoryview(self): + d = Dummy() + self.assertEqual(Dummy.alive, 1) + addr = 0xdeadcafe # some made up location + + mi = rtsys.meminfo_new(addr, d) + self.assertEqual(mi.refcount, 1) + mview = memoryview(mi) + self.assertEqual(mi.refcount, 1) + self.assertEqual(addr, mi.data) + self.assertFalse(mview.readonly) + self.assertIs(mi, mview.obj) + self.assertTrue(mview.c_contiguous) + self.assertEqual(mview.itemsize, 1) + self.assertEqual(mview.ndim, 1) + del d + del mi + + self.assertEqual(Dummy.alive, 1) + del mview + self.assertEqual(Dummy.alive, 0) + + def test_memoryview(self): + from ctypes import c_uint32, c_void_p, POINTER, cast + + dtype = np.dtype(np.uint32) + bytesize = 
dtype.itemsize * 10 + mi = rtsys.meminfo_alloc(bytesize, safe=True) + addr = mi.data + c_arr = cast(c_void_p(mi.data), POINTER(c_uint32 * 10)) + # Check 0xCB-filling + for i in range(10): + self.assertEqual(c_arr.contents[i], 0xcbcbcbcb) + + # Init array with ctypes + for i in range(10): + c_arr.contents[i] = i + 1 + mview = memoryview(mi) + self.assertEqual(mview.nbytes, bytesize) + self.assertFalse(mview.readonly) + self.assertIs(mi, mview.obj) + self.assertTrue(mview.c_contiguous) + self.assertEqual(mview.itemsize, 1) + self.assertEqual(mview.ndim, 1) + del mi + arr = np.ndarray(dtype=dtype, shape=mview.nbytes // dtype.itemsize, + buffer=mview) + del mview + # Modify array with NumPy + np.testing.assert_equal(np.arange(arr.size) + 1, arr) + + arr += 1 + + # Check value reflected in ctypes + for i in range(10): + self.assertEqual(c_arr.contents[i], i + 2) + + self.assertEqual(arr.ctypes.data, addr) + del arr + # At this point the memory is zero filled + # We can't check this deterministically because the memory could be + # consumed by another thread. 
+ + def test_buffer(self): + from ctypes import c_uint32, c_void_p, POINTER, cast + + dtype = np.dtype(np.uint32) + bytesize = dtype.itemsize * 10 + mi = rtsys.meminfo_alloc(bytesize, safe=True) + self.assertEqual(mi.refcount, 1) + addr = mi.data + c_arr = cast(c_void_p(addr), POINTER(c_uint32 * 10)) + # Check 0xCB-filling + for i in range(10): + self.assertEqual(c_arr.contents[i], 0xcbcbcbcb) + + # Init array with ctypes + for i in range(10): + c_arr.contents[i] = i + 1 + + arr = np.ndarray(dtype=dtype, shape=bytesize // dtype.itemsize, + buffer=mi) + self.assertEqual(mi.refcount, 1) + del mi + # Modify array with NumPy + np.testing.assert_equal(np.arange(arr.size) + 1, arr) + + arr += 1 + + # Check value reflected in ctypes + for i in range(10): + self.assertEqual(c_arr.contents[i], i + 2) + + self.assertEqual(arr.ctypes.data, addr) + del arr + # At this point the memory is zero filled + # We can't check this deterministically because the memory could be + # consumed by another thread. + + @skip_if_32bit + def test_allocate_invalid_size(self): + # Checks that attempting to allocate too big a region fails gracefully. + size = types.size_t.maxval // 8 // 2 + for pred in (True, False): + with self.assertRaises(MemoryError) as raises: + rtsys.meminfo_alloc(size, safe=pred) + self.assertIn(f"Requested allocation of {size} bytes failed.", + str(raises.exception)) + + def test_allocate_negative_size(self): + # Checks that attempting to allocate negative number of bytes fails + # gracefully. + size = -10 + for pred in (True, False): + with self.assertRaises(ValueError) as raises: + rtsys.meminfo_alloc(size, safe=pred) + msg = f"Cannot allocate a negative number of bytes: {size}." + self.assertIn(msg, str(raises.exception)) + + +class TestTracemalloc(unittest.TestCase): + """ + Test NRT-allocated memory can be tracked by tracemalloc. 
+ """ + + def measure_memory_diff(self, func): + try: + import tracemalloc + except ImportError: + self.skipTest("tracemalloc not available") + tracemalloc.start() + try: + before = tracemalloc.take_snapshot() + # Keep the result and only delete it after taking a snapshot + res = func() + after = tracemalloc.take_snapshot() + del res + return after.compare_to(before, 'lineno') + finally: + tracemalloc.stop() + + def test_snapshot(self): + N = 1000000 + dtype = np.int8 + + @njit + def alloc_nrt_memory(): + """ + Allocate and return a large array. + """ + return np.empty(N, dtype) + + def keep_memory(): + return alloc_nrt_memory() + + def release_memory(): + alloc_nrt_memory() + + alloc_lineno = keep_memory.__code__.co_firstlineno + 1 + + # Warmup JIT + alloc_nrt_memory() + + # The large NRT-allocated array should appear topmost in the diff + diff = self.measure_memory_diff(keep_memory) + stat = diff[0] + # There is a slight overhead, so the allocated size won't exactly be N + self.assertGreaterEqual(stat.size, N) + self.assertLess(stat.size, N * 1.015, + msg=("Unexpected allocation overhead encountered. " + "May be due to difference in CPython " + "builds or running under coverage")) + frame = stat.traceback[0] + self.assertEqual(os.path.basename(frame.filename), "test_nrt.py") + self.assertEqual(frame.lineno, alloc_lineno) + + # If NRT memory is released before taking a snapshot, it shouldn't + # appear. 
+ diff = self.measure_memory_diff(release_memory) + stat = diff[0] + # Something else appears, but nothing the magnitude of N + self.assertLess(stat.size, N * 0.01) + + +class TestNRTIssue(TestCase): + def test_issue_with_refct_op_pruning(self): + """ + GitHub Issue #1244 https://github.com/numba/numba/issues/1244 + """ + @njit + def calculate_2D_vector_mag(vector): + x, y = vector + + return math.sqrt(x ** 2 + y ** 2) + + @njit + def normalize_2D_vector(vector): + normalized_vector = np.empty(2, dtype=np.float64) + + mag = calculate_2D_vector_mag(vector) + x, y = vector + + normalized_vector[0] = x / mag + normalized_vector[1] = y / mag + + return normalized_vector + + @njit + def normalize_vectors(num_vectors, vectors): + normalized_vectors = np.empty((num_vectors, 2), dtype=np.float64) + + for i in range(num_vectors): + vector = vectors[i] + + normalized_vector = normalize_2D_vector(vector) + + normalized_vectors[i, 0] = normalized_vector[0] + normalized_vectors[i, 1] = normalized_vector[1] + + return normalized_vectors + + num_vectors = 10 + test_vectors = np.random.random((num_vectors, 2)) + got = normalize_vectors(num_vectors, test_vectors) + expected = normalize_vectors.py_func(num_vectors, test_vectors) + + np.testing.assert_almost_equal(expected, got) + + def test_incref_after_cast(self): + # Issue #1427: when casting a value before returning it, the + # cast result should be incref'ed, not the original value. + def f(): + return 0.0, np.zeros(1, dtype=np.int32) + + # Note the return type isn't the same as the tuple type above: + # the first element is a complex rather than a float. 
+ cfunc = njit((types.Tuple((types.complex128, + types.Array(types.int32, 1, 'C') )))())(f) + z, arr = cfunc() + self.assertPreciseEqual(z, 0j) + self.assertPreciseEqual(arr, np.zeros(1, dtype=np.int32)) + + def test_refct_pruning_issue_1511(self): + @njit + def f(): + a = np.ones(10, dtype=np.float64) + b = np.ones(10, dtype=np.float64) + return a, b[:] + + a, b = f() + np.testing.assert_equal(a, b) + np.testing.assert_equal(a, np.ones(10, dtype=np.float64)) + + def test_refct_pruning_issue_1526(self): + @njit + def udt(image, x, y): + next_loc = np.where(image == 1) + + if len(next_loc[0]) == 0: + y_offset = 1 + x_offset = 1 + else: + y_offset = next_loc[0][0] + x_offset = next_loc[1][0] + + next_loc_x = (x - 1) + x_offset + next_loc_y = (y - 1) + y_offset + + return next_loc_x, next_loc_y + + a = np.array([[1, 0, 1, 0, 1, 0, 0, 1, 0, 0]]) + expect = udt.py_func(a, 1, 6) + got = udt(a, 1, 6) + + self.assertEqual(expect, got) + + @TestCase.run_test_in_subprocess + def test_no_nrt_on_njit_decoration(self): + # Checks that the NRT is not initialized/compiled as a result of + # decorating a function with `@njit`. + from numba import njit + + # check the NRT is not initialized. + self.assertFalse(rtsys._init) + + # decorate + @njit + def foo(): + return 123 + + # check the NRT is still not initialized + self.assertFalse(rtsys._init) + + # execute + self.assertEqual(foo(), foo.py_func()) + + # check the NRT is still now initialized as execution has definitely + # occurred. 
+ self.assertTrue(rtsys._init) + + +class TestRefCtPruning(unittest.TestCase): + + sample_llvm_ir = ''' +define i32 @"MyFunction"(i8** noalias nocapture %retptr, { i8*, i32 }** noalias nocapture %excinfo, i8* noalias nocapture readnone %env, double %arg.vt.0, double %arg.vt.1, double %arg.vt.2, double %arg.vt.3, double %arg.bounds.0, double %arg.bounds.1, double %arg.bounds.2, double %arg.bounds.3, i8* %arg.xs.0, i8* nocapture readnone %arg.xs.1, i64 %arg.xs.2, i64 %arg.xs.3, double* nocapture readonly %arg.xs.4, i64 %arg.xs.5.0, i64 %arg.xs.6.0, i8* %arg.ys.0, i8* nocapture readnone %arg.ys.1, i64 %arg.ys.2, i64 %arg.ys.3, double* nocapture readonly %arg.ys.4, i64 %arg.ys.5.0, i64 %arg.ys.6.0, i8* %arg.aggs_and_cols.0.0, i8* nocapture readnone %arg.aggs_and_cols.0.1, i64 %arg.aggs_and_cols.0.2, i64 %arg.aggs_and_cols.0.3, i32* nocapture %arg.aggs_and_cols.0.4, i64 %arg.aggs_and_cols.0.5.0, i64 %arg.aggs_and_cols.0.5.1, i64 %arg.aggs_and_cols.0.6.0, i64 %arg.aggs_and_cols.0.6.1) local_unnamed_addr { +entry: +tail call void @NRT_incref(i8* %arg.xs.0) +tail call void @NRT_incref(i8* %arg.ys.0) +tail call void @NRT_incref(i8* %arg.aggs_and_cols.0.0) +%.251 = icmp sgt i64 %arg.xs.5.0, 0 +br i1 %.251, label %B42.preheader, label %B160 + +B42.preheader: ; preds = %entry +%0 = add i64 %arg.xs.5.0, 1 +br label %B42 + +B42: ; preds = %B40.backedge, %B42.preheader +%lsr.iv3 = phi i64 [ %lsr.iv.next, %B40.backedge ], [ %0, %B42.preheader ] +%lsr.iv1 = phi double* [ %scevgep2, %B40.backedge ], [ %arg.xs.4, %B42.preheader ] +%lsr.iv = phi double* [ %scevgep, %B40.backedge ], [ %arg.ys.4, %B42.preheader ] +%.381 = load double, double* %lsr.iv1, align 8 +%.420 = load double, double* %lsr.iv, align 8 +%.458 = fcmp ole double %.381, %arg.bounds.1 +%not..432 = fcmp oge double %.381, %arg.bounds.0 +%"$phi82.1.1" = and i1 %.458, %not..432 +br i1 %"$phi82.1.1", label %B84, label %B40.backedge + +B84: ; preds = %B42 +%.513 = fcmp ole double %.420, %arg.bounds.3 +%not..487 = fcmp oge 
double %.420, %arg.bounds.2 +%"$phi106.1.1" = and i1 %.513, %not..487 +br i1 %"$phi106.1.1", label %B108.endif.endif.endif, label %B40.backedge + +B160: ; preds = %B40.backedge, %entry +tail call void @NRT_decref(i8* %arg.ys.0) +tail call void @NRT_decref(i8* %arg.xs.0) +tail call void @NRT_decref(i8* %arg.aggs_and_cols.0.0) +store i8* null, i8** %retptr, align 8 +ret i32 0 + +B108.endif.endif.endif: ; preds = %B84 +%.575 = fmul double %.381, %arg.vt.0 +%.583 = fadd double %.575, %arg.vt.1 +%.590 = fptosi double %.583 to i64 +%.630 = fmul double %.420, %arg.vt.2 +%.638 = fadd double %.630, %arg.vt.3 +%.645 = fptosi double %.638 to i64 +tail call void @NRT_incref(i8* %arg.aggs_and_cols.0.0) ; GONE 1 +tail call void @NRT_decref(i8* null) ; GONE 2 +tail call void @NRT_incref(i8* %arg.aggs_and_cols.0.0), !noalias !0 ; GONE 3 +%.62.i.i = icmp slt i64 %.645, 0 +%.63.i.i = select i1 %.62.i.i, i64 %arg.aggs_and_cols.0.5.0, i64 0 +%.64.i.i = add i64 %.63.i.i, %.645 +%.65.i.i = icmp slt i64 %.590, 0 +%.66.i.i = select i1 %.65.i.i, i64 %arg.aggs_and_cols.0.5.1, i64 0 +%.67.i.i = add i64 %.66.i.i, %.590 +%.84.i.i = mul i64 %.64.i.i, %arg.aggs_and_cols.0.5.1 +%.87.i.i = add i64 %.67.i.i, %.84.i.i +%.88.i.i = getelementptr i32, i32* %arg.aggs_and_cols.0.4, i64 %.87.i.i +%.89.i.i = load i32, i32* %.88.i.i, align 4, !noalias !3 +%.99.i.i = add i32 %.89.i.i, 1 +store i32 %.99.i.i, i32* %.88.i.i, align 4, !noalias !3 +tail call void @NRT_decref(i8* %arg.aggs_and_cols.0.0), !noalias !0 ; GONE 4 +tail call void @NRT_decref(i8* %arg.aggs_and_cols.0.0) ; GONE 5 +br label %B40.backedge + +B40.backedge: ; preds = %B108.endif.endif.endif, %B84, %B42 +%scevgep = getelementptr double, double* %lsr.iv, i64 1 +%scevgep2 = getelementptr double, double* %lsr.iv1, i64 1 +%lsr.iv.next = add i64 %lsr.iv3, -1 +%.294 = icmp sgt i64 %lsr.iv.next, 1 +br i1 %.294, label %B42, label %B160 +} + ''' # noqa + + def test_refct_pruning_op_recognize(self): + input_ir = self.sample_llvm_ir + input_lines = 
list(input_ir.splitlines()) + before_increfs = [ln for ln in input_lines if 'NRT_incref' in ln] + before_decrefs = [ln for ln in input_lines if 'NRT_decref' in ln] + + # prune + output_ir = nrtopt._remove_redundant_nrt_refct(input_ir) + output_lines = list(output_ir.splitlines()) + after_increfs = [ln for ln in output_lines if 'NRT_incref' in ln] + after_decrefs = [ln for ln in output_lines if 'NRT_decref' in ln] + + # check + self.assertNotEqual(before_increfs, after_increfs) + self.assertNotEqual(before_decrefs, after_decrefs) + + pruned_increfs = set(before_increfs) - set(after_increfs) + pruned_decrefs = set(before_decrefs) - set(after_decrefs) + + # the symm difference == or-combined + combined = pruned_increfs | pruned_decrefs + self.assertEqual(combined, pruned_increfs ^ pruned_decrefs) + pruned_lines = '\n'.join(combined) + + # all GONE lines are pruned + for i in [1, 2, 3, 4, 5]: + gone = '; GONE {}'.format(i) + self.assertIn(gone, pruned_lines) + # no other lines + self.assertEqual(len(list(pruned_lines.splitlines())), len(combined)) + + @unittest.skip("Pass removed as it was buggy. 
Re-enable when fixed.") + def test_refct_pruning_with_branches(self): + '''testcase from #2350''' + @njit + def _append_non_na(x, y, agg, field): + if not np.isnan(field): + agg[y, x] += 1 + + @njit + def _append(x, y, agg, field): + if not np.isnan(field): + if np.isnan(agg[y, x]): + agg[y, x] = field + else: + agg[y, x] += field + + @njit + def append(x, y, agg, field): + _append_non_na(x, y, agg, field) + _append(x, y, agg, field) + + # Disable python wrapper to avoid detecting necessary + # refcount inside it + @njit(no_cpython_wrapper=True) + def extend(arr, field): + for i in range(arr.shape[0]): + for j in range(arr.shape[1]): + append(j, i, arr, field) + + # Compile + extend.compile("(f4[:,::1], f4)") + + # Test there are no reference count operations + llvmir = str(extend.inspect_llvm(extend.signatures[0])) + refops = list(re.finditer(r'(NRT_incref|NRT_decref)\([^\)]+\)', llvmir)) + self.assertEqual(len(refops), 0) + + @linux_only + @x86_only + def test_inline_asm(self): + """The InlineAsm class from llvmlite.ir has no 'name' attr the refcount + pruning pass should be tolerant to this""" + llvm.initialize() + llvm.initialize_native_target() + llvm.initialize_native_asmprinter() + llvm.initialize_native_asmparser() + + @intrinsic + def bar(tyctx, x, y): + def codegen(cgctx, builder, sig, args): + (arg_0, arg_1) = args + fty = ir.FunctionType(ir.IntType(32), [ir.IntType(32), + ir.IntType(32)]) + mul = builder.asm(fty, "mov $2, $0; imul $1, $0", "=&r,r,r", + (arg_0, arg_1), name="asm_mul", + side_effect=False) + return impl_ret_untracked(cgctx, builder, sig.return_type, mul) + return signature(types.int32, types.int32, types.int32), codegen + + @njit(['int32(int32)']) + def foo(x): + x += 1 + z = bar(x, 2) + return z + + self.assertEqual(foo(10), 22) # expect (10 + 1) * 2 = 22 + + +@skip_unless_cffi +class TestNrtExternalCFFI(EnableNRTStatsMixin, TestCase): + """Testing the use of externally compiled C code that use NRT + """ + def setUp(self): + # initialize 
the NRT (in case the tests are run in isolation) + cpu_target.target_context + super(TestNrtExternalCFFI, self).setUp() + + def compile_cffi_module(self, name, source, cdef): + from cffi import FFI + + ffi = FFI() + ffi.set_source(name, source, include_dirs=[include_path()]) + ffi.cdef(cdef) + tmpdir = temp_directory("cffi_test_{}".format(name)) + ffi.compile(tmpdir=tmpdir) + sys.path.append(tmpdir) + try: + mod = import_dynamic(name) + finally: + sys.path.remove(tmpdir) + + return ffi, mod + + def get_nrt_api_table(self): + from cffi import FFI + + ffi = FFI() + nrt_get_api = ffi.cast("void* (*)()", _nrt_python.c_helpers['get_api']) + table = nrt_get_api() + return table + + def test_manage_memory(self): + name = "{}_test_manage_memory".format(self.__class__.__name__) + source = r""" +#include +#include "numba/core/runtime/nrt_external.h" + +int status = 0; + +void my_dtor(void *ptr) { + free(ptr); + status = 0xdead; +} + +NRT_MemInfo* test_nrt_api(NRT_api_functions *nrt) { + void * data = malloc(10); + NRT_MemInfo *mi = nrt->manage_memory(data, my_dtor); + nrt->acquire(mi); + nrt->release(mi); + status = 0xa110c; + return mi; +} + """ + cdef = """ +void* test_nrt_api(void *nrt); +extern int status; + """ + + ffi, mod = self.compile_cffi_module(name, source, cdef) + # Init status is 0 + self.assertEqual(mod.lib.status, 0) + table = self.get_nrt_api_table() + out = mod.lib.test_nrt_api(table) + # status is now 0xa110c + self.assertEqual(mod.lib.status, 0xa110c) + mi_addr = int(ffi.cast("size_t", out)) + mi = nrt.MemInfo(mi_addr) + self.assertEqual(mi.refcount, 1) + del mi # force deallocation on mi + # status is now 0xdead + self.assertEqual(mod.lib.status, 0xdead) + + def test_allocate(self): + name = "{}_test_allocate".format(self.__class__.__name__) + source = r""" +#include +#include "numba/core/runtime/nrt_external.h" + +NRT_MemInfo* test_nrt_api(NRT_api_functions *nrt, size_t n) { + size_t *data = NULL; + NRT_MemInfo *mi = nrt->allocate(n); + data = 
nrt->get_data(mi); + data[0] = 0xded; + data[1] = 0xabc; + data[2] = 0xdef; + return mi; +} + """ + cdef = "void* test_nrt_api(void *nrt, size_t n);" + ffi, mod = self.compile_cffi_module(name, source, cdef) + + table = self.get_nrt_api_table() + + numbytes = 3 * np.dtype(np.intp).itemsize + out = mod.lib.test_nrt_api(table, numbytes) + + mi_addr = int(ffi.cast("size_t", out)) + mi = nrt.MemInfo(mi_addr) + self.assertEqual(mi.refcount, 1) + + buffer = ffi.buffer(ffi.cast("char [{}]".format(numbytes), mi.data)) + arr = np.ndarray(shape=(3,), dtype=np.intp, buffer=buffer) + np.testing.assert_equal(arr, [0xded, 0xabc, 0xdef]) + + def test_get_api(self): + from cffi import FFI + + @njit + def test_nrt_api(): + return NRT_get_api() + + ffi = FFI() + expect = int(ffi.cast('size_t', self.get_nrt_api_table())) + got = test_nrt_api() + self.assertEqual(expect, got) + + +class TestNrtStatistics(TestCase): + + def setUp(self): + # Store the current stats state + self.__stats_state = _nrt_python.memsys_stats_enabled() + + def tearDown(self): + # Set stats state back to whatever it was before the test ran + if self.__stats_state: + _nrt_python.memsys_enable_stats() + else: + _nrt_python.memsys_disable_stats() + + def test_stats_env_var_explicit_on(self): + # Checks that explicitly turning the stats on via the env var works. 
+ src = """if 1: + from numba import njit + import numpy as np + from numba.core.runtime import rtsys, _nrt_python + from numba.core.registry import cpu_target + + @njit + def foo(): + return np.arange(10)[0] + + # initialize the NRT before use + rtsys.initialize(cpu_target.target_context) + assert _nrt_python.memsys_stats_enabled() + orig_stats = rtsys.get_allocation_stats() + foo() + new_stats = rtsys.get_allocation_stats() + total_alloc = new_stats.alloc - orig_stats.alloc + total_free = new_stats.free - orig_stats.free + total_mi_alloc = new_stats.mi_alloc - orig_stats.mi_alloc + total_mi_free = new_stats.mi_free - orig_stats.mi_free + + expected = 1 + assert total_alloc == expected + assert total_free == expected + assert total_mi_alloc == expected + assert total_mi_free == expected + """ + # Check env var explicitly being set works + env = os.environ.copy() + env['NUMBA_NRT_STATS'] = "1" + run_in_subprocess(src, env=env) + + def check_env_var_off(self, env): + + src = """if 1: + from numba import njit + import numpy as np + from numba.core.runtime import rtsys, _nrt_python + + @njit + def foo(): + return np.arange(10)[0] + + assert _nrt_python.memsys_stats_enabled() == False + try: + rtsys.get_allocation_stats() + except RuntimeError as e: + assert "NRT stats are disabled." in str(e) + """ + run_in_subprocess(src, env=env) + + def test_stats_env_var_explicit_off(self): + # Checks that explicitly turning the stats off via the env var works. + env = os.environ.copy() + env['NUMBA_NRT_STATS'] = "0" + self.check_env_var_off(env) + + def test_stats_env_var_default_off(self): + # Checks that the env var not being set is the same as "off", i.e. + # default for Numba is off. 
+ env = os.environ.copy() + env.pop('NUMBA_NRT_STATS', None) + self.check_env_var_off(env) + + def test_stats_status_toggle(self): + + @njit + def foo(): + tmp = np.ones(3) + return np.arange(5 * tmp[0]) + + # Switch on stats + _nrt_python.memsys_enable_stats() + # check the stats are on + self.assertTrue(_nrt_python.memsys_stats_enabled()) + + for i in range(2): + # capture the stats state + stats_1 = rtsys.get_allocation_stats() + # Switch off stats + _nrt_python.memsys_disable_stats() + # check the stats are off + self.assertFalse(_nrt_python.memsys_stats_enabled()) + # run something that would move the counters were they enabled + foo() + # Switch on stats + _nrt_python.memsys_enable_stats() + # check the stats are on + self.assertTrue(_nrt_python.memsys_stats_enabled()) + # capture the stats state (should not have changed) + stats_2 = rtsys.get_allocation_stats() + # run something that will move the counters + foo() + # capture the stats state (should have changed) + stats_3 = rtsys.get_allocation_stats() + # check stats_1 == stats_2 + self.assertEqual(stats_1, stats_2) + # check stats_2 < stats_3 + self.assertLess(stats_2, stats_3) + + def test_rtsys_stats_query_raises_exception_when_disabled(self): + # Checks that the standard rtsys.get_allocation_stats() query raises + # when stats counters are turned off. + + _nrt_python.memsys_disable_stats() + self.assertFalse(_nrt_python.memsys_stats_enabled()) + + with self.assertRaises(RuntimeError) as raises: + rtsys.get_allocation_stats() + + self.assertIn("NRT stats are disabled.", str(raises.exception)) + + def test_nrt_explicit_stats_query_raises_exception_when_disabled(self): + # Checks the various memsys_get_stats functions raise if queried when + # the stats counters are disabled. 
+ method_variations = ('alloc', 'free', 'mi_alloc', 'mi_free') + for meth in method_variations: + stats_func = getattr(_nrt_python, f'memsys_get_stats_{meth}') + with self.subTest(stats_func=stats_func): + # Turn stats off + _nrt_python.memsys_disable_stats() + self.assertFalse(_nrt_python.memsys_stats_enabled()) + with self.assertRaises(RuntimeError) as raises: + stats_func() + self.assertIn("NRT stats are disabled.", str(raises.exception)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_nrt_refct.py b/venv/lib/python3.10/site-packages/numba/tests/test_nrt_refct.py new file mode 100644 index 0000000000000000000000000000000000000000..3049c7a3b3cb7239394b02d75f4d14f3bf2abd8b --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_nrt_refct.py @@ -0,0 +1,115 @@ +""" +Tests issues or edge cases for producing invalid NRT refct +""" + + +import gc + +import numpy as np + +import unittest +from numba import njit +from numba.core.runtime import rtsys +from numba.tests.support import TestCase, EnableNRTStatsMixin + + +class TestNrtRefCt(EnableNRTStatsMixin, TestCase): + + def setUp(self): + # Clean up any NRT-backed objects hanging in a dead reference cycle + gc.collect() + super(TestNrtRefCt, self).setUp() + + def test_no_return(self): + """ + Test issue #1291 + """ + + @njit + def foo(n): + for i in range(n): + temp = np.zeros(2) + return 0 + + n = 10 + init_stats = rtsys.get_allocation_stats() + foo(n) + cur_stats = rtsys.get_allocation_stats() + self.assertEqual(cur_stats.alloc - init_stats.alloc, n) + self.assertEqual(cur_stats.free - init_stats.free, n) + + def test_escaping_var_init_in_loop(self): + """ + Test issue #1297 + """ + + @njit + def g(n): + + x = np.zeros((n, 2)) + + for i in range(n): + y = x[i] + + for i in range(n): + y = x[i] + + return 0 + + init_stats = rtsys.get_allocation_stats() + g(10) + cur_stats = rtsys.get_allocation_stats() + self.assertEqual(cur_stats.alloc - 
init_stats.alloc, 1) + self.assertEqual(cur_stats.free - init_stats.free, 1) + + def test_invalid_computation_of_lifetime(self): + """ + Test issue #1573 + """ + @njit + def if_with_allocation_and_initialization(arr1, test1): + tmp_arr = np.zeros_like(arr1) + + for i in range(tmp_arr.shape[0]): + pass + + if test1: + np.zeros_like(arr1) + + return tmp_arr + + arr = np.random.random((5, 5)) # the values are not consumed + + init_stats = rtsys.get_allocation_stats() + if_with_allocation_and_initialization(arr, False) + cur_stats = rtsys.get_allocation_stats() + self.assertEqual(cur_stats.alloc - init_stats.alloc, + cur_stats.free - init_stats.free) + + def test_del_at_beginning_of_loop(self): + """ + Test issue #1734 + """ + @njit + def f(arr): + res = 0 + + for i in (0, 1): + # `del t` is issued here before defining t. It must be + # correctly handled by the lowering phase. + t = arr[i] + if t[i] > 1: + res += t[i] + + return res + + arr = np.ones((2, 2)) + init_stats = rtsys.get_allocation_stats() + f(arr) + cur_stats = rtsys.get_allocation_stats() + self.assertEqual(cur_stats.alloc - init_stats.alloc, + cur_stats.free - init_stats.free) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_num_threads.py b/venv/lib/python3.10/site-packages/numba/tests/test_num_threads.py new file mode 100644 index 0000000000000000000000000000000000000000..fdd51ac3dda7056919d9fe1218ffe33c1b97cdbb --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_num_threads.py @@ -0,0 +1,632 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +import sys +import os +import re +import multiprocessing +import unittest + +import numpy as np + +from numba import (njit, set_num_threads, get_num_threads, prange, config, + threading_layer, guvectorize) +from numba.np.ufunc.parallel import get_thread_id +from numba.core.errors import TypingError +from numba.tests.support import 
TestCase, skip_parfors_unsupported, tag +from numba.tests.test_parallel_backend import TestInSubprocess + + +class TestNumThreads(TestCase): + _numba_parallel_test_ = False + + def setUp(self): + # Make sure the num_threads is set to the max. This also makes sure + # the threads are launched. + set_num_threads(config.NUMBA_NUM_THREADS) + + def check_mask(self, expected, result): + # There's no guarantee that TBB will use a full mask worth of + # threads if it deems it inefficient to do so + if threading_layer() == 'tbb': + self.assertTrue(np.all(result <= expected)) + elif threading_layer() in ('omp', 'workqueue'): + np.testing.assert_equal(expected, result) + else: + assert 0, 'unreachable' + + @skip_parfors_unsupported + def test_set_num_threads_type(self): + + @njit + def foo(): + set_num_threads('wrong_type') + + expected = "The number of threads specified must be an integer" + for fn, errty in ((foo, TypingError), (foo.py_func, TypeError)): + with self.assertRaises(errty) as raises: + fn() + self.assertIn(expected, str(raises.exception)) + + @skip_parfors_unsupported + @unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores") + def _test_set_num_threads_basic(self): + max_threads = config.NUMBA_NUM_THREADS + + self.assertEqual(get_num_threads(), max_threads) + set_num_threads(2) + self.assertEqual(get_num_threads(), 2) + set_num_threads(max_threads) + self.assertEqual(get_num_threads(), max_threads) + + with self.assertRaises(ValueError): + set_num_threads(0) + + with self.assertRaises(ValueError): + set_num_threads(max_threads + 1) + + @skip_parfors_unsupported + @unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores") + def _test_set_num_threads_basic_jit(self): + max_threads = config.NUMBA_NUM_THREADS + + @njit + def get_n(): + return get_num_threads() + + self.assertEqual(get_n(), max_threads) + set_num_threads(2) + self.assertEqual(get_n(), 2) + set_num_threads(max_threads) + self.assertEqual(get_n(), max_threads) + + @njit + 
def set_get_n(n): + set_num_threads(n) + return get_num_threads() + + self.assertEqual(set_get_n(2), 2) + self.assertEqual(set_get_n(max_threads), max_threads) + + @skip_parfors_unsupported + @unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores") + def _test_set_num_threads_basic_guvectorize(self): + max_threads = config.NUMBA_NUM_THREADS + + @guvectorize(['void(int64[:])'], + '(n)', + nopython=True, + target='parallel') + def get_n(x): + x[:] = get_num_threads() + + x = np.zeros((5000000,), dtype=np.int64) + get_n(x) + np.testing.assert_equal(x, max_threads) + set_num_threads(2) + x = np.zeros((5000000,), dtype=np.int64) + get_n(x) + np.testing.assert_equal(x, 2) + set_num_threads(max_threads) + x = np.zeros((5000000,), dtype=np.int64) + get_n(x) + np.testing.assert_equal(x, max_threads) + + @guvectorize(['void(int64[:])'], + '(n)', + nopython=True, + target='parallel') + def set_get_n(n): + set_num_threads(n[0]) + n[:] = get_num_threads() + + x = np.zeros((5000000,), dtype=np.int64) + x[0] = 2 + set_get_n(x) + np.testing.assert_equal(x, 2) + x = np.zeros((5000000,), dtype=np.int64) + x[0] = max_threads + set_get_n(x) + np.testing.assert_equal(x, max_threads) + + @skip_parfors_unsupported + @unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores") + def _test_set_num_threads_outside_jit(self): + + # Test set_num_threads outside a jitted function + set_num_threads(2) + + @njit(parallel=True) + def test_func(): + x = 5 + buf = np.empty((x,)) + for i in prange(x): + buf[i] = get_num_threads() + return buf + + @guvectorize(['void(int64[:])'], + '(n)', + nopython=True, + target='parallel') + def test_gufunc(x): + x[:] = get_num_threads() + + out = test_func() + np.testing.assert_equal(out, 2) + + x = np.zeros((5000000,), dtype=np.int64) + test_gufunc(x) + np.testing.assert_equal(x, 2) + + @skip_parfors_unsupported + @unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores") + def _test_set_num_threads_inside_jit(self): + # Test 
set_num_threads inside a jitted function + @njit(parallel=True) + def test_func(nthreads): + x = 5 + buf = np.empty((x,)) + set_num_threads(nthreads) + for i in prange(x): + buf[i] = get_num_threads() + return buf + + mask = 2 + out = test_func(mask) + np.testing.assert_equal(out, mask) + + @skip_parfors_unsupported + @unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores") + def _test_set_num_threads_inside_guvectorize(self): + # Test set_num_threads inside a jitted guvectorize function + @guvectorize(['void(int64[:])'], + '(n)', + nopython=True, + target='parallel') + def test_func(x): + set_num_threads(x[0]) + x[:] = get_num_threads() + + x = np.zeros((5000000,), dtype=np.int64) + mask = 2 + x[0] = mask + test_func(x) + np.testing.assert_equal(x, mask) + + @skip_parfors_unsupported + @unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores") + def _test_get_num_threads_truth_outside_jit(self): + + for mask in range(2, min(6, config.NUMBA_NUM_THREADS + 1)): + set_num_threads(mask) + + # a lot of work, hopefully will trigger "mask" count of threads to + # join the parallel region (for those backends with dynamic threads) + @njit(parallel=True) + def test_func(): + x = 5000000 + buf = np.empty((x,)) + for i in prange(x): + buf[i] = get_thread_id() + return len(np.unique(buf)), get_num_threads() + + out = test_func() + self.check_mask((mask, mask), out) + + @guvectorize(['void(int64[:], int64[:])'], + '(n), (m)', + nopython=True, + target='parallel') + def test_gufunc(x, out): + x[:] = get_thread_id() + out[0] = get_num_threads() + + # Reshape to force parallelism + x = np.full((5000000,), -1, dtype=np.int64).reshape((100, 50000)) + out = np.zeros((1,), dtype=np.int64) + test_gufunc(x, out) + self.check_mask(mask, out) + self.check_mask(mask, len(np.unique(x))) + + @skip_parfors_unsupported + @unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores") + def _test_get_num_threads_truth_inside_jit(self): + + for mask in range(2, 
min(6, config.NUMBA_NUM_THREADS + 1)): + + # a lot of work, hopefully will trigger "mask" count of threads to + # join the parallel region (for those backends with dynamic threads) + @njit(parallel=True) + def test_func(): + set_num_threads(mask) + x = 5000000 + buf = np.empty((x,)) + for i in prange(x): + buf[i] = get_thread_id() + return len(np.unique(buf)), get_num_threads() + + out = test_func() + self.check_mask((mask, mask), out) + + @guvectorize(['void(int64[:], int64[:])'], + '(n), (m)', + nopython=True, + target='parallel') + def test_gufunc(x, out): + set_num_threads(mask) + x[:] = get_thread_id() + out[0] = get_num_threads() + + # Reshape to force parallelism + x = np.full((5000000,), -1, dtype=np.int64).reshape((100, 50000)) + out = np.zeros((1,), dtype=np.int64) + test_gufunc(x, out) + self.check_mask(mask, out) + self.check_mask(mask, len(np.unique(x))) + + # this test can only run on OpenMP (providing OMP_MAX_ACTIVE_LEVELS is not + # set or >= 2) and TBB backends + @skip_parfors_unsupported + @unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores") + def _test_nested_parallelism_1(self): + if threading_layer() == 'workqueue': + self.skipTest("workqueue is not threadsafe") + + # check that get_num_threads is ok in nesting + mask = config.NUMBA_NUM_THREADS - 1 + + N = config.NUMBA_NUM_THREADS + M = 2 * config.NUMBA_NUM_THREADS + + @njit(parallel=True) + def child_func(buf, fid): + M, N = buf.shape + for i in prange(N): + buf[fid, i] = get_num_threads() + + def get_test(test_type): + if test_type == 'njit': + def test_func(nthreads, py_func=False): + @njit(parallel=True) + def _test_func(nthreads): + acc = 0 + buf = np.zeros((M, N)) + set_num_threads(nthreads) + for i in prange(M): + local_mask = 1 + i % mask + # set threads in parent function + set_num_threads(local_mask) + if local_mask < N: + child_func(buf, local_mask) + acc += get_num_threads() + return acc, buf + if py_func: + return _test_func.py_func(nthreads) + else: + return 
_test_func(nthreads) + + elif test_type == 'guvectorize': + def test_func(nthreads, py_func=False): + def _test_func(acc, buf, local_mask): + set_num_threads(nthreads) + # set threads in parent function + set_num_threads(local_mask[0]) + if local_mask[0] < N: + child_func(buf, local_mask[0]) + acc[0] += get_num_threads() + + buf = np.zeros((M, N), dtype=np.int64) + acc = np.zeros((M, 1), dtype=np.int64) + local_mask = (1 + np.arange(M) % mask).reshape((M, 1)) + sig = ['void(int64[:], int64[:, :], int64[:])'] + layout = '(p), (n, m), (p)' + if not py_func: + _test_func = guvectorize(sig, layout, nopython=True, + target='parallel')(_test_func) + else: + _test_func = guvectorize(sig, layout, + forceobj=True)(_test_func) + _test_func(acc, buf, local_mask) + return acc, buf + + return test_func + + for test_type in ['njit', 'guvectorize']: + test_func = get_test(test_type) + got_acc, got_arr = test_func(mask) + exp_acc, exp_arr = test_func(mask, py_func=True) + np.testing.assert_equal(exp_acc, got_acc) + np.testing.assert_equal(exp_arr, got_arr) + + # check the maths reconciles, guvectorize does not reduce, njit does + math_acc_exp = 1 + np.arange(M) % mask + if test_type == 'guvectorize': + math_acc = math_acc_exp.reshape((M, 1)) + else: + math_acc = np.sum(math_acc_exp) + + np.testing.assert_equal(math_acc, got_acc) + + math_arr = np.zeros((M, N)) + for i in range(1, N): + # there's branches on 1, ..., num_threads - 1 + math_arr[i, :] = i + np.testing.assert_equal(math_arr, got_arr) + + # this test can only run on OpenMP (providing OMP_MAX_ACTIVE_LEVELS is not + # set or >= 2) and TBB backends + @skip_parfors_unsupported + @unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores") + def _test_nested_parallelism_2(self): + if threading_layer() == 'workqueue': + self.skipTest("workqueue is not threadsafe") + + # check that get_num_threads is ok in nesting + + N = config.NUMBA_NUM_THREADS + 1 + M = 4 * config.NUMBA_NUM_THREADS + 1 + + def get_impl(child_type, 
test_type): + + if child_type == 'parallel': + child_dec = njit(parallel=True) + elif child_type == 'njit': + child_dec = njit(parallel=False) + elif child_type == 'none': + def child_dec(x): + return x + + @child_dec + def child(buf, fid): + M, N = buf.shape + set_num_threads(fid) # set threads in child function + for i in prange(N): + buf[fid, i] = get_num_threads() + + if test_type in ['parallel', 'njit', 'none']: + if test_type == 'parallel': + test_dec = njit(parallel=True) + elif test_type == 'njit': + test_dec = njit(parallel=False) + elif test_type == 'none': + def test_dec(x): + return x + + @test_dec + def test_func(nthreads): + buf = np.zeros((M, N)) + set_num_threads(nthreads) + for i in prange(M): + local_mask = 1 + i % mask + # when the threads exit the child functions they should + # have a TLS slot value of the local mask as it was set + # in child + if local_mask < config.NUMBA_NUM_THREADS: + child(buf, local_mask) + assert get_num_threads() == local_mask + return buf + else: + if test_type == 'guvectorize': + test_dec = guvectorize(['int64[:,:], int64[:]'], + '(n, m), (k)', nopython=True, + target='parallel') + elif test_type == 'guvectorize-obj': + test_dec = guvectorize(['int64[:,:], int64[:]'], + '(n, m), (k)', forceobj=True) + + def test_func(nthreads): + @test_dec + def _test_func(buf, local_mask): + set_num_threads(nthreads) + # when the threads exit the child functions they should + # have a TLS slot value of the local mask as it was set + # in child + if local_mask[0] < config.NUMBA_NUM_THREADS: + child(buf, local_mask[0]) + assert get_num_threads() == local_mask[0] + + buf = np.zeros((M, N), dtype=np.int64) + local_mask = (1 + np.arange(M) % mask).reshape((M, 1)) + _test_func(buf, local_mask) + return buf + + return test_func + + mask = config.NUMBA_NUM_THREADS - 1 + + res_arrays = {} + for test_type in ['parallel', 'njit', 'none', + 'guvectorize', 'guvectorize-obj']: + for child_type in ['parallel', 'njit', 'none']: + if child_type == 
'none' and test_type != 'none': + continue + set_num_threads(mask) + res_arrays[test_type, child_type] = get_impl( + child_type, test_type)(mask) + + py_arr = res_arrays['none', 'none'] + for arr in res_arrays.values(): + np.testing.assert_equal(arr, py_arr) + + # check the maths reconciles + math_arr = np.zeros((M, N)) + # there's branches on modulo mask but only NUMBA_NUM_THREADS funcs + for i in range(1, config.NUMBA_NUM_THREADS): + math_arr[i, :] = i + + np.testing.assert_equal(math_arr, py_arr) + + # this test can only run on OpenMP (providing OMP_MAX_ACTIVE_LEVELS is not + # set or >= 2) and TBB backends + # This test needs at least 3 threads to run, N>=2 for the launch, M>=N+1 for + # the nested function + @skip_parfors_unsupported + @unittest.skipIf(config.NUMBA_NUM_THREADS < 3, "Not enough CPU cores") + def _test_nested_parallelism_3(self): + if threading_layer() == 'workqueue': + self.skipTest("workqueue is not threadsafe") + + # check that the right number of threads are present in nesting + # this relies on there being a load of cores present + BIG = 1000000 + + @njit(parallel=True) + def work(local_nt): # arg is value 3 + tid = np.zeros(BIG) + acc = 0 + set_num_threads(local_nt) # set to 3 threads + for i in prange(BIG): + acc += 1 + tid[i] = get_thread_id() + return acc, np.unique(tid) + + @njit(parallel=True) + def test_func_jit(nthreads): + set_num_threads(nthreads) # set to 2 threads + lens = np.zeros(nthreads) + total = 0 + for i in prange(nthreads): + my_acc, tids = work(nthreads + 1) # call with value 3 + lens[i] = len(tids) + total += my_acc + return total, np.unique(lens) + + NT = 2 + expected_acc = BIG * NT + expected_thread_count = NT + 1 + + got_acc, got_tc = test_func_jit(NT) + self.assertEqual(expected_acc, got_acc) + self.check_mask(expected_thread_count, got_tc) + + def test_guvectorize(nthreads): + @guvectorize(['int64[:], int64[:]'], + '(n), (n)', + nopython=True, + target='parallel') + def test_func_guvectorize(total, lens): + 
my_acc, tids = work(nthreads + 1) + lens[0] = len(tids) + total[0] += my_acc + + total = np.zeros((nthreads, 1), dtype=np.int64) + lens = np.zeros(nthreads, dtype=np.int64).reshape((nthreads, 1)) + + test_func_guvectorize(total, lens) + # vectorize does not reduce, so total is summed + return total.sum(), np.unique(lens) + + got_acc, got_tc = test_guvectorize(NT) + + self.assertEqual(expected_acc, got_acc) + self.check_mask(expected_thread_count, got_tc) + + @skip_parfors_unsupported + @unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores") + @unittest.skipIf(not sys.platform.startswith('linux'), "Linux only") + def _test_threadmask_across_fork(self): + forkctx = multiprocessing.get_context('fork') + + @njit + def foo(): + return get_num_threads() + + def wrap(queue): + queue.put(foo()) + + mask = 1 + self.assertEqual(foo(), config.NUMBA_NUM_THREADS) + set_num_threads(mask) + self.assertEqual(foo(), mask) + shared_queue = forkctx.Queue() + # check TLS slot inheritance in fork + p = forkctx.Process(target=wrap, args=(shared_queue,)) + p.start() + p.join() + self.assertEqual(shared_queue.get(), mask) + + def tearDown(self): + set_num_threads(config.NUMBA_NUM_THREADS) + + @skip_parfors_unsupported + def _test_get_thread_id_not_parallel(self): + python_get_thread_id = get_thread_id() + check_array_size = 8 + + @njit(parallel=False) + def par_false(size): + njit_par_false_tid = get_thread_id() + res = np.ones(size) + for i in prange(size): + res[i] = get_thread_id() + return njit_par_false_tid, res + + @njit(parallel=True) + def par_true(size): + njit_par_true_tid = get_thread_id() + res = np.ones(size) + for i in range(size): + res[i] = get_thread_id() + return njit_par_true_tid, res + + self.assertEqual(python_get_thread_id, 0) + njit_par_false_tid, njit_par_false_arr = par_false(check_array_size) + self.assertEqual(njit_par_false_tid, 0) + np.testing.assert_equal(njit_par_false_arr, 0) + njit_par_true_tid, njit_par_true_arr = par_true(check_array_size) 
+ self.assertEqual(njit_par_true_tid, 0) + np.testing.assert_equal(njit_par_true_arr, 0) + + +class TestNumThreadsBackends(TestInSubprocess, TestCase): + _class = TestNumThreads + _DEBUG = False + + # 1 is mainly here to ensure tests skip correctly + num_threads = [i for i in [1, 2, 4, 8, 16] if i <= config.NUMBA_NUM_THREADS] + + def run_test_in_separate_process(self, test, threading_layer, num_threads): + env_copy = os.environ.copy() + env_copy['NUMBA_THREADING_LAYER'] = str(threading_layer) + env_copy['NUMBA_NUM_THREADS'] = str(num_threads) + cmdline = [sys.executable, "-m", "numba.runtests", "-v", test] + return self.run_cmd(cmdline, env_copy) + + @classmethod + def _inject(cls, name, backend, backend_guard, num_threads): + themod = cls.__module__ + thecls = cls._class.__name__ + injected_method = '%s.%s.%s' % (themod, thecls, name) + + def test_template(self): + o, e = self.run_test_in_separate_process(injected_method, backend, + num_threads) + if self._DEBUG: + print('stdout:\n "%s"\n stderr:\n "%s"' % (o, e)) + # If the test was skipped in the subprocess, then mark this as a + # skipped test. + m = re.search(r"\.\.\. 
skipped '(.*?)'", e) + if m is not None: + self.skipTest(m.group(1)) + self.assertIn('OK', e) + self.assertTrue('FAIL' not in e) + self.assertTrue('ERROR' not in e) + + injected_test = "%s_%s_%s_threads" % (name[1:], backend, num_threads) + setattr(cls, injected_test, + tag('long_running')(backend_guard(test_template))) + + @classmethod + def generate(cls): + for name in cls._class.__dict__.copy(): + for backend, backend_guard in cls.backends.items(): + for num_threads in cls.num_threads: + if not name.startswith('_test_'): + continue + cls._inject(name, backend, backend_guard, num_threads) + + +TestNumThreadsBackends.generate() + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_numberctor.py b/venv/lib/python3.10/site-packages/numba/tests/test_numberctor.py new file mode 100644 index 0000000000000000000000000000000000000000..fd074bef5f7b4436589f0b9262f8c4fabf290c55 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_numberctor.py @@ -0,0 +1,241 @@ +import numpy as np + +from numba import jit, njit +from numba.core import types + +from numba.tests.support import TestCase, tag +import unittest + + +def dobool(a): + return bool(a) + + +def doint(a): + return int(a) + + +def dofloat(a): + return float(a) + + +def docomplex(a): + return complex(a) + + +def docomplex2(a, b): + return complex(a, b) + + +def complex_calc(a): + z = complex(a) + return z.real ** 2 + z.imag ** 2 + + +def complex_calc2(a, b): + z = complex(a, b) + return z.real ** 2 + z.imag ** 2 + + +def converter(tp): + def f(a): + return tp(a) + return f + + +def real_np_types(): + for tp_name in ('int8', 'int16', 'int32', 'int64', + 'uint8', 'uint16', 'uint32', 'uint64', + 'intc', 'uintc', 'intp', 'uintp', + 'float32', 'float64', 'bool_'): + yield tp_name + +def complex_np_types(): + for tp_name in ('complex64', 'complex128'): + yield tp_name + + +class TestScalarNumberCtor(TestCase): + """ + Test (some scalar) + """ + + def 
check_int_constructor(self, pyfunc): + x_types = [ + types.boolean, types.int32, types.int64, types.float32, types.float64 + ] + x_values = [1, 0, 1000, 12.2, 23.4] + + for ty, x in zip(x_types, x_values): + cfunc = njit((ty,))(pyfunc) + self.assertPreciseEqual(pyfunc(x), cfunc(x)) + + def test_bool(self): + self.check_int_constructor(dobool) + + def test_int(self): + self.check_int_constructor(doint) + + def test_float(self): + pyfunc = dofloat + + x_types = [ + types.int32, types.int64, types.float32, types.float64 + ] + x_values = [1, 1000, 12.2, 23.4] + + for ty, x in zip(x_types, x_values): + cfunc = njit((ty,))(pyfunc) + self.assertPreciseEqual(pyfunc(x), cfunc(x), + prec='single' if ty is types.float32 else 'exact') + + def test_complex(self): + pyfunc = docomplex + + x_types = [ + types.int32, types.int64, types.float32, types.float64, + types.complex64, types.complex128, + ] + x_values = [1, 1000, 12.2, 23.4, 1.5-5j, 1-4.75j] + + for ty, x in zip(x_types, x_values): + cfunc = njit((ty,))(pyfunc) + got = cfunc(x) + expected = pyfunc(x) + self.assertPreciseEqual(pyfunc(x), cfunc(x), + prec='single' if ty is types.float32 else 'exact') + + # Check that complex(float32) really creates a complex64, + # by checking the accuracy of computations. 
+ pyfunc = complex_calc + x = 1.0 + 2**-50 + cfunc = njit((types.float32,))(pyfunc) + self.assertPreciseEqual(cfunc(x), 1.0) + # Control (complex128) + cfunc = njit((types.float64,))(pyfunc) + self.assertGreater(cfunc(x), 1.0) + + def test_complex2(self): + pyfunc = docomplex2 + + x_types = [ + types.int32, types.int64, types.float32, types.float64 + ] + x_values = [1, 1000, 12.2, 23.4] + y_values = [x - 3 for x in x_values] + + for ty, x, y in zip(x_types, x_values, y_values): + cfunc = njit((ty, ty))(pyfunc) + self.assertPreciseEqual(pyfunc(x, y), cfunc(x, y), + prec='single' if ty is types.float32 else 'exact') + + # Check that complex(float32, float32) really creates a complex64, + # by checking the accuracy of computations. + pyfunc = complex_calc2 + x = 1.0 + 2**-50 + cfunc = njit((types.float32, types.float32))(pyfunc) + self.assertPreciseEqual(cfunc(x, x), 2.0) + # Control (complex128) + cfunc = njit((types.float64, types.float32))(pyfunc) + self.assertGreater(cfunc(x, x), 2.0) + + def check_type_converter(self, tp, np_type, values): + pyfunc = converter(tp) + cfunc = jit(nopython=True)(pyfunc) + if issubclass(np_type, np.integer): + # Converting from a Python int to a small Numpy int on 32-bit + # builds can raise "OverflowError: Python int too large to + # convert to C long". Work around by going through a large + # Numpy int first. + np_converter = lambda x: np_type(np.int64(x)) + else: + np_converter = np_type + dtype = np.dtype(np_type) + for val in values: + if dtype.kind == 'u' and isinstance(val, float) and val < 0.0: + # Converting negative float to unsigned int yields undefined + # behaviour (and concretely different on ARM vs. 
x86) + continue + expected = np_converter(val) + got = cfunc(val) + self.assertPreciseEqual(got, expected, + msg="for type %s with arg %s" % (np_type, val)) + + def check_number_types(self, tp_factory): + values = [0, 1, -1, 100003, 10000000000007, -100003, -10000000000007, + 1.5, -3.5] + for tp_name in real_np_types(): + np_type = getattr(np, tp_name) + tp = tp_factory(tp_name) + self.check_type_converter(tp, np_type, values) + values.append(1.5+3j) + for tp_name in complex_np_types(): + np_type = getattr(np, tp_name) + tp = tp_factory(tp_name) + self.check_type_converter(tp, np_type, values) + + def test_numba_types(self): + """ + Test explicit casting to Numba number types. + """ + def tp_factory(tp_name): + return getattr(types, tp_name) + self.check_number_types(tp_factory) + + def test_numpy_types(self): + """ + Test explicit casting to Numpy number types. + """ + def tp_factory(tp_name): + return getattr(np, tp_name) + self.check_number_types(tp_factory) + + +class TestArrayNumberCtor(TestCase): + """ + Test (some sequence) + """ + + def check_type_constructor(self, np_type, values): + pyfunc = converter(np_type) + cfunc = jit(nopython=True)(pyfunc) + for val in values: + expected = np_type(val) + got = cfunc(val) + self.assertPreciseEqual(got, expected) + + def test_1d(self): + values = [ + (1.0, 2.5), + (1, 2.5), + [1.0, 2.5], + (), + ] + for tp_name in real_np_types(): + np_type = getattr(np, tp_name) + self.check_type_constructor(np_type, values) + values = [ + (1j, 2.5), + [1.0, 2.5], + ] + for tp_name in complex_np_types(): + np_type = getattr(np, tp_name) + self.check_type_constructor(np_type, values) + + def test_2d(self): + values = [ + ((1.0, 2.5), (3.5, 4)), + [(1.0, 2.5), (3.5, 4.0)], + ([1.0, 2.5], [3.5, 4.0]), + [(), ()], + ] + for tp_name in real_np_types(): + np_type = getattr(np, tp_name) + self.check_type_constructor(np_type, values) + for tp_name in complex_np_types(): + np_type = getattr(np, tp_name) + self.check_type_constructor(np_type, 
values) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_numbers.py b/venv/lib/python3.10/site-packages/numba/tests/test_numbers.py new file mode 100644 index 0000000000000000000000000000000000000000..1f7c6d4c6e23d8044c6e7374ad893f9896ba19bd --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_numbers.py @@ -0,0 +1,90 @@ +# (Some) Tests for targets/numbers.py + +import numpy as np + +from numba import njit +from numba.core import types +from numba.core.errors import TypingError +from numba.tests.support import TestCase + + +def gen_view(a,b): + def impl(x): + return a(x).view(b) + return impl + + +class TestViewIntFloat(TestCase): + """ This tests the 'view' method on NumPy scalars. """ + + def do_testing(self, inputs, dtypes): + for value, initial_type, expected in inputs: + for target_type, result in zip(dtypes, expected): + view = njit(gen_view(initial_type, target_type)) + if not np.isnan(result): + # check against predefined value + self.assertEqual(view(value), target_type(result)) + # check against numpy + self.assertEqual(view(value), + view.py_func(value)) + else: + # check that our implementation results in nan + self.assertTrue(np.isnan(view(value))) + # check that numpy results in nan + self.assertTrue(np.isnan(view.py_func(value))) + + def test_8_bits(self): + dtypes = (np.uint8, np.int8) + # Value Initial Type Expected answers using dtypes + inputs = ((1, np.uint8, (1, 1)), + (-1, np.int8, (255, -1))) + self.do_testing(inputs, dtypes) + + def test_32_bits(self): + dtypes = (np.uint32, np.int32, np.float32) + # Value Initial Type Expected answers using dtypes + inputs = ((1, np.uint32, (1, 1, 1.401298464324817e-45)), + (-1, np.int32, (4294967295, -1, np.nan)), + (1.0, np.float32, (1065353216, 1065353216, 1.0))) + self.do_testing(inputs, dtypes) + + def test_64_bits(self): + dtypes = (np.uint64, np.int64, np.float64) + # Value Initial Type Expected answers using dtypes + 
inputs = ((1, np.uint64, (1, 1, 5e-324)), + (-1, np.int64, (18446744073709551615, -1, np.nan)), + (1.0, np.float64, (4607182418800017408, + 4607182418800017408, + 1.0)) + ) + self.do_testing(inputs, dtypes) + + def test_python_scalar_exception(self): + intty = getattr(np, 'int{}'.format(types.intp.bitwidth)) + + @njit + def myview(): + a = 1 + a.view(intty) + + with self.assertRaises(TypingError) as e: + myview() + self.assertIn("'view' can only be called on NumPy dtypes, " + "try wrapping the variable 'a' with 'np.()'", + str(e.exception)) + + def do_testing_exceptions(self, pair): + with self.assertRaises(TypingError) as e: + view = njit(gen_view(pair[0], pair[1])) + view(1) + self.assertIn("Changing the dtype of a 0d array is only supported " + "if the itemsize is unchanged", + str(e.exception)) + + def test_exceptions32(self): + for pair in ((np.int32, np.int8), (np.int8, np.int32)): + self.do_testing_exceptions(pair) + + def test_exceptions64(self): + for pair in ((np.int32, np.int64), (np.int64, np.int32)): + self.do_testing_exceptions(pair) diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_numconv.py b/venv/lib/python3.10/site-packages/numba/tests/test_numconv.py new file mode 100644 index 0000000000000000000000000000000000000000..16679e886ebb0bae0318c7f4853037fd0966536d --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_numconv.py @@ -0,0 +1,38 @@ +import itertools +import unittest +from numba import njit +from numba.core import types + + +def template(fromty, toty): + def closure(self): + def cast(x): + y = x + return y + + cfunc = njit(toty(fromty))(cast) + self.assertAlmostEqual(cfunc(1), 1) + + return closure + + +class TestNumberConversion(unittest.TestCase): + """ + Test all int/float numeric conversion to ensure we have all the external + dependencies to perform these conversions. 
+ """ + # NOTE: more implicit tests are in test_numberctor + + @classmethod + def automatic_populate(cls): + tys = types.integer_domain | types.real_domain + for fromty, toty in itertools.permutations(tys, r=2): + test_name = "test_{fromty}_to_{toty}".format(fromty=fromty, + toty=toty) + setattr(cls, test_name, template(fromty, toty)) + + +TestNumberConversion.automatic_populate() + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_numpy_support.py b/venv/lib/python3.10/site-packages/numba/tests/test_numpy_support.py new file mode 100644 index 0000000000000000000000000000000000000000..bf77dfbccfd81272a7bdbdd542058cca5e88659d --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_numpy_support.py @@ -0,0 +1,437 @@ +""" +Test helper functions from numba.numpy_support. +""" + + +import sys +from itertools import product + +import numpy as np + +import unittest +from numba.core import types +from numba.core.errors import NumbaNotImplementedError +from numba.tests.support import TestCase +from numba.tests.enum_usecases import Shake, RequestError +from numba.np import numpy_support + + +class TestFromDtype(TestCase): + + def test_number_types(self): + """ + Test from_dtype() and as_dtype() with the various scalar number types. 
+ """ + f = numpy_support.from_dtype + + def check(typechar, numba_type): + # Only native ordering and alignment is supported + dtype = np.dtype(typechar) + self.assertIs(f(dtype), numba_type) + self.assertIs(f(np.dtype('=' + typechar)), numba_type) + self.assertEqual(dtype, numpy_support.as_dtype(numba_type)) + + check('?', types.bool_) + check('f', types.float32) + check('f4', types.float32) + check('d', types.float64) + check('f8', types.float64) + + check('F', types.complex64) + check('c8', types.complex64) + check('D', types.complex128) + check('c16', types.complex128) + + check('O', types.pyobject) + + check('b', types.int8) + check('i1', types.int8) + check('B', types.uint8) + check('u1', types.uint8) + + check('h', types.int16) + check('i2', types.int16) + check('H', types.uint16) + check('u2', types.uint16) + + check('i', types.int32) + check('i4', types.int32) + check('I', types.uint32) + check('u4', types.uint32) + + check('q', types.int64) + check('Q', types.uint64) + for name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', + 'int64', 'uint64', 'intp', 'uintp'): + self.assertIs(f(np.dtype(name)), getattr(types, name)) + + # Non-native alignments are unsupported (except for 1-byte types) + foreign_align = '>' if sys.byteorder == 'little' else '<' + for letter in 'hHiIlLqQfdFD': + self.assertRaises(NumbaNotImplementedError, f, + np.dtype(foreign_align + letter)) + + def test_string_types(self): + """ + Test from_dtype() and as_dtype() with the character string types. 
+ """ + def check(typestring, numba_type): + # Only native ordering and alignment is supported + dtype = np.dtype(typestring) + self.assertEqual(numpy_support.from_dtype(dtype), numba_type) + self.assertEqual(dtype, numpy_support.as_dtype(numba_type)) + + check('S10', types.CharSeq(10)) + check('a11', types.CharSeq(11)) + check('U12', types.UnicodeCharSeq(12)) + + def check_datetime_types(self, letter, nb_class): + def check(dtype, numba_type, code): + tp = numpy_support.from_dtype(dtype) + self.assertEqual(tp, numba_type) + self.assertEqual(tp.unit_code, code) + self.assertEqual(numpy_support.as_dtype(numba_type), dtype) + self.assertEqual(numpy_support.as_dtype(tp), dtype) + + # Unit-less ("generic") type + check(np.dtype(letter), nb_class(''), 14) + + def test_datetime_types(self): + """ + Test from_dtype() and as_dtype() with the datetime types. + """ + self.check_datetime_types('M', types.NPDatetime) + + def test_timedelta_types(self): + """ + Test from_dtype() and as_dtype() with the timedelta types. 
+ """ + self.check_datetime_types('m', types.NPTimedelta) + + def test_struct_types(self): + def check(dtype, fields, size, aligned): + tp = numpy_support.from_dtype(dtype) + self.assertIsInstance(tp, types.Record) + # Only check for dtype equality, as the Numba type may be interned + self.assertEqual(tp.dtype, dtype) + self.assertEqual(tp.fields, fields) + self.assertEqual(tp.size, size) + self.assertEqual(tp.aligned, aligned) + + dtype = np.dtype([('a', np.int16), ('b', np.int32)]) + check(dtype, + fields={'a': (types.int16, 0, None, None), + 'b': (types.int32, 2, None, None)}, + size=6, aligned=False) + + dtype = np.dtype([('a', np.int16), ('b', np.int32)], align=True) + check(dtype, + fields={'a': (types.int16, 0, None, None), + 'b': (types.int32, 4, None, None)}, + size=8, aligned=True) + + dtype = np.dtype([('m', np.int32), ('n', 'S5')]) + check(dtype, + fields={'m': (types.int32, 0, None, None), + 'n': (types.CharSeq(5), 4, None, None)}, + size=9, aligned=False) + + def test_enum_type(self): + + def check(base_inst, enum_def, type_class): + np_dt = np.dtype(base_inst) + nb_ty = numpy_support.from_dtype(np_dt) + inst = type_class(enum_def, nb_ty) + recovered = numpy_support.as_dtype(inst) + self.assertEqual(np_dt, recovered) + + dts = [np.float64, np.int32, np.complex128, np.bool_] + enums = [Shake, RequestError] + + for dt, enum in product(dts, enums): + check(dt, enum, types.EnumMember) + + for dt, enum in product(dts, enums): + check(dt, enum, types.IntEnumMember) + + +class ValueTypingTestBase(object): + """ + Common tests for the typing of values. Also used by test_special. + """ + + def check_number_values(self, func): + """ + Test *func*() with scalar numeric values. 
+ """ + f = func + # Standard Python types get inferred by numpy + self.assertIn(f(1), (types.int32, types.int64)) + self.assertIn(f(2**31 - 1), (types.int32, types.int64)) + self.assertIn(f(-2**31), (types.int32, types.int64)) + self.assertIs(f(1.0), types.float64) + self.assertIs(f(1.0j), types.complex128) + self.assertIs(f(True), types.bool_) + self.assertIs(f(False), types.bool_) + # Numpy scalar types get converted by from_dtype() + for name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', + 'int64', 'uint64', 'intc', 'uintc', 'intp', 'uintp', + 'float32', 'float64', 'complex64', 'complex128', + 'bool_'): + val = getattr(np, name)() + self.assertIs(f(val), getattr(types, name)) + + def _base_check_datetime_values(self, func, np_type, nb_type): + f = func + for unit in [ + '', 'Y', 'M', 'D', 'h', 'm', 's', + 'ms', 'us', 'ns', 'ps', 'fs', 'as', + ]: + if unit: + t = np_type(3, unit) + else: + # "generic" datetime / timedelta + t = np_type('Nat') + tp = f(t) + # This ensures the unit hasn't been lost + self.assertEqual(tp, nb_type(unit)) + + def check_datetime_values(self, func): + """ + Test *func*() with np.datetime64 values. + """ + self._base_check_datetime_values(func, np.datetime64, types.NPDatetime) + + def check_timedelta_values(self, func): + """ + Test *func*() with np.timedelta64 values. + """ + self._base_check_datetime_values(func, np.timedelta64, + types.NPTimedelta) + + +class TestArrayScalars(ValueTypingTestBase, TestCase): + + def test_number_values(self): + """ + Test map_arrayscalar_type() with scalar number values. + """ + self.check_number_values(numpy_support.map_arrayscalar_type) + + def test_datetime_values(self): + """ + Test map_arrayscalar_type() with np.datetime64 values. 
+ """ + f = numpy_support.map_arrayscalar_type + self.check_datetime_values(f) + # datetime64s with a non-one factor shouldn't be supported + t = np.datetime64('2014', '10Y') + with self.assertRaises(NumbaNotImplementedError): + f(t) + + def test_timedelta_values(self): + """ + Test map_arrayscalar_type() with np.timedelta64 values. + """ + f = numpy_support.map_arrayscalar_type + self.check_timedelta_values(f) + # timedelta64s with a non-one factor shouldn't be supported + t = np.timedelta64(10, '10Y') + with self.assertRaises(NumbaNotImplementedError): + f(t) + + +class FakeUFunc(object): + __slots__ = ('nin', 'nout', 'types', 'ntypes') + __name__ = "fake ufunc" + + def __init__(self, types): + self.types = types + in_, out = self.types[0].split('->') + self.nin = len(in_) + self.nout = len(out) + self.ntypes = len(types) + for tp in types: + in_, out = self.types[0].split('->') + assert len(in_) == self.nin + assert len(out) == self.nout + + +# Typical types for np.add, np.multiply, np.isnan +_add_types = ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', + 'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d', + 'gg->g', 'FF->F', 'DD->D', 'GG->G', 'Mm->M', 'mm->m', 'mM->M', + 'OO->O'] + +_mul_types = ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', + 'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d', + 'gg->g', 'FF->F', 'DD->D', 'GG->G', 'mq->m', 'qm->m', 'md->m', + 'dm->m', 'OO->O'] + +# Those ones only have floating-point loops +_isnan_types = ['e->?', 'f->?', 'd->?', 'g->?', 'F->?', 'D->?', 'G->?'] +_sqrt_types = ['e->e', 'f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O'] + + +class TestUFuncs(TestCase): + """ + Test ufunc helpers. 
+ """ + + def test_ufunc_find_matching_loop(self): + f = numpy_support.ufunc_find_matching_loop + np_add = FakeUFunc(_add_types) + np_mul = FakeUFunc(_mul_types) + np_isnan = FakeUFunc(_isnan_types) + np_sqrt = FakeUFunc(_sqrt_types) + + def check(ufunc, input_types, sigs, output_types=()): + """ + Check that ufunc_find_matching_loop() finds one of the given + *sigs* for *ufunc*, *input_types* and optional *output_types*. + """ + loop = f(ufunc, input_types + output_types) + self.assertTrue(loop) + if isinstance(sigs, str): + sigs = (sigs,) + self.assertIn(loop.ufunc_sig, sigs, + "inputs=%s and outputs=%s should have selected " + "one of %s, got %s" + % (input_types, output_types, sigs, loop.ufunc_sig)) + self.assertEqual(len(loop.numpy_inputs), len(loop.inputs)) + self.assertEqual(len(loop.numpy_outputs), len(loop.outputs)) + if not output_types: + # Add explicit outputs and check the result is the same + loop_explicit = f(ufunc, list(input_types) + loop.outputs) + self.assertEqual(loop_explicit, loop) + else: + self.assertEqual(loop.outputs, list(output_types)) + # Round-tripping inputs and outputs + loop_rt = f(ufunc, loop.inputs + loop.outputs) + self.assertEqual(loop_rt, loop) + return loop + + def check_exact(ufunc, input_types, sigs, output_types=()): + """ + Like check(), but also ensure no casting of inputs occurred. 
+ """ + loop = check(ufunc, input_types, sigs, output_types) + self.assertEqual(loop.inputs, list(input_types)) + + def check_no_match(ufunc, input_types): + loop = f(ufunc, input_types) + self.assertIs(loop, None) + + # Exact matching for number types + check_exact(np_add, (types.bool_, types.bool_), '??->?') + check_exact(np_add, (types.int8, types.int8), 'bb->b') + check_exact(np_add, (types.uint8, types.uint8), 'BB->B') + check_exact(np_add, (types.int64, types.int64), ('ll->l', 'qq->q')) + check_exact(np_add, (types.uint64, types.uint64), ('LL->L', 'QQ->Q')) + check_exact(np_add, (types.float32, types.float32), 'ff->f') + check_exact(np_add, (types.float64, types.float64), 'dd->d') + check_exact(np_add, (types.complex64, types.complex64), 'FF->F') + check_exact(np_add, (types.complex128, types.complex128), 'DD->D') + + # Exact matching for datetime64 and timedelta64 types + check_exact(np_add, (types.NPTimedelta('s'), types.NPTimedelta('s')), + 'mm->m', output_types=(types.NPTimedelta('s'),)) + check_exact(np_add, (types.NPTimedelta('ms'), types.NPDatetime('s')), + 'mM->M', output_types=(types.NPDatetime('ms'),)) + check_exact(np_add, (types.NPDatetime('s'), types.NPTimedelta('s')), + 'Mm->M', output_types=(types.NPDatetime('s'),)) + check_exact(np_add, (types.NPDatetime('s'), types.NPTimedelta('')), + 'Mm->M', output_types=(types.NPDatetime('s'),)) + check_exact(np_add, (types.NPDatetime('ns'), types.NPTimedelta('')), + 'Mm->M', output_types=(types.NPDatetime('ns'),)) + check_exact(np_add, (types.NPTimedelta(''), types.NPDatetime('s')), + 'mM->M', output_types=(types.NPDatetime('s'),)) + check_exact(np_add, (types.NPTimedelta(''), types.NPDatetime('ns')), + 'mM->M', output_types=(types.NPDatetime('ns'),)) + check_exact(np_mul, (types.NPTimedelta('s'), types.int64), + 'mq->m', output_types=(types.NPTimedelta('s'),)) + check_exact(np_mul, (types.float64, types.NPTimedelta('s')), + 'dm->m', output_types=(types.NPTimedelta('s'),)) + + # Mix and match number 
types, with casting + check(np_add, (types.bool_, types.int8), 'bb->b') + check(np_add, (types.uint8, types.bool_), 'BB->B') + check(np_add, (types.int16, types.uint16), 'ii->i') + check(np_add, (types.complex64, types.float64), 'DD->D') + check(np_add, (types.float64, types.complex64), 'DD->D') + # Integers, when used together with floating-point numbers, + # should cast to any real or complex (see #2006) + int_types = [types.int32, types.uint32, types.int64, types.uint64] + for intty in int_types: + check(np_add, (types.float32, intty), 'ff->f') + check(np_add, (types.float64, intty), 'dd->d') + check(np_add, (types.complex64, intty), 'FF->F') + check(np_add, (types.complex128, intty), 'DD->D') + # However, when used alone, they should cast only to + # floating-point types of sufficient precision + # (typical use case: np.sqrt(2) should give an accurate enough value) + for intty in int_types: + check(np_sqrt, (intty,), 'd->d') + check(np_isnan, (intty,), 'd->?') + + # With some timedelta64 arguments as well + check(np_mul, (types.NPTimedelta('s'), types.int32), + 'mq->m', output_types=(types.NPTimedelta('s'),)) + check(np_mul, (types.NPTimedelta('s'), types.uint32), + 'mq->m', output_types=(types.NPTimedelta('s'),)) + check(np_mul, (types.NPTimedelta('s'), types.float32), + 'md->m', output_types=(types.NPTimedelta('s'),)) + check(np_mul, (types.float32, types.NPTimedelta('s')), + 'dm->m', output_types=(types.NPTimedelta('s'),)) + + # No match + check_no_match(np_add, (types.NPDatetime('s'), types.NPDatetime('s'))) + # No implicit casting from int64 to timedelta64 (Numpy would allow + # this). 
+ check_no_match(np_add, (types.NPTimedelta('s'), types.int64)) + + def test_layout_checker(self): + def check_arr(arr): + dims = arr.shape + strides = arr.strides + itemsize = arr.dtype.itemsize + is_c = numpy_support.is_contiguous(dims, strides, itemsize) + is_f = numpy_support.is_fortran(dims, strides, itemsize) + expect_c = arr.flags['C_CONTIGUOUS'] + expect_f = arr.flags['F_CONTIGUOUS'] + self.assertEqual(is_c, expect_c) + self.assertEqual(is_f, expect_f) + + arr = np.arange(24) + # 1D + check_arr(arr) + # 2D + check_arr(arr.reshape((3, 8))) + check_arr(arr.reshape((3, 8)).T) + check_arr(arr.reshape((3, 8))[::2]) + # 3D + check_arr(arr.reshape((2, 3, 4))) + check_arr(arr.reshape((2, 3, 4)).T) + # middle axis is shape 1 + check_arr(arr.reshape((2, 3, 4))[:, ::3]) + check_arr(arr.reshape((2, 3, 4)).T[:, ::3]) + + # leading axis is shape 1 + check_arr(arr.reshape((2, 3, 4))[::2]) + check_arr(arr.reshape((2, 3, 4)).T[:, :, ::2]) + # 2 leading axis are shape 1 + check_arr(arr.reshape((2, 3, 4))[::2, ::3]) + check_arr(arr.reshape((2, 3, 4)).T[:, ::3, ::2]) + # single item slices for all axis + check_arr(arr.reshape((2, 3, 4))[::2, ::3, ::4]) + check_arr(arr.reshape((2, 3, 4)).T[::4, ::3, ::2]) + # 4D + check_arr(arr.reshape((2, 2, 3, 2))[::2, ::2, ::3]) + check_arr(arr.reshape((2, 2, 3, 2)).T[:, ::3, ::2, ::2]) + # outer zero dims + check_arr(arr.reshape((2, 2, 3, 2))[::5, ::2, ::3]) + check_arr(arr.reshape((2, 2, 3, 2)).T[:, ::3, ::2, ::5]) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_numpyadapt.py b/venv/lib/python3.10/site-packages/numba/tests/test_numpyadapt.py new file mode 100644 index 0000000000000000000000000000000000000000..8df1f028813cee233e67c610e7feddcc8bc14ce2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_numpyadapt.py @@ -0,0 +1,43 @@ +from ctypes import * + +import numpy as np + +import unittest +from numba import _helperlib + + +class 
ArrayStruct3D(Structure): + # Mimic the structure defined in numba.targets.arrayobj's make_array() + _fields_ = [ + ("meminfo", c_void_p), + ("parent", c_void_p), + ("nitems", c_ssize_t), + ("itemsize", c_ssize_t), + ("data", c_void_p), + ("shape", (c_ssize_t * 3)), + ("strides", (c_ssize_t * 3)), + ] + + +class TestArrayAdaptor(unittest.TestCase): + def test_array_adaptor(self): + arystruct = ArrayStruct3D() + + adaptorptr = _helperlib.c_helpers['adapt_ndarray'] + adaptor = PYFUNCTYPE(c_int, py_object, c_void_p)(adaptorptr) + + ary = np.arange(60).reshape(2, 3, 10) + status = adaptor(ary, byref(arystruct)) + self.assertEqual(status, 0) + self.assertEqual(arystruct.data, ary.ctypes.data) + self.assertNotEqual(arystruct.meminfo, 0) + self.assertEqual(arystruct.parent, id(ary)) + self.assertEqual(arystruct.nitems, 60) + self.assertEqual(arystruct.itemsize, ary.itemsize) + for i in range(3): + self.assertEqual(arystruct.shape[i], ary.ctypes.shape[i]) + self.assertEqual(arystruct.strides[i], ary.ctypes.strides[i]) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_obj_lifetime.py b/venv/lib/python3.10/site-packages/numba/tests/test_obj_lifetime.py new file mode 100644 index 0000000000000000000000000000000000000000..4c46559fa81ba38bbd9e04fba727be4c2b647f82 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_obj_lifetime.py @@ -0,0 +1,494 @@ +import collections +import weakref +import gc +import operator +from itertools import takewhile + +import unittest +from numba import njit, jit +from numba.core.compiler import CompilerBase, DefaultPassBuilder +from numba.core.untyped_passes import PreserveIR +from numba.core.typed_passes import IRLegalization +from numba.core import types, ir +from numba.tests.support import TestCase, override_config, SerialMixin + + +class _Dummy(object): + + def __init__(self, recorder, name): + self.recorder = recorder + self.name = name + recorder._add_dummy(self) 
+ + def __add__(self, other): + assert isinstance(other, _Dummy) + return _Dummy(self.recorder, "%s + %s" % (self.name, other.name)) + + def __iter__(self): + return _DummyIterator(self.recorder, "iter(%s)" % self.name) + + +class _DummyIterator(_Dummy): + + count = 0 + + def __next__(self): + if self.count >= 3: + raise StopIteration + self.count += 1 + return _Dummy(self.recorder, "%s#%s" % (self.name, self.count)) + + next = __next__ + + +class RefRecorder(object): + """ + An object which records events when instances created through it + are deleted. Custom events can also be recorded to aid in + diagnosis. + """ + + def __init__(self): + self._counts = collections.defaultdict(int) + self._events = [] + self._wrs = {} + + def make_dummy(self, name): + """ + Make an object whose deletion will be recorded as *name*. + """ + return _Dummy(self, name) + + def _add_dummy(self, dummy): + wr = weakref.ref(dummy, self._on_disposal) + self._wrs[wr] = dummy.name + + __call__ = make_dummy + + def mark(self, event): + """ + Manually append *event* to the recorded events. + *event* can be formatted using format(). + """ + count = self._counts[event] + 1 + self._counts[event] = count + self._events.append(event.format(count=count)) + + def _on_disposal(self, wr): + name = self._wrs.pop(wr) + self._events.append(name) + + @property + def alive(self): + """ + A list of objects which haven't been deleted yet. + """ + return [wr() for wr in self._wrs] + + @property + def recorded(self): + """ + A list of recorded events. 
+ """ + return self._events + + +def simple_usecase1(rec): + a = rec('a') + b = rec('b') + c = rec('c') + a = b + c + rec.mark('--1--') + d = a + a # b + c + b + c + rec.mark('--2--') + return d + +def simple_usecase2(rec): + a = rec('a') + b = rec('b') + rec.mark('--1--') + x = a + y = x + a = None + return y + +def looping_usecase1(rec): + a = rec('a') + b = rec('b') + c = rec('c') + x = b + for y in a: + x = x + y + rec.mark('--loop bottom--') + rec.mark('--loop exit--') + x = x + c + return x + +def looping_usecase2(rec): + a = rec('a') + b = rec('b') + cum = rec('cum') + for x in a: + rec.mark('--outer loop top--') + cum = cum + x + z = x + x + rec.mark('--inner loop entry #{count}--') + for y in b: + rec.mark('--inner loop top #{count}--') + cum = cum + y + rec.mark('--inner loop bottom #{count}--') + rec.mark('--inner loop exit #{count}--') + if cum: + cum = y + z + else: + # Never gets here, but let the Numba compiler see a `break` opcode + break + rec.mark('--outer loop bottom #{count}--') + else: + rec.mark('--outer loop else--') + rec.mark('--outer loop exit--') + return cum + +def generator_usecase1(rec): + a = rec('a') + b = rec('b') + yield a + yield b + +def generator_usecase2(rec): + a = rec('a') + b = rec('b') + for x in a: + yield x + yield b + + +class MyError(RuntimeError): + pass + +def do_raise(x): + raise MyError(x) + +def raising_usecase1(rec): + a = rec('a') + b = rec('b') + d = rec('d') + if a: + do_raise("foo") + c = rec('c') + c + a + c + b + +def raising_usecase2(rec): + a = rec('a') + b = rec('b') + if a: + c = rec('c') + do_raise(b) + a + c + +def raising_usecase3(rec): + a = rec('a') + b = rec('b') + if a: + raise MyError(b) + + +def del_before_definition(rec): + """ + This test reveal a bug that there is a del on uninitialized variable + """ + n = 5 + for i in range(n): + rec.mark(str(i)) + n = 0 + for j in range(n): + return 0 + else: + if i < 2: + continue + elif i == 2: + for j in range(i): + return i + rec.mark('FAILED') + 
rec.mark('FAILED') + rec.mark('FAILED') + rec.mark('OK') + return -1 + + +def inf_loop_multiple_back_edge(rec): + """ + test to reveal bug of invalid liveness when infinite loop has multiple + backedge. + """ + while True: + rec.mark("yield") + yield + p = rec('p') + if p: + rec.mark('bra') + pass + + +class TestObjLifetime(TestCase): + """ + Test lifetime of Python objects inside jit-compiled functions. + """ + + def compile(self, pyfunc): + # Note: looplift must be disabled. The test require the function + # control-flow to be unchanged. + cfunc = jit((types.pyobject,), forceobj=True, looplift=False)(pyfunc) + return cfunc + + def compile_and_record(self, pyfunc, raises=None): + rec = RefRecorder() + cfunc = self.compile(pyfunc) + if raises is not None: + with self.assertRaises(raises): + cfunc(rec) + else: + cfunc(rec) + return rec + + def assertRecordOrder(self, rec, expected): + """ + Check that the *expected* markers occur in that order in *rec*'s + recorded events. + """ + actual = [] + recorded = rec.recorded + remaining = list(expected) + # Find out in which order, if any, the expected events were recorded + for d in recorded: + if d in remaining: + actual.append(d) + # User may or may not expect duplicates, handle them properly + remaining.remove(d) + self.assertEqual(actual, expected, + "the full list of recorded events is: %r" % (recorded,)) + + def test_simple1(self): + rec = self.compile_and_record(simple_usecase1) + self.assertFalse(rec.alive) + self.assertRecordOrder(rec, ['a', 'b', '--1--']) + self.assertRecordOrder(rec, ['a', 'c', '--1--']) + self.assertRecordOrder(rec, ['--1--', 'b + c', '--2--']) + + def test_simple2(self): + rec = self.compile_and_record(simple_usecase2) + self.assertFalse(rec.alive) + self.assertRecordOrder(rec, ['b', '--1--', 'a']) + + def test_looping1(self): + rec = self.compile_and_record(looping_usecase1) + self.assertFalse(rec.alive) + # a and b are unneeded after the loop, check they were disposed of + 
self.assertRecordOrder(rec, ['a', 'b', '--loop exit--', 'c']) + # check disposal order of iterator items and iterator + self.assertRecordOrder(rec, ['iter(a)#1', '--loop bottom--', + 'iter(a)#2', '--loop bottom--', + 'iter(a)#3', '--loop bottom--', + 'iter(a)', '--loop exit--', + ]) + + def test_looping2(self): + rec = self.compile_and_record(looping_usecase2) + self.assertFalse(rec.alive) + # `a` is disposed of after its iterator is taken + self.assertRecordOrder(rec, ['a', '--outer loop top--']) + # Check disposal of iterators + self.assertRecordOrder(rec, ['iter(a)', '--outer loop else--', + '--outer loop exit--']) + self.assertRecordOrder(rec, ['iter(b)', '--inner loop exit #1--', + 'iter(b)', '--inner loop exit #2--', + 'iter(b)', '--inner loop exit #3--', + ]) + # Disposal of in-loop variable `x` + self.assertRecordOrder(rec, ['iter(a)#1', '--inner loop entry #1--', + 'iter(a)#2', '--inner loop entry #2--', + 'iter(a)#3', '--inner loop entry #3--', + ]) + # Disposal of in-loop variable `z` + self.assertRecordOrder(rec, ['iter(a)#1 + iter(a)#1', + '--outer loop bottom #1--', + ]) + + def exercise_generator(self, genfunc): + cfunc = self.compile(genfunc) + # Exhaust the generator + rec = RefRecorder() + with self.assertRefCount(rec): + gen = cfunc(rec) + next(gen) + self.assertTrue(rec.alive) + list(gen) + self.assertFalse(rec.alive) + # Instantiate the generator but never iterate + rec = RefRecorder() + with self.assertRefCount(rec): + gen = cfunc(rec) + del gen + gc.collect() + self.assertFalse(rec.alive) + # Stop iterating before exhaustion + rec = RefRecorder() + with self.assertRefCount(rec): + gen = cfunc(rec) + next(gen) + self.assertTrue(rec.alive) + del gen + gc.collect() + self.assertFalse(rec.alive) + + def test_generator1(self): + self.exercise_generator(generator_usecase1) + + def test_generator2(self): + self.exercise_generator(generator_usecase2) + + def test_del_before_definition(self): + rec = self.compile_and_record(del_before_definition) + 
self.assertEqual(rec.recorded, ['0', '1', '2']) + + def test_raising1(self): + with self.assertRefCount(do_raise): + rec = self.compile_and_record(raising_usecase1, raises=MyError) + self.assertFalse(rec.alive) + + def test_raising2(self): + with self.assertRefCount(do_raise): + rec = self.compile_and_record(raising_usecase2, raises=MyError) + self.assertFalse(rec.alive) + + def test_raising3(self): + with self.assertRefCount(MyError): + rec = self.compile_and_record(raising_usecase3, raises=MyError) + self.assertFalse(rec.alive) + + def test_inf_loop_multiple_back_edge(self): + cfunc = self.compile(inf_loop_multiple_back_edge) + rec = RefRecorder() + iterator = iter(cfunc(rec)) + next(iterator) + self.assertEqual(rec.alive, []) + next(iterator) + self.assertEqual(rec.alive, []) + next(iterator) + self.assertEqual(rec.alive, []) + self.assertEqual(rec.recorded, + ['yield', 'p', 'bra', 'yield', 'p', 'bra', 'yield']) + + +class TestExtendingVariableLifetimes(SerialMixin, TestCase): + # Test for `numba.config.EXTEND_VARIABLE_LIFETIMES` which moves the ir.Del + # nodes to just before a block's terminator, i.e. their lifetime is extended + # beyond the point of last use. 
+ + def test_lifetime_basic(self): + + def get_ir(extend_lifetimes): + class IRPreservingCompiler(CompilerBase): + + def define_pipelines(self): + pm = DefaultPassBuilder.define_nopython_pipeline(self.state) + pm.add_pass_after(PreserveIR, IRLegalization) + pm.finalize() + return [pm] + + @njit(pipeline_class=IRPreservingCompiler) + def foo(): + a = 10 + b = 20 + c = a + b + # a and b are now unused, standard behaviour is ir.Del for them here + d = c / c + return d + + with override_config('EXTEND_VARIABLE_LIFETIMES', extend_lifetimes): + foo() + cres = foo.overloads[foo.signatures[0]] + func_ir = cres.metadata['preserved_ir'] + + return func_ir + + + def check(func_ir, expect): + # assert single block + self.assertEqual(len(func_ir.blocks), 1) + blk = next(iter(func_ir.blocks.values())) + + # check sequencing + for expect_class, got_stmt in zip(expect, blk.body): + self.assertIsInstance(got_stmt, expect_class) + + del_after_use_ir = get_ir(False) + # should be 3 assigns (a, b, c), 2 del (a, b), assign (d), del (c) + # assign for cast d to return, del (d), return + expect = [*((ir.Assign,) * 3), ir.Del, ir.Del, ir.Assign, ir.Del, + ir.Assign, ir.Del, ir.Return] + check(del_after_use_ir, expect) + + del_at_block_end_ir = get_ir(True) + # should be 4 assigns (a, b, c, d), assign for cast d to return, + # 4 dels (a, b, c, d) then the return. 
+ expect = [*((ir.Assign,) * 4), ir.Assign, *((ir.Del,) * 4), ir.Return] + check(del_at_block_end_ir, expect) + + def test_dbg_extend_lifetimes(self): + + def get_ir(**options): + class IRPreservingCompiler(CompilerBase): + + def define_pipelines(self): + pm = DefaultPassBuilder.define_nopython_pipeline(self.state) + pm.add_pass_after(PreserveIR, IRLegalization) + pm.finalize() + return [pm] + + @njit(pipeline_class=IRPreservingCompiler, **options) + def foo(): + a = 10 + b = 20 + c = a + b + # a and b are now unused, standard behaviour is ir.Del for them here + d = c / c + return d + + foo() + cres = foo.overloads[foo.signatures[0]] + func_ir = cres.metadata['preserved_ir'] + + return func_ir + + # _dbg_extend_lifetimes is on when debug=True + ir_debug = get_ir(debug=True) + # explicitly turn on _dbg_extend_lifetimes + ir_debug_ext = get_ir(debug=True, _dbg_extend_lifetimes=True) + # explicitly turn off _dbg_extend_lifetimes + ir_debug_no_ext = get_ir(debug=True, _dbg_extend_lifetimes=False) + + def is_del_grouped_at_the_end(fir): + [blk] = fir.blocks.values() + # Mark all statements that are ir.Del + inst_is_del = [isinstance(stmt, ir.Del) for stmt in blk.body] + # Get the leading segment that are not dels + not_dels = list(takewhile(operator.not_, inst_is_del)) + # Compute the starting position of the dels + begin = len(not_dels) + # Get the remaining segment that are all dels + all_dels = list(takewhile(operator.truth, inst_is_del[begin:])) + # Compute the ending position of the dels + end = begin + len(all_dels) + # If the dels are all grouped at the end (before the terminator), + # the end position will be the last position of the list + return end == len(inst_is_del) - 1 + + self.assertTrue(is_del_grouped_at_the_end(ir_debug)) + self.assertTrue(is_del_grouped_at_the_end(ir_debug_ext)) + self.assertFalse(is_del_grouped_at_the_end(ir_debug_no_ext)) + + +if __name__ == "__main__": + unittest.main() diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/test_object_mode.py b/venv/lib/python3.10/site-packages/numba/tests/test_object_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..63c4d877381462387a20d5014228820f5ec2e999 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_object_mode.py @@ -0,0 +1,204 @@ +""" +Testing object mode specifics. + +""" + +import numpy as np + +import unittest +from numba import jit, types +from numba.core import utils +from numba.tests.support import TestCase + + +def complex_constant(n): + tmp = n + 4 + return tmp + 3j + + +def long_constant(n): + return n + 100000000000000000000000000000000000000000000000 + + +def delitem_usecase(x): + del x[:] + + +def loop_nest_3(x, y): + n = 0 + for i in range(x): + for j in range(y): + for k in range(x + y): + n += i * j + + return n + + +def array_of_object(x): + return x + + +class TestObjectMode(TestCase): + + def test_complex_constant(self): + pyfunc = complex_constant + cfunc = jit((), forceobj=True)(pyfunc) + self.assertPreciseEqual(pyfunc(12), cfunc(12)) + + def test_long_constant(self): + pyfunc = long_constant + cfunc = jit((), forceobj=True)(pyfunc) + self.assertPreciseEqual(pyfunc(12), cfunc(12)) + + def test_loop_nest(self): + """ + Test bug that decref the iterator early. 
+ If the bug occurs, a segfault should occur + """ + pyfunc = loop_nest_3 + cfunc = jit((), forceobj=True)(pyfunc) + self.assertEqual(pyfunc(5, 5), cfunc(5, 5)) + + def bm_pyfunc(): + pyfunc(5, 5) + + def bm_cfunc(): + cfunc(5, 5) + + utils.benchmark(bm_pyfunc) + utils.benchmark(bm_cfunc) + + def test_array_of_object(self): + cfunc = jit(forceobj=True)(array_of_object) + objarr = np.array([object()] * 10) + self.assertIs(cfunc(objarr), objarr) + + def test_sequence_contains(self): + """ + Test handling of the `in` comparison + """ + @jit(forceobj=True) + def foo(x, y): + return x in y + + self.assertTrue(foo(1, [0, 1])) + self.assertTrue(foo(0, [0, 1])) + self.assertFalse(foo(2, [0, 1])) + + with self.assertRaises(TypeError) as raises: + foo(None, None) + + self.assertIn("is not iterable", str(raises.exception)) + + def test_delitem(self): + pyfunc = delitem_usecase + cfunc = jit((), forceobj=True)(pyfunc) + + l = [3, 4, 5] + cfunc(l) + self.assertPreciseEqual(l, []) + with self.assertRaises(TypeError): + cfunc(42) + + def test_starargs_non_tuple(self): + def consumer(*x): + return x + + @jit(forceobj=True) + def foo(x): + return consumer(*x) + + arg = "ijo" + got = foo(arg) + expect = foo.py_func(arg) + self.assertEqual(got, tuple(arg)) + self.assertEqual(got, expect) + + def test_expr_undef(self): + @jit(forceobj=True) + def foo(): + # In Py3.12, this will emit a Expr.undef. + return [x for x in (1, 2)] + + self.assertEqual(foo(), foo.py_func()) + + +class TestObjectModeInvalidRewrite(TestCase): + """ + Tests to ensure that rewrite passes didn't affect objmode lowering. 
+ """ + + def _ensure_objmode(self, disp): + self.assertTrue(disp.signatures) + self.assertFalse(disp.nopython_signatures) + return disp + + def test_static_raise_in_objmode_fallback(self): + """ + Test code based on user submitted issue at + https://github.com/numba/numba/issues/2159 + """ + def test0(n): + return n + + def test1(n): + if n == 0: + # static raise will fail in objmode if the IR is modified by + # rewrite pass + raise ValueError() + return test0(n) # trigger objmode fallback + + compiled = jit(forceobj=True)(test1) + self.assertEqual(test1(10), compiled(10)) + self._ensure_objmode(compiled) + + def test_static_setitem_in_objmode_fallback(self): + """ + Test code based on user submitted issue at + https://github.com/numba/numba/issues/2169 + """ + + def test0(n): + return n + + def test(a1, a2): + a1 = np.asarray(a1) + # static setitem here will fail in objmode if the IR is modified by + # rewrite pass + a2[0] = 1 + return test0(a1.sum() + a2.sum()) # trigger objmode fallback + + compiled = jit(forceobj=True)(test) + args = np.array([3]), np.array([4]) + self.assertEqual(test(*args), compiled(*args)) + self._ensure_objmode(compiled) + + def test_dynamic_func_objmode(self): + """ + Test issue https://github.com/numba/numba/issues/3355 + """ + func_text = "def func():\n" + func_text += " np.array([1,2,3])\n" + loc_vars = {} + custom_globals = {'np': np} + exec(func_text, custom_globals, loc_vars) + func = loc_vars['func'] + jitted = jit(forceobj=True)(func) + jitted() + + def test_issue_9725_label_renaming(self): + # Test issue https://github.com/numba/numba/issues/9725 + # this should compile via fallback + @jit(forceobj=True) + def f(): + for _ in (): # cannot lift this loop as a nopython loop + [0 for k in (None,)] + f() + self._ensure_objmode(f) + lifted = f.overloads[f.signatures[0]].lifted[0] + self.assertFalse(lifted.nopython_signatures) + self.assertEqual(lifted.signatures, [(types.Tuple(()),)]) + + +if __name__ == '__main__': + unittest.main() 
diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_objects.py b/venv/lib/python3.10/site-packages/numba/tests/test_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..a47f9ca00877ec2dcfc203e7df25b4ddd5da3003 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_objects.py @@ -0,0 +1,57 @@ +""" +Test generic manipulation of objects. +""" + + +import unittest +from numba import jit +from numba.core import types +from numba.tests.support import TestCase + + +class C(object): + pass + + +def setattr_usecase(o, v): + o.x = v + + +def delattr_usecase(o): + del o.x + + +class TestAttributes(TestCase): + def test_setattr(self): + pyfunc = setattr_usecase + cfunc = jit((types.pyobject, types.int32), forceobj=True)(pyfunc) + c = C() + cfunc(c, 123) + self.assertEqual(c.x, 123) + + def test_setattr_attribute_error(self): + pyfunc = setattr_usecase + cfunc = jit((types.pyobject, types.int32), forceobj=True)(pyfunc) + # Can't set undeclared slot + with self.assertRaises(AttributeError): + cfunc(object(), 123) + + def test_delattr(self): + pyfunc = delattr_usecase + cfunc = jit((types.pyobject,), forceobj=True)(pyfunc) + c = C() + c.x = 123 + cfunc(c) + with self.assertRaises(AttributeError): + c.x + + def test_delattr_attribute_error(self): + pyfunc = delattr_usecase + cfunc = jit((types.pyobject,), forceobj=True)(pyfunc) + # Can't delete non-existing attribute + with self.assertRaises(AttributeError): + cfunc(C()) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_operators.py b/venv/lib/python3.10/site-packages/numba/tests/test_operators.py new file mode 100644 index 0000000000000000000000000000000000000000..17f6c4915263519754edc20b22f0357f56b43bc8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_operators.py @@ -0,0 +1,1622 @@ +import copy +import itertools +import operator +import unittest + +import numpy as np + +from numba import 
jit, njit +from numba.core import types, utils, errors +from numba.core.types.functions import _header_lead +from numba.tests.support import TestCase, tag, needs_blas +from numba.tests.matmul_usecase import (matmul_usecase, imatmul_usecase, + DumbMatrix,) + +Noflags = {'nopython': True} + +force_pyobj_flags = {'forceobj': True} + + +def make_static_power(exp): + def pow_usecase(x): + return x ** exp + return pow_usecase + + +class LiteralOperatorImpl(object): + + @staticmethod + def add_usecase(x, y): + return x + y + + @staticmethod + def iadd_usecase(x, y): + x += y + return x + + @staticmethod + def sub_usecase(x, y): + return x - y + + @staticmethod + def isub_usecase(x, y): + x -= y + return x + + @staticmethod + def mul_usecase(x, y): + return x * y + + @staticmethod + def imul_usecase(x, y): + x *= y + return x + + @staticmethod + def floordiv_usecase(x, y): + return x // y + + @staticmethod + def ifloordiv_usecase(x, y): + x //= y + return x + + @staticmethod + def truediv_usecase(x, y): + return x / y + + @staticmethod + def itruediv_usecase(x, y): + x /= y + return x + + if matmul_usecase: + matmul_usecase = staticmethod(matmul_usecase) + imatmul_usecase = staticmethod(imatmul_usecase) + + @staticmethod + def mod_usecase(x, y): + return x % y + + @staticmethod + def imod_usecase(x, y): + x %= y + return x + + @staticmethod + def pow_usecase(x, y): + return x ** y + + @staticmethod + def ipow_usecase(x, y): + x **= y + return x + + @staticmethod + def bitshift_left_usecase(x, y): + return x << y + + @staticmethod + def bitshift_ileft_usecase(x, y): + x <<= y + return x + + @staticmethod + def bitshift_right_usecase(x, y): + return x >> y + + @staticmethod + def bitshift_iright_usecase(x, y): + x >>= y + return x + + @staticmethod + def bitwise_and_usecase(x, y): + return x & y + + @staticmethod + def bitwise_iand_usecase(x, y): + x &= y + return x + + @staticmethod + def bitwise_or_usecase(x, y): + return x | y + + @staticmethod + def 
bitwise_ior_usecase(x, y): + x |= y + return x + + @staticmethod + def bitwise_xor_usecase(x, y): + return x ^ y + + @staticmethod + def bitwise_ixor_usecase(x, y): + x ^= y + return x + + @staticmethod + def bitwise_not_usecase_binary(x, _unused): + return ~x + + @staticmethod + def bitwise_not_usecase(x): + return ~x + + @staticmethod + def not_usecase(x): + return not(x) + + @staticmethod + def negate_usecase(x): + return -x + + @staticmethod + def unary_positive_usecase(x): + return +x + + @staticmethod + def lt_usecase(x, y): + return x < y + + @staticmethod + def le_usecase(x, y): + return x <= y + + @staticmethod + def gt_usecase(x, y): + return x > y + + @staticmethod + def ge_usecase(x, y): + return x >= y + + @staticmethod + def eq_usecase(x, y): + return x == y + + @staticmethod + def ne_usecase(x, y): + return x != y + + @staticmethod + def in_usecase(x, y): + return x in y + + @staticmethod + def not_in_usecase(x, y): + return x not in y + + @staticmethod + def is_usecase(x, y): + return x is y + + +class FunctionalOperatorImpl(object): + + @staticmethod + def add_usecase(x, y): + return operator.add(x, y) + + @staticmethod + def iadd_usecase(x, y): + return operator.iadd(x, y) + + @staticmethod + def sub_usecase(x, y): + return operator.sub(x, y) + + @staticmethod + def isub_usecase(x, y): + return operator.isub(x, y) + + @staticmethod + def mul_usecase(x, y): + return operator.mul(x, y) + + @staticmethod + def imul_usecase(x, y): + return operator.imul(x, y) + + @staticmethod + def floordiv_usecase(x, y): + return operator.floordiv(x, y) + + @staticmethod + def ifloordiv_usecase(x, y): + return operator.ifloordiv(x, y) + + @staticmethod + def truediv_usecase(x, y): + return operator.truediv(x, y) + + @staticmethod + def itruediv_usecase(x, y): + return operator.itruediv(x, y) + + @staticmethod + def mod_usecase(x, y): + return operator.mod(x, y) + + @staticmethod + def imod_usecase(x, y): + return operator.imod(x, y) + + @staticmethod + def 
pow_usecase(x, y): + return operator.pow(x, y) + + @staticmethod + def ipow_usecase(x, y): + return operator.ipow(x, y) + + @staticmethod + def matmul_usecase(x, y): + return operator.matmul(x, y) + + @staticmethod + def imatmul_usecase(x, y): + return operator.imatmul(x, y) + + @staticmethod + def bitshift_left_usecase(x, y): + return operator.lshift(x, y) + + @staticmethod + def bitshift_ileft_usecase(x, y): + return operator.ilshift(x, y) + + @staticmethod + def bitshift_right_usecase(x, y): + return operator.rshift(x, y) + + @staticmethod + def bitshift_iright_usecase(x, y): + return operator.irshift(x, y) + + @staticmethod + def bitwise_and_usecase(x, y): + return operator.and_(x, y) + + @staticmethod + def bitwise_iand_usecase(x, y): + return operator.iand(x, y) + + @staticmethod + def bitwise_or_usecase(x, y): + return operator.or_(x, y) + + @staticmethod + def bitwise_ior_usecase(x, y): + return operator.ior(x, y) + + @staticmethod + def bitwise_xor_usecase(x, y): + return operator.xor(x, y) + + @staticmethod + def bitwise_ixor_usecase(x, y): + return operator.ixor(x, y) + + @staticmethod + def bitwise_not_usecase_binary(x, _unused): + return operator.invert(x) + + @staticmethod + def bitwise_not_usecase(x): + return operator.invert(x) + + @staticmethod + def not_usecase(x): + return operator.not_(x) + + @staticmethod + def negate_usecase(x): + return operator.neg(x) + + @staticmethod + def unary_positive_usecase(x): + return operator.pos(x) + + @staticmethod + def lt_usecase(x, y): + return operator.lt(x, y) + + @staticmethod + def le_usecase(x, y): + return operator.le(x, y) + + @staticmethod + def gt_usecase(x, y): + return operator.gt(x, y) + + @staticmethod + def ge_usecase(x, y): + return operator.ge(x, y) + + @staticmethod + def eq_usecase(x, y): + return operator.eq(x, y) + + @staticmethod + def ne_usecase(x, y): + return operator.ne(x, y) + + @staticmethod + def in_usecase(x, y): + return operator.contains(y, x) + + @staticmethod + def 
not_in_usecase(x, y): + return not operator.contains(y, x) + + @staticmethod + def is_usecase(x, y): + return operator.is_(x, y) + + +class TestOperators(TestCase): + """ + Test standard Python operators on scalars. + + NOTE: operators on array are generally tested in test_ufuncs. + """ + + op = LiteralOperatorImpl + + _bitwise_opnames = { + 'bitshift_left_usecase': operator.lshift, + 'bitshift_ileft_usecase': operator.ilshift, + 'bitshift_right_usecase': operator.rshift, + 'bitshift_iright_usecase': operator.irshift, + 'bitwise_and_usecase': operator.and_, + 'bitwise_iand_usecase': operator.iand, + 'bitwise_or_usecase': operator.or_, + 'bitwise_ior_usecase': operator.ior, + 'bitwise_xor_usecase': operator.xor, + 'bitwise_ixor_usecase': operator.ixor, + 'bitwise_not_usecase_binary': operator.invert, + } + + def run_test_ints(self, pyfunc, x_operands, y_operands, types_list, + flags=force_pyobj_flags): + for arg_types in types_list: + cfunc = jit(arg_types, **flags)(pyfunc) + for x, y in itertools.product(x_operands, y_operands): + # For inplace ops, we check that the first operand + # was correctly mutated. + x_got = copy.copy(x) + x_expected = copy.copy(x) + got = cfunc(x_got, y) + expected = pyfunc(x_expected, y) + self.assertPreciseEqual( + got, expected, + msg="mismatch for (%r, %r) with types %s: %r != %r" + % (x, y, arg_types, got, expected)) + self.assertPreciseEqual( + x_got, x_expected, + msg="mismatch for (%r, %r) with types %s: %r != %r" + % (x, y, arg_types, x_got, x_expected)) + + def run_test_floats(self, pyfunc, x_operands, y_operands, types_list, + flags=force_pyobj_flags): + for arg_types in types_list: + cfunc = jit(arg_types, **flags)(pyfunc) + for x, y in itertools.product(x_operands, y_operands): + # For inplace ops, we check that the first operand + # was correctly mutated. 
+ x_got = copy.copy(x) + x_expected = copy.copy(x) + got = cfunc(x_got, y) + expected = pyfunc(x_expected, y) + np.testing.assert_allclose(got, expected, rtol=1e-5) + np.testing.assert_allclose(x_got, x_expected, rtol=1e-5) + + def coerce_operand(self, op, numba_type): + if hasattr(op, "dtype"): + return numba_type.cast_python_value(op) + elif numba_type in types.unsigned_domain: + return abs(int(op.real)) + elif numba_type in types.integer_domain: + return int(op.real) + elif numba_type in types.real_domain: + return float(op.real) + else: + return op + + def run_test_scalar_compare(self, pyfunc, flags=force_pyobj_flags, + ordered=True): + ops = self.compare_scalar_operands + types_list = self.compare_types + if not ordered: + types_list = types_list + self.compare_unordered_types + for typ in types_list: + cfunc = jit((typ, typ), **flags)(pyfunc) + for x, y in itertools.product(ops, ops): + x = self.coerce_operand(x, typ) + y = self.coerce_operand(y, typ) + expected = pyfunc(x, y) + got = cfunc(x, y) + # Scalar ops => scalar result + self.assertIs(type(got), type(expected)) + self.assertEqual(got, expected, + "mismatch with %r (%r, %r)" + % (typ, x, y)) + + + # + # Comparison operators + # + + compare_scalar_operands = [-0.5, -1.0 + 1j, -1.0 + 2j, -0.5 + 1j, 1.5] + compare_types = [types.int32, types.int64, + types.uint32, types.uint64, + types.float32, types.float64] + compare_unordered_types = [types.complex64, types.complex128] + + def test_lt_scalar(self, flags=force_pyobj_flags): + self.run_test_scalar_compare(self.op.lt_usecase, flags) + + def test_lt_scalar_npm(self): + self.test_lt_scalar(flags=Noflags) + + def test_le_scalar(self, flags=force_pyobj_flags): + self.run_test_scalar_compare(self.op.le_usecase, flags) + + def test_le_scalar_npm(self): + self.test_le_scalar(flags=Noflags) + + def test_gt_scalar(self, flags=force_pyobj_flags): + self.run_test_scalar_compare(self.op.gt_usecase, flags) + + def test_gt_scalar_npm(self): + 
self.test_gt_scalar(flags=Noflags) + + def test_ge_scalar(self, flags=force_pyobj_flags): + self.run_test_scalar_compare(self.op.ge_usecase, flags) + + def test_ge_scalar_npm(self): + self.test_ge_scalar(flags=Noflags) + + def test_eq_scalar(self, flags=force_pyobj_flags): + self.run_test_scalar_compare(self.op.eq_usecase, flags, ordered=False) + + def test_eq_scalar_npm(self): + self.test_eq_scalar(flags=Noflags) + + def test_ne_scalar(self, flags=force_pyobj_flags): + self.run_test_scalar_compare(self.op.ne_usecase, flags, ordered=False) + + def test_ne_scalar_npm(self): + self.test_ne_scalar(flags=Noflags) + + def test_is_ellipsis(self): + cfunc = njit((types.ellipsis, types.ellipsis))(self.op.is_usecase) + self.assertTrue(cfunc(Ellipsis, Ellipsis)) + + def test_is_void_ptr(self): + # can't call this directly from python, as void cannot be unboxed + cfunc_void = jit( + (types.voidptr, types.voidptr), nopython=True + )(self.op.is_usecase) + + # this wrapper performs the casts from int to voidptr for us + @jit(nopython=True) + def cfunc(x, y): + return cfunc_void(x, y) + + self.assertTrue(cfunc(1, 1)) + self.assertFalse(cfunc(1, 2)) + + # + # Arithmetic operators + # + + def run_binop_bools(self, pyfunc, flags=force_pyobj_flags): + x_operands = [False, False, True, True] + y_operands = [False, True, False, True] + + types_list = [(types.boolean, types.boolean)] + + self.run_test_ints(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + def run_binop_ints(self, pyfunc, flags=force_pyobj_flags): + x_operands = [-5, 0, 1, 2] + y_operands = [-3, -1, 1, 3] + + types_list = [(types.int32, types.int32), + (types.int64, types.int64)] + + self.run_test_ints(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + x_operands = [2, 3] + y_operands = [1, 2] + + types_list = [(types.byte, types.byte), + (types.uint32, types.uint32), + (types.uint64, types.uint64)] + + self.run_test_ints(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + def 
run_binop_floats(self, pyfunc, flags=force_pyobj_flags): + x_operands = [-1.1, 0.0, 1.1] + y_operands = [-1.5, 0.8, 2.1] + + types_list = [(types.float32, types.float32), + (types.float64, types.float64)] + + self.run_test_floats(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + def run_binop_floats_floordiv(self, pyfunc, flags=force_pyobj_flags): + self.run_binop_floats(pyfunc, flags=flags) + + def run_binop_complex(self, pyfunc, flags=force_pyobj_flags): + x_operands = [-1.1 + 0.3j, 0.0 + 0.0j, 1.1j] + y_operands = [-1.5 - 0.7j, 0.8j, 2.1 - 2.0j] + + types_list = [(types.complex64, types.complex64), + (types.complex128, types.complex128)] + + self.run_test_floats(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + def generate_binop_tests(ns, usecases, tp_runners, npm_array=False): + for usecase in usecases: + for tp_name, runner_name in tp_runners.items(): + for nopython in (False, True): + test_name = "test_%s_%s" % (usecase, tp_name) + if nopython: + test_name += "_npm" + flags = Noflags if nopython else force_pyobj_flags + usecase_name = "%s_usecase" % usecase + + def inner(self, runner_name=runner_name, + usecase_name=usecase_name, flags=flags): + runner = getattr(self, runner_name) + op_usecase = getattr(self.op, usecase_name) + runner(op_usecase, flags) + + if nopython and 'array' in tp_name and not npm_array: + def test_meth(self): + with self.assertTypingError(): + inner() + else: + test_meth = inner + + test_meth.__name__ = test_name + + if nopython: + test_meth = tag('important')(test_meth) + + ns[test_name] = test_meth + + + generate_binop_tests(locals(), + ('add', 'iadd', 'sub', 'isub', 'mul', 'imul'), + {'ints': 'run_binop_ints', + 'floats': 'run_binop_floats', + 'complex': 'run_binop_complex', + }) + + generate_binop_tests(locals(), + ('truediv', 'itruediv'), + {'ints': 'run_binop_ints', + 'floats': 'run_binop_floats', + 'complex': 'run_binop_complex', + }) + + # NOTE: floordiv and mod unsupported for complex numbers + 
generate_binop_tests(locals(), + ('floordiv', 'ifloordiv', 'mod', 'imod'), + {'ints': 'run_binop_ints', + 'floats': 'run_binop_floats_floordiv', + }) + + def check_div_errors(self, usecase_name, msg, flags=force_pyobj_flags, + allow_complex=False): + pyfunc = getattr(self.op, usecase_name) + # Signed and unsigned division can take different code paths, + # test them both. + arg_types = [types.int32, types.uint32, types.float64] + if allow_complex: + arg_types.append(types.complex128) + for tp in arg_types: + cfunc = jit((tp, tp), **flags)(pyfunc) + with self.assertRaises(ZeroDivisionError) as cm: + cfunc(1, 0) + # Test exception message if not in object mode + if flags is not force_pyobj_flags: + self.assertIn(msg, str(cm.exception)) + + def test_truediv_errors(self, flags=force_pyobj_flags): + self.check_div_errors("truediv_usecase", "division by zero", flags=flags, + allow_complex=True) + + def test_truediv_errors_npm(self): + self.test_truediv_errors(flags=Noflags) + + def test_floordiv_errors(self, flags=force_pyobj_flags): + self.check_div_errors("floordiv_usecase", "division by zero", flags=flags) + + def test_floordiv_errors_npm(self): + self.test_floordiv_errors(flags=Noflags) + + def test_mod_errors(self, flags=force_pyobj_flags): + self.check_div_errors("mod_usecase", "modulo by zero", flags=flags) + + def test_mod_errors_npm(self): + self.test_mod_errors(flags=Noflags) + + def run_pow_ints(self, pyfunc, flags=force_pyobj_flags): + x_operands = [-2, -1, 0, 1, 2] + y_operands = [0, 1, 2] + + types_list = [(types.int32, types.int32), + (types.int64, types.int64)] + + self.run_test_ints(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + x_operands = [0, 1, 2] + y_operands = [0, 1, 2] + + types_list = [(types.byte, types.byte), + (types.uint32, types.uint32), + (types.uint64, types.uint64)] + + self.run_test_ints(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + def run_pow_floats(self, pyfunc, flags=force_pyobj_flags): + x_operands = 
[-222.222, -111.111, 111.111, 222.222] + y_operands = [-2, -1, 0, 1, 2] + + types_list = [(types.float32, types.float32), + (types.float64, types.float64)] + + self.run_test_floats(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + x_operands = [0.0] + y_operands = [0, 1, 2] # TODO native handling of 0 ** negative power + + types_list = [(types.float32, types.float32), + (types.float64, types.float64)] + + self.run_test_floats(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + # XXX power operator is unsupported on complex numbers (see issue #488) + generate_binop_tests(locals(), + ('pow', 'ipow'), + {'ints': 'run_pow_ints', + 'floats': 'run_pow_floats', + }) + + def test_add_complex(self, flags=force_pyobj_flags): + pyfunc = self.op.add_usecase + + x_operands = [1+0j, 1j, -1-1j] + y_operands = x_operands + + types_list = [(types.complex64, types.complex64), + (types.complex128, types.complex128),] + + self.run_test_floats(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + def test_add_complex_npm(self): + self.test_add_complex(flags=Noflags) + + def test_sub_complex(self, flags=force_pyobj_flags): + pyfunc = self.op.sub_usecase + + x_operands = [1+0j, 1j, -1-1j] + y_operands = [1, 2, 3] + + types_list = [(types.complex64, types.complex64), + (types.complex128, types.complex128),] + + self.run_test_floats(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + def test_sub_complex_npm(self): + self.test_sub_complex(flags=Noflags) + + def test_mul_complex(self, flags=force_pyobj_flags): + pyfunc = self.op.mul_usecase + + x_operands = [1+0j, 1j, -1-1j] + y_operands = [1, 2, 3] + + types_list = [(types.complex64, types.complex64), + (types.complex128, types.complex128),] + + self.run_test_floats(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + def test_mul_complex_npm(self): + self.test_mul_complex(flags=Noflags) + + def test_truediv_complex(self, flags=force_pyobj_flags): + pyfunc = self.op.truediv_usecase + + 
x_operands = [1+0j, 1j, -1-1j] + y_operands = [1, 2, 3] + + types_list = [(types.complex64, types.complex64), + (types.complex128, types.complex128),] + + self.run_test_floats(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + def test_truediv_complex_npm(self): + self.test_truediv_complex(flags=Noflags) + + def test_mod_complex(self, flags=force_pyobj_flags): + pyfunc = self.op.mod_usecase + cres = jit((types.complex64, types.complex64), **flags)(pyfunc) + with self.assertRaises(TypeError) as raises: + cres(4j, 2j) + + # error message depends on Python version. + if utils.PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)): + msg = "unsupported operand type(s) for %" + else: + raise NotImplementedError(utils.PYVERSION) + + self.assertIn(msg, str(raises.exception)) + + def test_mod_complex_npm(self): + pyfunc = self.op.mod_usecase + with self.assertTypingError(): + njit((types.complex64, types.complex64))(pyfunc) + + # + # Matrix multiplication + # (just check with simple values; computational tests are in test_linalg) + # + + def check_matmul_objmode(self, pyfunc, inplace): + # Use dummy objects, to work with any NumPy / SciPy version + cfunc = jit((), **force_pyobj_flags)(pyfunc) + a = DumbMatrix(3) + b = DumbMatrix(4) + got = cfunc(a, b) + self.assertEqual(got.value, 12) + if inplace: + self.assertIs(got, a) + else: + self.assertIsNot(got, a) + self.assertIsNot(got, b) + + def test_matmul(self): + self.check_matmul_objmode(self.op.matmul_usecase, inplace=False) + + def test_imatmul(self): + self.check_matmul_objmode(self.op.imatmul_usecase, inplace=True) + + @needs_blas + def check_matmul_npm(self, pyfunc): + arrty = types.Array(types.float32, 1, 'C') + cfunc = njit((arrty, arrty))(pyfunc) + a = np.float32([1, 2]) + b = np.float32([3, 4]) + got = cfunc(a, b) + self.assertPreciseEqual(got, np.dot(a, b)) + # Never inplace + self.assertIsNot(got, a) + self.assertIsNot(got, b) + + def test_matmul_npm(self): + self.check_matmul_npm(self.op.matmul_usecase) + 
+ def test_imatmul_npm(self): + with self.assertTypingError() as raises: + self.check_matmul_npm(self.op.imatmul_usecase) + + # + # Bitwise operators + # + + def run_bitshift_left(self, pyfunc, flags=force_pyobj_flags): + x_operands = [0, 1] + y_operands = [0, 1, 2, 4, 8, 16, 31] + + types_list = [(types.uint32, types.uint32)] + + self.run_test_ints(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + x_operands = [0, 1] + y_operands = [0, 1, 2, 4, 8, 16, 32, 63] + + types_list = [(types.uint64, types.uint64)] + + self.run_test_ints(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + x_operands = [0, -1] + y_operands = [0, 1, 2, 4, 8, 16, 31] + + types_list = [(types.int32, types.int32)] + + self.run_test_ints(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + x_operands = [0, -1] + y_operands = [0, 1, 2, 4, 8, 16, 32, 63] + + types_list = [(types.int64, types.int64)] + + self.run_test_ints(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + generate_binop_tests(locals(), + ('bitshift_left', 'bitshift_ileft'), + {'ints': 'run_bitshift_left', + }) + + def run_bitshift_right(self, pyfunc, flags=force_pyobj_flags): + x_operands = [0, 1, 2**32 - 1] + y_operands = [0, 1, 2, 4, 8, 16, 31] + + types_list = [(types.uint32, types.uint32)] + + self.run_test_ints(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + x_operands = [0, 1, 2**64 - 1] + y_operands = [0, 1, 2, 4, 8, 16, 32, 63] + + types_list = [(types.uint64, types.uint64)] + + self.run_test_ints(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + x_operands = [0, 1, -(2**31)] + y_operands = [0, 1, 2, 4, 8, 16, 31] + + types_list = [(types.int32, types.int32)] + + self.run_test_ints(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + x_operands = [0, -1, -(2**31)] + y_operands = [0, 1, 2, 4, 8, 16, 32, 63] + + types_list = [(types.int64, types.int64)] + + self.run_test_ints(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + 
generate_binop_tests(locals(), + ('bitshift_right', 'bitshift_iright'), + {'ints': 'run_bitshift_right', + }) + + def run_logical(self, pyfunc, flags=force_pyobj_flags): + x_operands = list(range(0, 8)) + [2**32 - 1] + y_operands = list(range(0, 8)) + [2**32 - 1] + + types_list = [(types.uint32, types.uint32)] + + self.run_test_ints(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + x_operands = list(range(0, 8)) + [2**64 - 1] + y_operands = list(range(0, 8)) + [2**64 - 1] + + types_list = [(types.uint64, types.uint64)] + + self.run_test_ints(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + x_operands = list(range(-4, 4)) + [-(2**31), 2**31 - 1] + y_operands = list(range(-4, 4)) + [-(2**31), 2**31 - 1] + + types_list = [(types.int32, types.int32)] + + self.run_test_ints(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + x_operands = list(range(-4, 4)) + [-(2**63), 2**63 - 1] + y_operands = list(range(-4, 4)) + [-(2**63), 2**63 - 1] + + types_list = [(types.int64, types.int64)] + + self.run_test_ints(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + generate_binop_tests(locals(), + ('bitwise_and', 'bitwise_iand', + 'bitwise_or', 'bitwise_ior', + 'bitwise_xor', 'bitwise_ixor'), + {'ints': 'run_logical', + 'bools': 'run_binop_bools', + }) + + # + # Unary operators + # + + def test_bitwise_not(self, flags=force_pyobj_flags): + pyfunc = self.op.bitwise_not_usecase_binary + + x_operands = list(range(0, 8)) + [2**32 - 1] + x_operands = [np.uint32(x) for x in x_operands] + y_operands = [0] + + types_list = [(types.uint32, types.uint32)] + + self.run_test_ints(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + x_operands = list(range(-4, 4)) + [-(2**31), 2**31 - 1] + y_operands = [0] + + types_list = [(types.int32, types.int32)] + + self.run_test_ints(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + x_operands = list(range(0, 8)) + [2**64 - 1] + x_operands = [np.uint64(x) for x in x_operands] + 
y_operands = [0] + + types_list = [(types.uint64, types.uint64)] + + self.run_test_ints(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + x_operands = list(range(-4, 4)) + [-(2**63), 2**63 - 1] + y_operands = [0] + + types_list = [(types.int64, types.int64)] + + self.run_test_ints(pyfunc, x_operands, y_operands, types_list, + flags=flags) + + # For booleans, we follow Numpy semantics (i.e. ~True == False, + # not ~True == -2) + values = [False, False, True, True] + values = list(map(np.bool_, values)) + + pyfunc = self.op.bitwise_not_usecase + cfunc = jit((types.boolean,), **flags)(pyfunc) + for val in values: + self.assertPreciseEqual(pyfunc(val), cfunc(val)) + + def test_bitwise_not_npm(self): + self.test_bitwise_not(flags=Noflags) + + def test_bitwise_float(self): + """ + Make sure that bitwise float operations are not allowed + """ + def assert_reject_compile(pyfunc, argtypes, opname): + msg = 'expecting TypingError when compiling {}'.format(pyfunc) + with self.assertRaises(errors.TypingError, msg=msg) as raises: + njit(argtypes)(pyfunc) + # check error message + fmt = _header_lead + ' {}' + expecting = fmt.format(opname + if isinstance(opname, str) + else 'Function({})'.format(opname)) + self.assertIn(expecting, str(raises.exception)) + + methods = [ + 'bitshift_left_usecase', + 'bitshift_ileft_usecase', + 'bitshift_right_usecase', + 'bitshift_iright_usecase', + 'bitwise_and_usecase', + 'bitwise_iand_usecase', + 'bitwise_or_usecase', + 'bitwise_ior_usecase', + 'bitwise_xor_usecase', + 'bitwise_ixor_usecase', + 'bitwise_not_usecase_binary', + ] + + for name in methods: + pyfunc = getattr(self.op, name) + assert_reject_compile(pyfunc, (types.float32, types.float32), + opname=self._bitwise_opnames[name]) + + def test_not(self): + pyfunc = self.op.not_usecase + + values = [ + 1, + 2, + 3, + 1.2, + 3.4j, + ] + + cfunc = jit((), **force_pyobj_flags)(pyfunc) + for val in values: + self.assertEqual(pyfunc(val), cfunc(val)) + + def test_not_npm(self): + 
pyfunc = self.op.not_usecase + # test native mode + argtys = [ + types.int8, + types.int32, + types.int64, + types.float32, + types.complex128, + ] + values = [ + 1, + 2, + 3, + 1.2, + 3.4j, + ] + for ty, val in zip(argtys, values): + cfunc = njit((ty,))(pyfunc) + self.assertEqual(cfunc.nopython_signatures[0].return_type, + types.boolean) + self.assertEqual(pyfunc(val), cfunc(val)) + + # XXX test_negate should check for negative and positive zeros and infinities + + def test_negate_npm(self): + pyfunc = self.op.negate_usecase + # test native mode + argtys = [ + types.int8, + types.int32, + types.int64, + types.float32, + types.float64, + types.complex128, + types.boolean, + types.boolean, + ] + values = [ + 1, + 2, + 3, + 1.2, + 2.4, + 3.4j, + True, + False, + ] + for ty, val in zip(argtys, values): + cfunc = njit((ty,))(pyfunc) + self.assertAlmostEqual(pyfunc(val), cfunc(val)) + + + def test_negate(self): + pyfunc = self.op.negate_usecase + values = [ + 1, + 2, + 3, + 1.2, + 3.4j, + True, + False, + ] + cfunc = jit((), **force_pyobj_flags)(pyfunc) + for val in values: + self.assertEqual(pyfunc(val), cfunc(val)) + + def test_unary_positive_npm(self): + pyfunc = self.op.unary_positive_usecase + # test native mode + argtys = [ + types.int8, + types.int32, + types.int64, + types.float32, + types.float64, + types.complex128, + types.boolean, + types.boolean, + ] + values = [ + 1, + 2, + 3, + 1.2, + 2.4, + 3.4j, + True, + False + ] + for ty, val in zip(argtys, values): + cfunc = njit((ty,))(pyfunc) + self.assertAlmostEqual(pyfunc(val), cfunc(val)) + + def test_unary_positive(self): + pyfunc = self.op.unary_positive_usecase + values = [ + 1, + 2, + 3, + 1.2, + 3.4j, + True, + False, + ] + cfunc = jit((), **force_pyobj_flags)(pyfunc) + for val in values: + self.assertEqual(pyfunc(val), cfunc(val)) + + def _check_in(self, pyfunc, flags): + dtype = types.int64 + cfunc = jit((dtype, types.UniTuple(dtype, 3)), **flags)(pyfunc) + for i in (3, 4, 5, 6, 42): + tup = (3, 42, 5) + 
self.assertPreciseEqual(pyfunc(i, tup), cfunc(i, tup)) + + def test_in(self, flags=force_pyobj_flags): + self._check_in(self.op.in_usecase, flags) + + def test_in_npm(self): + self.test_in(flags=Noflags) + + def test_not_in(self, flags=force_pyobj_flags): + self._check_in(self.op.not_in_usecase, flags) + + def test_not_in_npm(self): + self.test_not_in(flags=Noflags) + + +class TestOperatorModule(TestOperators): + + op = FunctionalOperatorImpl + + _bitwise_opnames = { + 'bitshift_left_usecase': operator.lshift, + 'bitshift_ileft_usecase': operator.ilshift, + 'bitshift_right_usecase': operator.rshift, + 'bitshift_iright_usecase': operator.irshift, + 'bitwise_and_usecase': operator.and_, + 'bitwise_iand_usecase': operator.iand, + 'bitwise_or_usecase': operator.or_, + 'bitwise_ior_usecase': operator.ior, + 'bitwise_xor_usecase': operator.xor, + 'bitwise_ixor_usecase': operator.ixor, + 'bitwise_not_usecase_binary': operator.invert, + } + + +class TestMixedInts(TestCase): + """ + Tests for operator calls with mixed integer types. 
+ """ + + op = LiteralOperatorImpl + + int_samples = [0, 1, 3, 10, 42, 127, 10000, -1, -3, -10, -42, -127, -10000] + + int_types = [types.int8, types.uint8, types.int64, types.uint64] + signed_types = [tp for tp in int_types if tp.signed] + unsigned_types = [tp for tp in int_types if not tp.signed] + type_pairs = list(itertools.product(int_types, int_types)) + signed_pairs = [(u, v) for u, v in type_pairs + if u.signed or v.signed] + unsigned_pairs = [(u, v) for u, v in type_pairs + if not (u.signed or v.signed)] + + def int_in_dtype_range(self, val, tp): + tp_info = np.iinfo(tp.key) + return tp_info.min <= val <= tp_info.max + + def get_numpy_signed_upcast(self, *vals): + bitwidth = max(v.dtype.itemsize * 8 for v in vals) + bitwidth = max(bitwidth, types.intp.bitwidth) + return getattr(np, "int%d" % bitwidth) + + def get_numpy_unsigned_upcast(self, *vals): + bitwidth = max(v.dtype.itemsize * 8 for v in vals) + bitwidth = max(bitwidth, types.intp.bitwidth) + return getattr(np, "uint%d" % bitwidth) + + def get_typed_int(self, typ, val): + return getattr(np, typ.name)(val) + + def get_control_signed(self, opname): + op = getattr(operator, opname) + def control_signed(a, b): + tp = self.get_numpy_signed_upcast(a, b) + return op(tp(a), tp(b)) + return control_signed + + def get_control_unsigned(self, opname): + op = getattr(operator, opname) + def control_unsigned(a, b): + tp = self.get_numpy_unsigned_upcast(a, b) + return op(tp(a), tp(b)) + return control_unsigned + + def run_binary(self, pyfunc, control_func, operands, types, + expected_type=int, force_type=lambda x: x, + **assertPreciseEqualArgs): + for xt, yt in types: + cfunc = njit((xt, yt))(pyfunc) + for x, y in itertools.product(operands, operands): + # Check if xt and yt are values with range of dtype x and y + if not self.int_in_dtype_range(x, xt) or not self.int_in_dtype_range(y, yt): + continue + # Get Numpy typed scalars for the given types and values + x = self.get_typed_int(xt, x) + y = 
self.get_typed_int(yt, y) + expected = control_func(x, y) + got = cfunc(x, y) + self.assertIsInstance(got, expected_type) + msg = ("mismatch for (%r, %r) with types %s" + % (x, y, (xt, yt))) + got, expected = force_type(got), force_type(expected) + self.assertPreciseEqual(got, expected, msg=msg, + **assertPreciseEqualArgs) + + def run_unary(self, pyfunc, control_func, operands, types, + expected_type=int): + for xt in types: + cfunc = njit((xt,))(pyfunc) + for x in operands: + if not self.int_in_dtype_range(x, xt): + continue + x = self.get_typed_int(xt, x) + expected = control_func(x) + got = cfunc(x) + self.assertIsInstance(got, expected_type) + self.assertPreciseEqual( + got, expected, + msg="mismatch for %r with type %s: %r != %r" + % (x, xt, got, expected)) + + def run_arith_binop(self, pyfunc, opname, samples, + expected_type=int, force_type=lambda x: x, + **assertPreciseEqualArgs): + self.run_binary(pyfunc, self.get_control_signed(opname), + samples, self.signed_pairs, expected_type, + force_type=force_type, + **assertPreciseEqualArgs) + self.run_binary(pyfunc, self.get_control_unsigned(opname), + samples, self.unsigned_pairs, expected_type, + force_type=force_type, + **assertPreciseEqualArgs) + + def test_add(self): + self.run_arith_binop(self.op.add_usecase, 'add', self.int_samples) + + def test_sub(self): + self.run_arith_binop(self.op.sub_usecase, 'sub', self.int_samples) + + def test_mul(self): + self.run_arith_binop(self.op.mul_usecase, 'mul', self.int_samples) + + def test_floordiv(self): + samples = [x for x in self.int_samples if x != 0] + self.run_arith_binop(self.op.floordiv_usecase, 'floordiv', samples) + + def test_mod(self): + samples = [x for x in self.int_samples if x != 0] + self.run_arith_binop(self.op.mod_usecase, 'mod', samples) + + def test_pow(self): + extra_cast = {} + if utils.PYVERSION == (3, 11): + extra_cast["force_type"] = float + pyfunc = self.op.pow_usecase + # Only test with positive values, as otherwise trying to write the + # 
control function in terms of Python or Numpy power turns out insane. + samples = [x for x in self.int_samples if x >= 0] + self.run_arith_binop(pyfunc, 'pow', samples, **extra_cast) + + # Now test all non-zero values, but only with signed types + def control_signed(a, b): + tp = self.get_numpy_signed_upcast(a, b) + if b >= 0: + return tp(a) ** tp(b) + else: + inv = tp(a) ** tp(-b) + if inv == 0: + # Overflow + return 0 + return np.intp(1.0 / inv) + samples = [x for x in self.int_samples if x != 0] + signed_pairs = [(u, v) for u, v in self.type_pairs + if u.signed and v.signed] + self.run_binary(pyfunc, control_signed, + samples, signed_pairs, **extra_cast) + + def test_truediv(self): + + def control(a, b): + return float(a) / float(b) + samples = [x for x in self.int_samples if x != 0] + pyfunc = self.op.truediv_usecase + + # Note: there can be precision issues on x87 + # e.g. for `1 / 18446744073709541616` + # -> 0x1.0000000000002p-64 vs. 0x1.0000000000003p-64. + self.run_binary(pyfunc, control, samples, self.signed_pairs, + expected_type=float, prec='double') + self.run_binary(pyfunc, control, samples, self.unsigned_pairs, + expected_type=float, prec='double') + + def test_and(self): + self.run_arith_binop(self.op.bitwise_and_usecase, 'and_', self.int_samples) + + def test_or(self): + self.run_arith_binop(self.op.bitwise_or_usecase, 'or_', self.int_samples) + + def test_xor(self): + self.run_arith_binop(self.op.bitwise_xor_usecase, 'xor', self.int_samples) + + def run_shift_binop(self, pyfunc, opname): + opfunc = getattr(operator, opname) + def control_signed(a, b): + tp = self.get_numpy_signed_upcast(a, b) + return opfunc(tp(a), tp(b)) + def control_unsigned(a, b): + tp = self.get_numpy_unsigned_upcast(a, b) + return opfunc(tp(a), tp(b)) + + samples = self.int_samples + + def check(xt, yt, control_func): + cfunc = njit((xt, yt))(pyfunc) + for x in samples: + # Avoid shifting by more than the shiftand's bitwidth, as + # we would hit undefined behaviour. 
+ maxshift = xt.bitwidth - 1 + for y in (0, 1, 3, 5, maxshift - 1, maxshift): + if not self.int_in_dtype_range(x, xt) or not self.int_in_dtype_range(y, yt): + continue + # Get Numpy typed scalars for the given types and values + x = self.get_typed_int(xt, x) + y = self.get_typed_int(yt, y) + expected = control_func(x, y) + got = cfunc(x, y) + msg = ("mismatch for (%r, %r) with types %s" + % (x, y, (xt, yt))) + self.assertPreciseEqual(got, expected, msg=msg) + + # For bitshifts, only the first operand's signedness matters + # to choose the operation's signedness. + signed_pairs = [(u, v) for u, v in self.type_pairs + if u.signed] + unsigned_pairs = [(u, v) for u, v in self.type_pairs + if not u.signed] + + for xt, yt in signed_pairs: + check(xt, yt, control_signed) + for xt, yt in unsigned_pairs: + check(xt, yt, control_unsigned) + + def test_lshift(self): + self.run_shift_binop(self.op.bitshift_left_usecase, 'lshift') + + def test_rshift(self): + self.run_shift_binop(self.op.bitshift_right_usecase, 'rshift') + + def test_unary_positive(self): + def control(a): + return a + samples = self.int_samples + pyfunc = self.op.unary_positive_usecase + + self.run_unary(pyfunc, control, samples, self.int_types) + + def test_unary_negative(self): + def control_signed(a): + tp = self.get_numpy_signed_upcast(a) + return tp(-a) + def control_unsigned(a): + tp = self.get_numpy_unsigned_upcast(a) + return tp(-a) + samples = self.int_samples + pyfunc = self.op.negate_usecase + + self.run_unary(pyfunc, control_signed, samples, self.signed_types) + self.run_unary(pyfunc, control_unsigned, samples, self.unsigned_types) + + def test_invert(self): + def control_signed(a): + tp = self.get_numpy_signed_upcast(a) + return tp(~a) + def control_unsigned(a): + tp = self.get_numpy_unsigned_upcast(a) + return tp(~a) + samples = self.int_samples + pyfunc = self.op.bitwise_not_usecase + + self.run_unary(pyfunc, control_signed, samples, self.signed_types) + self.run_unary(pyfunc, control_unsigned, 
samples, self.unsigned_types) + + +class TestMixedIntsOperatorModule(TestMixedInts): + + op = FunctionalOperatorImpl + + +class TestStaticPower(TestCase): + """ + Test the ** operator with a static exponent, to exercise a + dedicated optimization. + """ + + def _check_pow(self, exponents, values): + for exp in exponents: + # test against non-static version of the @jit-ed function + regular_func = LiteralOperatorImpl.pow_usecase + static_func = make_static_power(exp) + + static_cfunc = jit(nopython=True)(static_func) + regular_cfunc = jit(nopython=True)(regular_func) + for v in values: + try: + expected = regular_cfunc(v, exp) + except ZeroDivisionError: + with self.assertRaises(ZeroDivisionError): + static_cfunc(v) + else: + got = static_cfunc(v) + self.assertPreciseEqual(expected, got, prec='double') + + def test_int_values(self): + exponents = [1, 2, 3, 5, 17, 0, -1, -2, -3] + vals = [0, 1, 3, -1, -4, np.int8(-3), np.uint16(4)] + + self._check_pow(exponents, vals) + + def test_real_values(self): + exponents = [1, 2, 3, 5, 17, 0, -1, -2, -3, 0x111111, -0x111112] + vals = [1.5, 3.25, -1.25, np.float32(-2.0), float('inf'), float('nan')] + + self._check_pow(exponents, vals) + +class TestStringConstComparison(TestCase): + """ + Test comparison of string constants + """ + def test_eq(self): + def test_impl1(): + s = 'test' + return s == 'test' + + def test_impl2(): + s = 'test1' + return s == 'test' + + cfunc1 = jit(nopython=True)(test_impl1) + cfunc2 = jit(nopython=True)(test_impl2) + self.assertEqual(test_impl1(), cfunc1()) + self.assertEqual(test_impl2(), cfunc2()) + + def test_neq(self): + def test_impl1(): + s = 'test' + return s != 'test' + + def test_impl2(): + s = 'test1' + return s != 'test' + + cfunc1 = jit(nopython=True)(test_impl1) + cfunc2 = jit(nopython=True)(test_impl2) + self.assertEqual(test_impl1(), cfunc1()) + self.assertEqual(test_impl2(), cfunc2()) + +class TestBooleanLiteralOperators(TestCase): + """ + Test operators with Boolean constants + """ + 
def test_eq(self): + + def test_impl1(b): + return a_val == b + + def test_impl2(a): + return a == b_val + + def test_impl3(): + r1 = True == True + r2 = True == False + r3 = False == True + r4 = False == False + return (r1, r2, r3, r4) + + for a_val, b in itertools.product([True, False], repeat=2): + cfunc1 = jit(nopython=True)(test_impl1) + self.assertEqual(test_impl1(b), cfunc1(b)) + + for a, b_val in itertools.product([True, False], repeat=2): + cfunc2 = jit(nopython=True)(test_impl2) + self.assertEqual(test_impl2(a), cfunc2(a)) + + cfunc3 = jit(nopython=True)(test_impl3) + self.assertEqual(test_impl3(), cfunc3()) + + def test_ne(self): + + def test_impl1(b): + return a_val != b + + def test_impl2(a): + return a != b_val + + def test_impl3(): + r1 = True != True + r2 = True != False + r3 = False != True + r4 = False != False + return (r1, r2, r3, r4) + + for a_val, b in itertools.product([True, False], repeat=2): + cfunc1 = jit(nopython=True)(test_impl1) + self.assertEqual(test_impl1(b), cfunc1(b)) + + for a, b_val in itertools.product([True, False], repeat=2): + cfunc2 = jit(nopython=True)(test_impl2) + self.assertEqual(test_impl2(a), cfunc2(a)) + + cfunc3 = jit(nopython=True)(test_impl3) + self.assertEqual(test_impl3(), cfunc3()) + + def test_is(self): + + def test_impl1(b): + return a_val is b + + def test_impl2(): + r1 = True is True + r2 = True is False + r3 = False is True + r4 = False is False + return (r1, r2, r3, r4) + + for a_val, b in itertools.product([True, False], repeat=2): + cfunc1 = jit(nopython=True)(test_impl1) + self.assertEqual(test_impl1(b), cfunc1(b)) + + cfunc2 = jit(nopython=True)(test_impl2) + self.assertEqual(test_impl2(), cfunc2()) + + def test_not(self): + + def test_impl(): + a, b = False, True + return (not a, not b) + + cfunc = jit(nopython=True)(test_impl) + self.assertEqual(test_impl(), cfunc()) + + def test_bool(self): + + def test_impl(): + a, b = False, True + return (bool(a), bool(b)) + + cfunc = 
jit(nopython=True)(test_impl) + self.assertEqual(test_impl(), cfunc()) + + def test_bool_to_str(self): + + def test_impl(): + a, b = False, True + return (str(a), str(b)) + + cfunc = jit(nopython=True)(test_impl) + self.assertEqual(test_impl(), cfunc()) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_optimisation_pipelines.py b/venv/lib/python3.10/site-packages/numba/tests/test_optimisation_pipelines.py new file mode 100644 index 0000000000000000000000000000000000000000..a11e6632442d907c3a4dc9c3588a40cc2b518c34 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_optimisation_pipelines.py @@ -0,0 +1,58 @@ +import unittest + +from numba.tests.support import (TestCase, override_config) +from numba import njit +from numba.core import types +import llvmlite.binding as llvm + + +class TestPassManagerOptimization(TestCase): + """ Tests that pass manager is not overriding the intended + optimization level. + """ + + def _get_llvmir(self, fn, sig): + with override_config('OPT', 0): + fn.compile(sig) + return fn.inspect_llvm(sig) + + def test_override_config(self): + @njit(debug=True, error_model='numpy') + def foo(a): + b = a + 1.23 + c = b * 2.34 + d = b / c + print(d) + return d + + sig = (types.float64,) + full_ir = self._get_llvmir(foo, sig=sig) + + module = llvm.parse_assembly(full_ir) + + name = foo.overloads[foo.signatures[0]].fndesc.mangled_name + funcs = [x for x in module.functions if x.name == name] + self.assertEqual(len(funcs), 1) + func = funcs[0] + blocks = [x for x in func.blocks] + self.assertGreater(len(blocks), 1) + block = blocks[0] + + # Find sequence with non-debug instructions + instrs = [x for x in block.instructions if x.opcode != 'call'] + op_expect = {'fadd', 'fmul', 'fdiv'} + started = False + for x in instrs: + if x.opcode in op_expect: + op_expect.remove(x.opcode) + if not started: + started = True + elif op_expect and started: + break + + 
self.assertGreater(len(op_expect), 0, + "Function was optimized unexpectedly") + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_optional.py b/venv/lib/python3.10/site-packages/numba/tests/test_optional.py new file mode 100644 index 0000000000000000000000000000000000000000..f043cebee22b050c24afdb9b2af0827cccd41fe8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_optional.py @@ -0,0 +1,254 @@ +import itertools + +import numpy as np + +import unittest +from numba import jit, njit +from numba.core import types +from numba.tests.support import TestCase + + +def return_double_or_none(x): + if x: + ret = None + else: + ret = 1.2 + return ret + + +def return_different_statement(x): + if x: + return None + else: + return 1.2 + + +def return_bool_optional_or_none(x, y): + if y: + z = False + else: + z = None + if x == 2: + # A boolean + return True + elif x == 1: + # A runtime optional + return z + else: + # None + return None + + +def is_this_a_none(x): + if x: + val_or_none = None + else: + val_or_none = x + + if val_or_none is None: + return x - 1 + + if val_or_none is not None: + return x + 1 + + +def a_is_b(a, b): + """ + Note in nopython mode, this operation does not make much sense. + Because we don't have objects anymore. 
+ `a is b` is always False if not operating on None and Optional type + """ + return a is b + + +def a_is_not_b(a, b): + """ + This is `not (a is b)` + """ + return a is not b + + +class TestOptional(TestCase): + + _numba_parallel_test_ = False + + def test_return_double_or_none(self): + pyfunc = return_double_or_none + cfunc = njit((types.boolean,))(pyfunc) + + for v in [True, False]: + self.assertPreciseEqual(pyfunc(v), cfunc(v)) + + def test_return_different_statement(self): + pyfunc = return_different_statement + cfunc = njit((types.boolean,))(pyfunc) + + for v in [True, False]: + self.assertPreciseEqual(pyfunc(v), cfunc(v)) + + def test_return_bool_optional_or_none(self): + pyfunc = return_bool_optional_or_none + cfunc = njit((types.int32, types.int32,))(pyfunc) + + for x, y in itertools.product((0, 1, 2), (0, 1)): + self.assertPreciseEqual(pyfunc(x, y), cfunc(x, y)) + + def test_is_this_a_none(self): + pyfunc = is_this_a_none + cfunc = njit((types.intp,))(pyfunc) + + for v in [-1, 0, 1, 2]: + self.assertPreciseEqual(pyfunc(v), cfunc(v)) + + def test_is_this_a_none_objmode(self): + pyfunc = is_this_a_none + cfunc = jit((types.intp,), forceobj=True)(pyfunc) + self.assertTrue(cfunc.overloads[cfunc.signatures[0]].objectmode) + for v in [-1, 0, 1, 2]: + self.assertPreciseEqual(pyfunc(v), cfunc(v)) + + def test_a_is_b_intp(self): + pyfunc = a_is_b + cfunc = njit((types.intp, types.intp))(pyfunc) + # integer identity relies on `==` + self.assertTrue(cfunc(1, 1)) + self.assertFalse(cfunc(1, 2)) + + def test_a_is_not_b_intp(self): + pyfunc = a_is_not_b + cfunc = njit((types.intp, types.intp))(pyfunc) + # integer identity relies on `==` + self.assertFalse(cfunc(1, 1)) + self.assertTrue(cfunc(1, 2)) + + def test_optional_float(self): + def pyfunc(x, y): + if y is None: + return x + else: + return x + y + + cfunc = njit("(float64, optional(float64))")(pyfunc) + self.assertAlmostEqual(pyfunc(1., 12.3), cfunc(1., 12.3)) + self.assertAlmostEqual(pyfunc(1., None), cfunc(1., 
None)) + + def test_optional_array(self): + def pyfunc(x, y): + if y is None: + return x + else: + y[0] += x + return y[0] + + cfunc = njit("(float32, optional(float32[:]))")(pyfunc) + cy = np.array([12.3], dtype=np.float32) + py = cy.copy() + self.assertAlmostEqual(pyfunc(1., py), cfunc(1., cy)) + np.testing.assert_almost_equal(py, cy) + self.assertAlmostEqual(pyfunc(1., None), cfunc(1., None)) + + def test_optional_array_error(self): + def pyfunc(y): + return y[0] + + cfunc = njit("(optional(int32[:]),)")(pyfunc) + with self.assertRaises(TypeError) as raised: + cfunc(None) + self.assertIn('expected array(int32, 1d, A), got None', + str(raised.exception)) + + y = np.array([0xabcd], dtype=np.int32) + self.assertEqual(cfunc(y), pyfunc(y)) + + def test_optional_array_attribute(self): + """ + Check that we can access attribute of an optional + """ + def pyfunc(arr, do_it): + opt = None + if do_it: # forces `opt` to be an optional of arr + opt = arr + return opt.shape[0] + + cfunc = njit(pyfunc) + arr = np.arange(5) + self.assertEqual(pyfunc(arr, True), cfunc(arr, True)) + + def test_assign_to_optional(self): + """ + Check that we can assign to a variable of optional type + """ + @njit + def make_optional(val, get_none): + if get_none: + ret = None + else: + ret = val + return ret + + @njit + def foo(val, run_second): + a = make_optional(val, True) + if run_second: + a = make_optional(val, False) + return a + + self.assertIsNone(foo(123, False)) + self.assertEqual(foo(231, True), 231) + + def test_optional_thru_omitted_arg(self): + """ + Issue 1868 + """ + + def pyfunc(x=None): + if x is None: + x = 1 + return x + + cfunc = njit(pyfunc) + self.assertEqual(pyfunc(), cfunc()) + self.assertEqual(pyfunc(3), cfunc(3)) + + def test_optional_unpack(self): + """ + Issue 2171 + """ + def pyfunc(x): + if x is None: + return + else: + a, b = x + return a, b + + tup = types.Tuple([types.intp] * 2) + opt_tup = types.Optional(tup) + sig = (opt_tup,) + cfunc = njit(sig)(pyfunc) + 
self.assertEqual(pyfunc(None), cfunc(None)) + self.assertEqual(pyfunc((1, 2)), cfunc((1, 2))) + + def test_many_optional_none_returns(self): + """ + Issue #4058 + """ + @njit + def foo(maybe): + lx = None + if maybe: + lx = 10 + return 1, lx + + def work(): + tmp = [] + for _ in range(20000): + maybe = False + _ = foo(maybe) + + # this caused "Fatal Python error: deallocating None" as there was no + # incref being made on the returned None. + work() + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_overlap.py b/venv/lib/python3.10/site-packages/numba/tests/test_overlap.py new file mode 100644 index 0000000000000000000000000000000000000000..eecf77c099ce9359a5ed298f84c5eefaa7076883 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_overlap.py @@ -0,0 +1,134 @@ +import numpy as np + +from numba import jit +from numba.core import types +from numba.tests.support import TestCase, tag +import unittest + + +# Array overlaps involving a displacement + +def array_overlap1(src, dest, k=1): + assert src.shape == dest.shape + dest[k:] = src[:-k] + +def array_overlap2(src, dest, k=1): + assert src.shape == dest.shape + dest[:-k] = src[k:] + +def array_overlap3(src, dest, k=1): + assert src.shape == dest.shape + dest[:,:-k] = src[:,k:] + +def array_overlap4(src, dest, k=1): + assert src.shape == dest.shape + dest[:,k:] = src[:,:-k] + +def array_overlap5(src, dest, k=1): + assert src.shape == dest.shape + dest[...,:-k] = src[...,k:] + +def array_overlap6(src, dest, k=1): + assert src.shape == dest.shape + dest[...,k:] = src[...,:-k] + +# Array overlaps involving an in-place reversal + +def array_overlap11(src, dest): + assert src.shape == dest.shape + dest[::-1] = src + +def array_overlap12(src, dest): + assert src.shape == dest.shape + dest[:] = src[::-1] + +def array_overlap13(src, dest): + assert src.shape == dest.shape + dest[:,::-1] = src + +def array_overlap14(src, dest): + assert src.shape 
== dest.shape + dest[:] = src[:,::-1] + +def array_overlap15(src, dest): + assert src.shape == dest.shape + dest[...,::-1] = src + +def array_overlap16(src, dest): + assert src.shape == dest.shape + dest[:] = src[...,::-1] + + +class TestArrayOverlap(TestCase): + + def check_overlap(self, pyfunc, min_ndim, have_k_argument=False): + N = 4 + + def vary_layouts(orig): + yield orig.copy(order='C') + yield orig.copy(order='F') + a = orig[::-1].copy()[::-1] + assert not a.flags.c_contiguous and not a.flags.f_contiguous + yield a + + def check(pyfunc, cfunc, pydest, cdest, kwargs): + pyfunc(pydest, pydest, **kwargs) + cfunc(cdest, cdest, **kwargs) + self.assertPreciseEqual(pydest, cdest) + + cfunc = jit(nopython=True)(pyfunc) + # Check for up to 3d arrays + for ndim in range(min_ndim, 4): + shape = (N,) * ndim + orig = np.arange(0, N**ndim).reshape(shape) + # Note we cannot copy a 'A' layout array exactly (bitwise), + # so instead we call vary_layouts() twice + for pydest, cdest in zip(vary_layouts(orig), vary_layouts(orig)): + if have_k_argument: + for k in range(1, N): + check(pyfunc, cfunc, pydest, cdest, dict(k=k)) + else: + check(pyfunc, cfunc, pydest, cdest, {}) + + def check_overlap_with_k(self, pyfunc, min_ndim): + self.check_overlap(pyfunc, min_ndim=min_ndim, have_k_argument=True) + + def test_overlap1(self): + self.check_overlap_with_k(array_overlap1, min_ndim=1) + + def test_overlap2(self): + self.check_overlap_with_k(array_overlap2, min_ndim=1) + + def test_overlap3(self): + self.check_overlap_with_k(array_overlap3, min_ndim=2) + + def test_overlap4(self): + self.check_overlap_with_k(array_overlap4, min_ndim=2) + + def test_overlap5(self): + self.check_overlap_with_k(array_overlap5, min_ndim=1) + + def test_overlap6(self): + self.check_overlap_with_k(array_overlap6, min_ndim=1) + + def test_overlap11(self): + self.check_overlap(array_overlap11, min_ndim=1) + + def test_overlap12(self): + self.check_overlap(array_overlap12, min_ndim=1) + + def 
test_overlap13(self): + self.check_overlap(array_overlap13, min_ndim=2) + + def test_overlap14(self): + self.check_overlap(array_overlap14, min_ndim=2) + + def test_overlap15(self): + self.check_overlap(array_overlap15, min_ndim=1) + + def test_overlap16(self): + self.check_overlap(array_overlap16, min_ndim=1) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_parallel_backend.py b/venv/lib/python3.10/site-packages/numba/tests/test_parallel_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..e8da3ba65ccf9e2e224ca5adfb163f745b3756c9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_parallel_backend.py @@ -0,0 +1,1253 @@ +# -*- coding: utf-8 -*- + +""" +Tests the parallel backend +""" +import faulthandler +import itertools +import multiprocessing +import os +import random +import re +import subprocess +import sys +import textwrap +import threading +import unittest + +import numpy as np + +from numba import jit, vectorize, guvectorize, set_num_threads +from numba.tests.support import (temp_directory, override_config, TestCase, tag, + skip_parfors_unsupported, linux_only) + +import queue as t_queue +from numba.testing.main import _TIMEOUT as _RUNNER_TIMEOUT +from numba.core import config + + +_TEST_TIMEOUT = _RUNNER_TIMEOUT - 60. 
+ + +# Check which backends are available +# TODO: Put this in a subprocess so the address space is kept clean +try: + # Check it's a compatible TBB before loading it + from numba.np.ufunc.parallel import _check_tbb_version_compatible + _check_tbb_version_compatible() + from numba.np.ufunc import tbbpool # noqa: F401 + _HAVE_TBB_POOL = True +except ImportError: + _HAVE_TBB_POOL = False + +try: + from numba.np.ufunc import omppool + _HAVE_OMP_POOL = True +except ImportError: + _HAVE_OMP_POOL = False + +try: + import scipy.linalg.cython_lapack # noqa: F401 + _HAVE_LAPACK = True +except ImportError: + _HAVE_LAPACK = False + +# test skipping decorators +skip_no_omp = unittest.skipUnless(_HAVE_OMP_POOL, "OpenMP threadpool required") +skip_no_tbb = unittest.skipUnless(_HAVE_TBB_POOL, "TBB threadpool required") + +_gnuomp = _HAVE_OMP_POOL and omppool.openmp_vendor == "GNU" +skip_unless_gnu_omp = unittest.skipUnless(_gnuomp, "GNU OpenMP only tests") + +_windows = sys.platform.startswith('win') +_osx = sys.platform.startswith('darwin') +_32bit = sys.maxsize <= 2 ** 32 +_parfors_unsupported = _32bit + +_HAVE_OS_FORK = not _windows + + +# some functions to jit + +def foo(n, v): + return np.ones(n) + v + + +if _HAVE_LAPACK: + def linalg(n, v): + x = np.dot(np.ones((n, n)), np.ones((n, n))) + return x + np.arange(n) + v +else: + def linalg(n, v): + # no way to trigger MKL without the lapack bindings. 
+ return np.arange(n) + v + + +def ufunc_foo(a, b): + return a + b + + +def gufunc_foo(a, b, out): + out[0] = a + b + + +class runnable(object): + def __init__(self, **options): + self._options = options + + +class jit_runner(runnable): + + def __call__(self): + cfunc = jit(**self._options)(foo) + a = 4 + b = 10 + expected = foo(a, b) + got = cfunc(a, b) + np.testing.assert_allclose(expected, got) + + +class mask_runner(object): + def __init__(self, runner, mask, **options): + self.runner = runner + self.mask = mask + + def __call__(self): + if self.mask: + # Tests are all run in isolated subprocesses, so we + # don't have to worry about this affecting other tests + set_num_threads(self.mask) + self.runner() + + +class linalg_runner(runnable): + + def __call__(self): + cfunc = jit(**self._options)(linalg) + a = 4 + b = 10 + expected = linalg(a, b) + got = cfunc(a, b) + np.testing.assert_allclose(expected, got) + + +class vectorize_runner(runnable): + + def __call__(self): + cfunc = vectorize(['(f4, f4)'], **self._options)(ufunc_foo) + a = b = np.random.random(10).astype(np.float32) + expected = ufunc_foo(a, b) + got = cfunc(a, b) + np.testing.assert_allclose(expected, got) + + +class guvectorize_runner(runnable): + + def __call__(self): + sig = ['(f4, f4, f4[:])'] + cfunc = guvectorize(sig, '(),()->()', **self._options)(gufunc_foo) + a = b = np.random.random(10).astype(np.float32) + expected = ufunc_foo(a, b) + got = cfunc(a, b) + np.testing.assert_allclose(expected, got) + + +def chooser(fnlist, **kwargs): + q = kwargs.get('queue') + try: + faulthandler.enable() + for _ in range(int(len(fnlist) * 1.5)): + fn = random.choice(fnlist) + fn() + except Exception as e: + q.put(e) + + +def compile_factory(parallel_class, queue_impl): + def run_compile(fnlist): + q = queue_impl() + kws = {'queue': q} + ths = [parallel_class(target=chooser, args=(fnlist,), kwargs=kws) + for i in range(4)] + for th in ths: + th.start() + for th in ths: + th.join() + if not q.empty(): + 
errors = [] + while not q.empty(): + errors.append(q.get(False)) + _msg = "Error(s) occurred in delegated runner:\n%s" + raise RuntimeError(_msg % '\n'.join([repr(x) for x in errors])) + return run_compile + + +# workers +_thread_class = threading.Thread + + +class _proc_class_impl(object): + + def __init__(self, method): + self._method = method + + def __call__(self, *args, **kwargs): + ctx = multiprocessing.get_context(self._method) + return ctx.Process(*args, **kwargs) + + +def _get_mp_classes(method): + if method == 'default': + method = None + ctx = multiprocessing.get_context(method) + proc = _proc_class_impl(method) + queue = ctx.Queue + return proc, queue + + +thread_impl = compile_factory(_thread_class, t_queue.Queue) +spawn_proc_impl = compile_factory(*_get_mp_classes('spawn')) +if not _windows: + fork_proc_impl = compile_factory(*_get_mp_classes('fork')) + forkserver_proc_impl = compile_factory(*_get_mp_classes('forkserver')) + +# this is duplication as Py27, linux uses fork, windows uses spawn, it however +# is kept like this so that when tests fail it's less confusing! 
+default_proc_impl = compile_factory(*_get_mp_classes('default')) + + +class TestParallelBackendBase(TestCase): + """ + Base class for testing the parallel backends + """ + + all_impls = [ + jit_runner(nopython=True), + jit_runner(nopython=True, cache=True), + jit_runner(nopython=True, nogil=True), + linalg_runner(nopython=True), + linalg_runner(nopython=True, nogil=True), + vectorize_runner(nopython=True), + vectorize_runner(nopython=True, target='parallel'), + vectorize_runner(nopython=True, target='parallel', cache=True), + guvectorize_runner(nopython=True), + guvectorize_runner(nopython=True, target='parallel'), + guvectorize_runner(nopython=True, target='parallel', cache=True), + ] + + if not _parfors_unsupported: + parfor_impls = [ + jit_runner(nopython=True, parallel=True), + jit_runner(nopython=True, parallel=True, cache=True), + linalg_runner(nopython=True, parallel=True), + linalg_runner(nopython=True, parallel=True, cache=True), + ] + all_impls.extend(parfor_impls) + + if config.NUMBA_NUM_THREADS < 2: + # Not enough cores + masks = [] + else: + masks = [1, 2] + + mask_impls = [] + for impl in all_impls: + for mask in masks: + mask_impls.append(mask_runner(impl, mask)) + + parallelism = ['threading', 'random'] + parallelism.append('multiprocessing_spawn') + if _HAVE_OS_FORK: + parallelism.append('multiprocessing_fork') + parallelism.append('multiprocessing_forkserver') + + runners = { + 'concurrent_jit': [ + jit_runner(nopython=True, parallel=(not _parfors_unsupported)), + ], + 'concurrent_vectorize': [ + vectorize_runner(nopython=True, target='parallel'), + ], + 'concurrent_guvectorize': [ + guvectorize_runner(nopython=True, target='parallel'), + ], + 'concurrent_mix_use': all_impls, + 'concurrent_mix_use_masks': mask_impls, + } + + safe_backends = {'omp', 'tbb'} + + def run_compile(self, fnlist, parallelism='threading'): + self._cache_dir = temp_directory(self.__class__.__name__) + with override_config('CACHE_DIR', self._cache_dir): + if parallelism == 
'threading': + thread_impl(fnlist) + elif parallelism == 'multiprocessing_fork': + fork_proc_impl(fnlist) + elif parallelism == 'multiprocessing_forkserver': + forkserver_proc_impl(fnlist) + elif parallelism == 'multiprocessing_spawn': + spawn_proc_impl(fnlist) + elif parallelism == 'multiprocessing_default': + default_proc_impl(fnlist) + elif parallelism == 'random': + ps = [thread_impl, spawn_proc_impl] + if _HAVE_OS_FORK: + ps.append(fork_proc_impl) + ps.append(forkserver_proc_impl) + + random.shuffle(ps) + for impl in ps: + impl(fnlist) + else: + raise ValueError( + 'Unknown parallelism supplied %s' % parallelism) + + +_specific_backends = config.THREADING_LAYER in ('omp', 'tbb', 'workqueue') + + +@unittest.skipUnless(_specific_backends, "Threading layer not explicit") +class TestParallelBackend(TestParallelBackendBase): + """ These are like the numba.tests.test_threadsafety tests but designed + instead to torture the parallel backend. + If a suitable backend is supplied via NUMBA_THREADING_LAYER these tests + can be run directly. This test class cannot be run using the multiprocessing + option to the test runner (i.e. `./runtests -m`) as daemon processes cannot + have children. + """ + + # NOTE: All tests are generated based on what a platform supports concurrent + # execution wise from Python, irrespective of whether the native libraries + # can actually handle the behaviour present. 
+ @classmethod + def generate(cls): + for p in cls.parallelism: + for name, impl in cls.runners.items(): + methname = "test_" + p + '_' + name + + def methgen(impl, p): + def test_method(self): + selfproc = multiprocessing.current_process() + # daemonized processes cannot have children + if selfproc.daemon: + _msg = 'daemonized processes cannot have children' + self.skipTest(_msg) + else: + self.run_compile(impl, parallelism=p) + return test_method + fn = methgen(impl, p) + fn.__name__ = methname + setattr(cls, methname, fn) + + +TestParallelBackend.generate() + + +class TestInSubprocess(object): + backends = {'tbb': skip_no_tbb, + 'omp': skip_no_omp, + 'workqueue': unittest.skipIf(False, '')} + + def run_cmd(self, cmdline, env): + popen = subprocess.Popen(cmdline, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env) + # finish in _TEST_TIMEOUT seconds or kill it + timeout = threading.Timer(_TEST_TIMEOUT, popen.kill) + try: + timeout.start() + out, err = popen.communicate() + if popen.returncode != 0: + raise AssertionError( + "process failed with code %s: stderr follows\n%s\n" % + (popen.returncode, err.decode())) + return out.decode(), err.decode() + finally: + timeout.cancel() + return None, None + + def run_test_in_separate_process(self, test, threading_layer): + env_copy = os.environ.copy() + env_copy['NUMBA_THREADING_LAYER'] = str(threading_layer) + cmdline = [sys.executable, "-m", "numba.runtests", test] + return self.run_cmd(cmdline, env_copy) + + +class TestSpecificBackend(TestInSubprocess, TestParallelBackendBase): + """ + This is quite contrived, for each test in the TestParallelBackend tests it + generates a test that will run the TestParallelBackend test in a new python + process with an environment modified to ensure a specific threadsafe backend + is used. This is with view of testing the backends independently and in an + isolated manner such that if they hang/crash/have issues, it doesn't kill + the test suite. 
+ """ + _DEBUG = False + + @classmethod + def _inject(cls, p, name, backend, backend_guard): + themod = cls.__module__ + thecls = TestParallelBackend.__name__ + methname = "test_" + p + '_' + name + injected_method = '%s.%s.%s' % (themod, thecls, methname) + + def test_template(self): + o, e = self.run_test_in_separate_process(injected_method, backend) + if self._DEBUG: + print('stdout:\n "%s"\n stderr:\n "%s"' % (o, e)) + # If the test was skipped in the subprocess, then mark this as a + # skipped test. + m = re.search(r"\.\.\. skipped '(.*?)'", e) + if m is not None: + self.skipTest(m.group(1)) + self.assertIn('OK', e) + self.assertTrue('FAIL' not in e) + self.assertTrue('ERROR' not in e) + injected_test = "test_%s_%s_%s" % (p, name, backend) + # Mark as long_running + setattr(cls, injected_test, + tag('long_running')(backend_guard(test_template))) + + @classmethod + def generate(cls): + for backend, backend_guard in cls.backends.items(): + for p in cls.parallelism: + for name in cls.runners.keys(): + # handle known problem cases... 
+ + # GNU OpenMP is not fork safe + if (p in ('multiprocessing_fork', 'random') and + backend == 'omp' and + sys.platform.startswith('linux')): + continue + + # workqueue is not thread safe + if (p in ('threading', 'random') and + backend == 'workqueue'): + continue + + cls._inject(p, name, backend, backend_guard) + + +TestSpecificBackend.generate() + + +class ThreadLayerTestHelper(TestCase): + """ + Helper class for running an isolated piece of code based on a template + """ + # sys path injection and separate usecase module to make sure everything + # is importable by children of multiprocessing + _here = "%r" % os.path.dirname(__file__) + + template = """if 1: + import sys + sys.path.insert(0, "%(here)r") + import multiprocessing + import numpy as np + from numba import njit + import numba + try: + import threading_backend_usecases + except ImportError as e: + print("DEBUG:", sys.path) + raise e + import os + + sigterm_handler = threading_backend_usecases.sigterm_handler + busy_func = threading_backend_usecases.busy_func + + def the_test(): + %%s + + if __name__ == "__main__": + the_test() + """ % {'here': _here} + + def run_cmd(self, cmdline, env=None): + if env is None: + env = os.environ.copy() + env['NUMBA_THREADING_LAYER'] = str("omp") + popen = subprocess.Popen(cmdline, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env) + # finish in _TEST_TIMEOUT seconds or kill it + timeout = threading.Timer(_TEST_TIMEOUT, popen.kill) + try: + timeout.start() + out, err = popen.communicate() + if popen.returncode != 0: + raise AssertionError( + "process failed with code %s: stderr follows\n%s\n" % + (popen.returncode, err.decode())) + finally: + timeout.cancel() + return out.decode(), err.decode() + + +@skip_parfors_unsupported +class TestThreadingLayerSelection(ThreadLayerTestHelper): + """ + Checks that numba.threading_layer() reports correctly. 
+ """ + _DEBUG = False + + backends = {'tbb': skip_no_tbb, + 'omp': skip_no_omp, + 'workqueue': unittest.skipIf(False, '')} + + @classmethod + def _inject(cls, backend, backend_guard): + + def test_template(self): + body = """if 1: + X = np.arange(1000000.) + Y = np.arange(1000000.) + Z = busy_func(X, Y) + assert numba.threading_layer() == '%s' + """ + runme = self.template % (body % backend) + cmdline = [sys.executable, '-c', runme] + env = os.environ.copy() + env['NUMBA_THREADING_LAYER'] = str(backend) + out, err = self.run_cmd(cmdline, env=env) + if self._DEBUG: + print(out, err) + injected_test = "test_threading_layer_selector_%s" % backend + setattr(cls, injected_test, + tag("important")(backend_guard(test_template))) + + @classmethod + def generate(cls): + for backend, backend_guard in cls.backends.items(): + cls._inject(backend, backend_guard) + + +TestThreadingLayerSelection.generate() + + +@skip_parfors_unsupported +class TestThreadingLayerPriority(ThreadLayerTestHelper): + + def each_env_var(self, env_var: str): + """Test setting priority via env var NUMBA_THREADING_LAYER_PRIORITY. 
+ """ + env = os.environ.copy() + env['NUMBA_THREADING_LAYER'] = 'default' + env['NUMBA_THREADING_LAYER_PRIORITY'] = env_var + + code = f""" + import numba + + # trigger threading layer decision + # hence catching invalid THREADING_LAYER_PRIORITY + @numba.jit( + 'float64[::1](float64[::1], float64[::1])', + nopython=True, + parallel=True, + ) + def plus(x, y): + return x + y + + captured_envvar = list("{env_var}".split()) + assert numba.config.THREADING_LAYER_PRIORITY == \ + captured_envvar, "priority mismatch" + assert numba.threading_layer() == captured_envvar[0],\ + "selected backend mismatch" + """ + cmd = [ + sys.executable, + '-c', + textwrap.dedent(code), + ] + self.run_cmd(cmd, env=env) + + @skip_no_omp + @skip_no_tbb + def test_valid_env_var(self): + default = ['tbb', 'omp', 'workqueue'] + for p in itertools.permutations(default): + env_var = ' '.join(p) + self.each_env_var(env_var) + + @skip_no_omp + @skip_no_tbb + def test_invalid_env_var(self): + env_var = 'tbb omp workqueue notvalidhere' + with self.assertRaises(AssertionError) as raises: + self.each_env_var(env_var) + for msg in ( + "THREADING_LAYER_PRIORITY invalid:", + "It must be a permutation of" + ): + self.assertIn(f"{msg}", str(raises.exception)) + + @skip_no_omp + def test_omp(self): + for env_var in ("omp tbb workqueue", "omp workqueue tbb"): + self.each_env_var(env_var) + + @skip_no_tbb + def test_tbb(self): + for env_var in ("tbb omp workqueue", "tbb workqueue omp"): + self.each_env_var(env_var) + + def test_workqueue(self): + for env_var in ("workqueue tbb omp", "workqueue omp tbb"): + self.each_env_var(env_var) + + +@skip_parfors_unsupported +class TestMiscBackendIssues(ThreadLayerTestHelper): + """ + Checks fixes for the issues with threading backends implementation + """ + _DEBUG = False + + @skip_no_omp + def test_omp_stack_overflow(self): + """ + Tests that OMP does not overflow stack + """ + runme = """if 1: + from numba import vectorize, threading_layer + import numpy as np + + 
@vectorize(['f4(f4,f4,f4,f4,f4,f4,f4,f4)'], target='parallel') + def foo(a, b, c, d, e, f, g, h): + return a+b+c+d+e+f+g+h + + x = np.ones(2**20, np.float32) + foo(*([x]*8)) + assert threading_layer() == "omp", "omp not found" + """ + cmdline = [sys.executable, '-c', runme] + env = os.environ.copy() + env['NUMBA_THREADING_LAYER'] = "omp" + env['OMP_STACKSIZE'] = "100K" + self.run_cmd(cmdline, env=env) + + @skip_no_tbb + def test_single_thread_tbb(self): + """ + Tests that TBB works well with single thread + https://github.com/numba/numba/issues/3440 + """ + runme = """if 1: + from numba import njit, prange, threading_layer + + @njit(parallel=True) + def foo(n): + acc = 0 + for i in prange(n): + acc += i + return acc + + foo(100) + assert threading_layer() == "tbb", "tbb not found" + """ + cmdline = [sys.executable, '-c', runme] + env = os.environ.copy() + env['NUMBA_THREADING_LAYER'] = "tbb" + env['NUMBA_NUM_THREADS'] = "1" + self.run_cmd(cmdline, env=env) + + def test_workqueue_aborts_on_nested_parallelism(self): + """ + Tests workqueue raises sigabrt if a nested parallel call is performed + """ + runme = """if 1: + from numba import njit, prange + import numpy as np + + @njit(parallel=True) + def nested(x): + for i in prange(len(x)): + x[i] += 1 + + + @njit(parallel=True) + def main(): + Z = np.zeros((5, 10)) + for i in prange(Z.shape[0]): + nested(Z[i]) + return Z + + main() + """ + cmdline = [sys.executable, '-c', runme] + env = os.environ.copy() + env['NUMBA_THREADING_LAYER'] = "workqueue" + env['NUMBA_NUM_THREADS'] = "4" + + try: + out, err = self.run_cmd(cmdline, env=env) + except AssertionError as e: + if self._DEBUG: + print(out, err) + e_msg = str(e) + self.assertIn("failed with code", e_msg) + # raised a SIGABRT, but the value is platform specific so just check + # the error message + expected = ("Numba workqueue threading layer is terminating: " + "Concurrent access has been detected.") + self.assertIn(expected, e_msg) + + 
@unittest.skipUnless(_HAVE_OS_FORK, "Test needs fork(2)") + def test_workqueue_handles_fork_from_non_main_thread(self): + # For context see #7872, but essentially the multiprocessing pool + # implementation has a number of Python threads for handling the worker + # processes, one of which calls fork(2), this results in a fork from a + # non-main thread. + + runme = """if 1: + from numba import njit, prange, threading_layer + import numpy as np + import multiprocessing + + if __name__ == "__main__": + # Need for force fork context (OSX default is "spawn") + multiprocessing.set_start_method('fork') + + @njit(parallel=True) + def func(x): + return 10. * x + + arr = np.arange(2.) + + # run in single process to start Numba's thread pool + np.testing.assert_allclose(func(arr), func.py_func(arr)) + + # now run in a multiprocessing pool to get a fork from a + # non-main thread + with multiprocessing.Pool(10) as p: + result = p.map(func, [arr]) + np.testing.assert_allclose(result, + func.py_func(np.expand_dims(arr, 0))) + + assert threading_layer() == "workqueue" + """ + cmdline = [sys.executable, '-c', runme] + env = os.environ.copy() + env['NUMBA_THREADING_LAYER'] = "workqueue" + env['NUMBA_NUM_THREADS'] = "4" + + self.run_cmd(cmdline, env=env) + + +# 32bit or windows py27 (not that this runs on windows) +@skip_parfors_unsupported +@skip_unless_gnu_omp +class TestForkSafetyIssues(ThreadLayerTestHelper): + """ + Checks Numba's behaviour in various situations involving GNU OpenMP and fork + """ + _DEBUG = False + + def test_check_threading_layer_is_gnu(self): + runme = """if 1: + from numba.np.ufunc import omppool + assert omppool.openmp_vendor == 'GNU' + """ + cmdline = [sys.executable, '-c', runme] + out, err = self.run_cmd(cmdline) + + def test_par_parent_os_fork_par_child(self): + """ + Whilst normally valid, this actually isn't for Numba invariant of OpenMP + Checks SIGABRT is received. + """ + body = """if 1: + X = np.arange(1000000.) + Y = np.arange(1000000.) 
+ Z = busy_func(X, Y) + pid = os.fork() + if pid == 0: + Z = busy_func(X, Y) + else: + os.wait() + """ + runme = self.template % body + cmdline = [sys.executable, '-c', runme] + try: + out, err = self.run_cmd(cmdline) + except AssertionError as e: + self.assertIn("failed with code -6", str(e)) + + def test_par_parent_implicit_mp_fork_par_child(self): + """ + Implicit use of multiprocessing fork context. + Does this: + 1. Start with OpenMP + 2. Fork to processes using OpenMP (this is invalid) + 3. Joins fork + 4. Check the exception pushed onto the queue that is a result of + catching SIGTERM coming from the C++ aborting on illegal fork + pattern for GNU OpenMP + """ + body = """if 1: + mp = multiprocessing.get_context('fork') + X = np.arange(1000000.) + Y = np.arange(1000000.) + q = mp.Queue() + + # Start OpenMP runtime on parent via parallel function + Z = busy_func(X, Y, q) + + # fork() underneath with no exec, will abort + proc = mp.Process(target = busy_func, args=(X, Y, q)) + proc.start() + + err = q.get() + assert "Caught SIGTERM" in str(err) + """ + runme = self.template % body + cmdline = [sys.executable, '-c', runme] + out, err = self.run_cmd(cmdline) + if self._DEBUG: + print(out, err) + + @linux_only + def test_par_parent_explicit_mp_fork_par_child(self): + """ + Explicit use of multiprocessing fork context. + Does this: + 1. Start with OpenMP + 2. Fork to processes using OpenMP (this is invalid) + 3. Joins fork + 4. Check the exception pushed onto the queue that is a result of + catching SIGTERM coming from the C++ aborting on illegal fork + pattern for GNU OpenMP + """ + body = """if 1: + X = np.arange(1000000.) + Y = np.arange(1000000.) 
+ ctx = multiprocessing.get_context('fork') + q = ctx.Queue() + + # Start OpenMP runtime on parent via parallel function + Z = busy_func(X, Y, q) + + # fork() underneath with no exec, will abort + proc = ctx.Process(target = busy_func, args=(X, Y, q)) + proc.start() + proc.join() + + err = q.get() + assert "Caught SIGTERM" in str(err) + """ + runme = self.template % body + cmdline = [sys.executable, '-c', runme] + out, err = self.run_cmd(cmdline) + if self._DEBUG: + print(out, err) + + def test_par_parent_mp_spawn_par_child_par_parent(self): + """ + Explicit use of multiprocessing spawn, this is safe. + Does this: + 1. Start with OpenMP + 2. Spawn to processes using OpenMP + 3. Join spawns + 4. Run some more OpenMP + """ + body = """if 1: + X = np.arange(1000000.) + Y = np.arange(1000000.) + ctx = multiprocessing.get_context('spawn') + q = ctx.Queue() + + # Start OpenMP runtime and run on parent via parallel function + Z = busy_func(X, Y, q) + procs = [] + for x in range(20): # start a lot to try and get overlap + ## fork() + exec() to run some OpenMP on children + proc = ctx.Process(target = busy_func, args=(X, Y, q)) + procs.append(proc) + sys.stdout.flush() + sys.stderr.flush() + proc.start() + + [p.join() for p in procs] + + try: + q.get(False) + except multiprocessing.queues.Empty: + pass + else: + raise RuntimeError("Queue was not empty") + + # Run some more OpenMP on parent + Z = busy_func(X, Y, q) + """ + runme = self.template % body + cmdline = [sys.executable, '-c', runme] + out, err = self.run_cmd(cmdline) + if self._DEBUG: + print(out, err) + + def test_serial_parent_implicit_mp_fork_par_child_then_par_parent(self): + """ + Implicit use of multiprocessing (will be fork, but cannot declare that + in Py2.7 as there's no process launch context). + Does this: + 1. Start with no OpenMP + 2. Fork to processes using OpenMP + 3. Join forks + 4. Run some OpenMP + """ + body = """if 1: + X = np.arange(1000000.) + Y = np.arange(1000000.) 
+ q = multiprocessing.Queue() + + # this is ok + procs = [] + for x in range(10): + # fork() underneath with but no OpenMP in parent, this is ok + proc = multiprocessing.Process(target = busy_func, + args=(X, Y, q)) + procs.append(proc) + proc.start() + + [p.join() for p in procs] + + # and this is still ok as the OpenMP happened in forks + Z = busy_func(X, Y, q) + try: + q.get(False) + except multiprocessing.queues.Empty: + pass + else: + raise RuntimeError("Queue was not empty") + """ + runme = self.template % body + cmdline = [sys.executable, '-c', runme] + out, err = self.run_cmd(cmdline) + if self._DEBUG: + print(out, err) + + @linux_only + def test_serial_parent_explicit_mp_fork_par_child_then_par_parent(self): + """ + Explicit use of multiprocessing 'fork'. + Does this: + 1. Start with no OpenMP + 2. Fork to processes using OpenMP + 3. Join forks + 4. Run some OpenMP + """ + body = """if 1: + X = np.arange(1000000.) + Y = np.arange(1000000.) + ctx = multiprocessing.get_context('fork') + q = ctx.Queue() + + # this is ok + procs = [] + for x in range(10): + # fork() underneath with but no OpenMP in parent, this is ok + proc = ctx.Process(target = busy_func, args=(X, Y, q)) + procs.append(proc) + proc.start() + + [p.join() for p in procs] + + # and this is still ok as the OpenMP happened in forks + Z = busy_func(X, Y, q) + try: + q.get(False) + except multiprocessing.queues.Empty: + pass + else: + raise RuntimeError("Queue was not empty") + """ + runme = self.template % body + cmdline = [sys.executable, '-c', runme] + out, err = self.run_cmd(cmdline) + if self._DEBUG: + print(out, err) + + +@skip_parfors_unsupported +@skip_no_tbb +class TestTBBSpecificIssues(ThreadLayerTestHelper): + + _DEBUG = False + + @linux_only # os.fork required. + def test_fork_from_non_main_thread(self): + # See issue #5973 and PR #6208 for original context. + # See issue #6963 for context on the following comments: + # + # Important things to note: + # 1. 
Compilation of code containing an objmode block will result in the + # use of and `ObjModeLiftedWith` as the dispatcher. This inherits + # from `LiftedCode` which handles the serialization. In that + # serialization is a call to uuid.uuid1() which causes a fork_exec in + # CPython internals. + # 2. The selected parallel backend thread pool is started during the + # compilation of a function that has `parallel=True`. + # 3. The TBB backend can handle forks from the main thread, it will + # safely reinitialise after so doing. If a fork occurs from a + # non-main thread it will warn and the state is invalid in the child + # process. + # + # Due to 1. and 2. the `obj_mode_func` function separated out and is + # `njit` decorated. This means during type inference of `work` it will + # trigger a standard compilation of the function and the thread pools + # won't have started yet as the parallelisation compiler passes for + # `work` won't yet have run. This mitigates the fork() call from 1. + # occurring after 2. The result of this is that 3. can be tested using + # the threading etc herein with the state being known as the above + # described, i.e. the TBB threading layer has not experienced a fork(). 
+ + runme = """if 1: + import threading + import numba + numba.config.THREADING_LAYER='tbb' + from numba import njit, prange, objmode + from numba.core.serialize import PickleCallableByPath + import os + + e_running = threading.Event() + e_proceed = threading.Event() + + def indirect_core(): + e_running.set() + # wait for forker() to have forked + while not e_proceed.isSet(): + pass + + indirect = PickleCallableByPath(indirect_core) + + @njit + def obj_mode_func(): + with objmode(): + indirect() + + @njit(parallel=True, nogil=True) + def work(): + acc = 0 + for x in prange(10): + acc += x + obj_mode_func() + return acc + + def runner(): + work() + + def forker(): + # wait for the jit function to say it's running + while not e_running.isSet(): + pass + # then fork + os.fork() + # now fork is done signal the runner to proceed to exit + e_proceed.set() + + numba_runner = threading.Thread(target=runner,) + fork_runner = threading.Thread(target=forker,) + + threads = (numba_runner, fork_runner) + for t in threads: + t.start() + for t in threads: + t.join() + """ + + cmdline = [sys.executable, '-c', runme] + out, err = self.run_cmd(cmdline) + # assert error message printed on stderr + msg_head = "Attempted to fork from a non-main thread, the TBB library" + self.assertIn(msg_head, err) + + if self._DEBUG: + print("OUT:", out) + print("ERR:", err) + + @linux_only # fork required. + def test_lifetime_of_task_scheduler_handle(self): + + self.skip_if_no_external_compiler() # external compiler needed + + # See PR #7280 for context. 
+ BROKEN_COMPILERS = 'SKIP: COMPILATION FAILED' + runme = """if 1: + import ctypes + import sys + import multiprocessing as mp + from tempfile import TemporaryDirectory, NamedTemporaryFile + from numba.pycc.platform import Toolchain, external_compiler_works + from numba import njit, prange, threading_layer + import faulthandler + faulthandler.enable() + if not external_compiler_works(): + raise AssertionError('External compilers are not found.') + with TemporaryDirectory() as tmpdir: + with NamedTemporaryFile(dir=tmpdir) as tmpfile: + try: + src = \"\"\" + #define TBB_PREVIEW_WAITING_FOR_WORKERS 1 + #include + static tbb::task_scheduler_handle tsh; + extern "C" + { + void launch(void) + { + tsh = tbb::task_scheduler_handle::get(); + } + } + \"\"\" + cxxfile = f"{tmpfile.name}.cxx" + with open(cxxfile, 'wt') as f: + f.write(src) + tc = Toolchain() + object_files = tc.compile_objects([cxxfile,], + output_dir=tmpdir) + dso_name = f"{tmpfile.name}.so" + tc.link_shared(dso_name, object_files, + libraries=['tbb',], + export_symbols=['launch']) + # Load into the process, it doesn't matter whether the + # DSO exists on disk once it's loaded in. + DLL = ctypes.CDLL(dso_name) + except Exception as e: + # Something is broken in compilation, could be one of + # many things including, but not limited to: missing tbb + # headers, incorrect permissions, compilers that don't + # work for the above + print(e) + print('BROKEN_COMPILERS') + sys.exit(0) + + # Do the test, launch this library and also execute a + # function with the TBB threading layer. + + DLL.launch() + + @njit(parallel=True) + def foo(n): + acc = 0 + for i in prange(n): + acc += i + return acc + + foo(1) + + # Check the threading layer used was TBB + assert threading_layer() == 'tbb' + + # Use mp context for a controlled version of fork, this triggers the + # reported bug. 
+ + ctx = mp.get_context('fork') + def nowork(): + pass + p = ctx.Process(target=nowork) + p.start() + p.join(10) + print("SUCCESS") + """.replace('BROKEN_COMPILERS', BROKEN_COMPILERS) + + cmdline = [sys.executable, '-c', runme] + env = os.environ.copy() + env['NUMBA_THREADING_LAYER'] = 'tbb' + out, err = self.run_cmd(cmdline, env=env) + + if BROKEN_COMPILERS in out: + self.skipTest("Compilation of DSO failed. Check output for details") + else: + self.assertIn("SUCCESS", out) + + if self._DEBUG: + print("OUT:", out) + print("ERR:", err) + + +@skip_parfors_unsupported +class TestInitSafetyIssues(TestCase): + + _DEBUG = False + + def run_cmd(self, cmdline): + popen = subprocess.Popen(cmdline, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE,) + # finish in _TEST_TIMEOUT seconds or kill it + timeout = threading.Timer(_TEST_TIMEOUT, popen.kill) + try: + timeout.start() + out, err = popen.communicate() + if popen.returncode != 0: + raise AssertionError( + "process failed with code %s: stderr follows\n%s\n" % + (popen.returncode, err.decode())) + finally: + timeout.cancel() + return out.decode(), err.decode() + + @linux_only # only linux can leak semaphores + def test_orphaned_semaphore(self): + # sys path injection and separate usecase module to make sure everything + # is importable by children of multiprocessing + + test_file = os.path.join(os.path.dirname(__file__), + "orphaned_semaphore_usecase.py") + cmdline = [sys.executable, test_file] + out, err = self.run_cmd(cmdline) + + # assert no semaphore leaks reported on stderr + self.assertNotIn("leaked semaphore", err) + + if self._DEBUG: + print("OUT:", out) + print("ERR:", err) + + def test_lazy_lock_init(self): + # checks based on https://github.com/numba/numba/pull/5724 + # looking for "lazy" process lock initialisation so as to avoid setting + # a multiprocessing context as part of import. 
+ for meth in ('fork', 'spawn', 'forkserver'): + # if a context is available on the host check it can be set as the + # start method in a separate process + try: + multiprocessing.get_context(meth) + except ValueError: + continue + cmd = ("import numba; import multiprocessing;" + "multiprocessing.set_start_method('{}');" + "print(multiprocessing.get_context().get_start_method())") + cmdline = [sys.executable, "-c", cmd.format(meth)] + out, err = self.run_cmd(cmdline) + if self._DEBUG: + print("OUT:", out) + print("ERR:", err) + self.assertIn(meth, out) + + +@skip_parfors_unsupported +@skip_no_omp +class TestOpenMPVendors(TestCase): + + def test_vendors(self): + """ + Checks the OpenMP vendor strings are correct + """ + expected = dict() + expected['win32'] = "MS" + expected['darwin'] = "Intel" + expected['linux'] = "GNU" + + # only check OS that are supported, custom toolchains may well work as + # may other OS + for k in expected.keys(): + if sys.platform.startswith(k): + self.assertEqual(expected[k], omppool.openmp_vendor) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_parfors.py b/venv/lib/python3.10/site-packages/numba/tests/test_parfors.py new file mode 100644 index 0000000000000000000000000000000000000000..4233ad69ecfa2eb2d0edcf0d2b09abe19669de97 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_parfors.py @@ -0,0 +1,4969 @@ +# +# Copyright (c) 2017 Intel Corporation +# SPDX-License-Identifier: BSD-2-Clause +# + + +import math +import os +import re +import dis +import numbers +import platform +import sys +import subprocess +import types as pytypes +import warnings +from functools import reduce +import numpy as np +from numpy.random import randn +import operator +from collections import defaultdict, namedtuple +import copy +from itertools import cycle, chain +import subprocess as subp + +import numba.parfors.parfor +from numba import (njit, prange, parallel_chunksize, + 
get_parallel_chunksize, set_parallel_chunksize, + set_num_threads, get_num_threads, typeof) +from numba.core import (types, errors, ir, rewrites, + typed_passes, inline_closurecall, config, compiler, cpu) +from numba.typed import Dict, List + +from numba.extending import (overload_method, register_model, + typeof_impl, unbox, NativeValue, models) +from numba.core.registry import cpu_target +from numba.core.annotations import type_annotations +from numba.core.ir_utils import (find_callname, guard, build_definitions, + get_definition, is_getitem, is_setitem, + index_var_of_get_setitem) +from numba.np.unsafe.ndarray import empty_inferred as unsafe_empty +from numba.core.compiler import (CompilerBase, DefaultPassBuilder) +from numba.core.compiler_machinery import register_pass, AnalysisPass +from numba.core.typed_passes import IRLegalization +from numba.tests.support import (TestCase, captured_stdout, MemoryLeakMixin, + override_env_config, linux_only, tag, + skip_parfors_unsupported, _32bit, needs_blas, + needs_lapack, disabled_test, skip_unless_scipy, + needs_subprocess, + skip_ppc64le_invalid_ctr_loop) +from numba.core.extending import register_jitable +from numba.core.bytecode import _fix_LOAD_GLOBAL_arg +from numba.core import utils + +import cmath +import unittest + +# NOTE: Each parfors test class is run in separate subprocess, this is to reduce +# memory pressure in CI settings. The environment variable "SUBPROC_TEST" is +# used to determine whether a test is skipped or not, such that if you want to +# run any parfors test directly this environment variable can be set. The +# subprocesses running the test classes set this environment variable as the new +# process starts which enables the tests within the process. The decorator +# @needs_subprocess is used to ensure the appropriate test skips are made. 
+ + +@skip_parfors_unsupported +class TestParforsRunner(TestCase): + + _numba_parallel_test_ = False + + # Each test class can run for 30 minutes before time out. Extend this to an + # hour on aarch64 (some public CI systems were timing out). + _TIMEOUT = 1800 if platform.machine() != 'aarch64' else 3600 + + """This is the test runner for all the parfors tests, it runs them in + subprocesses as described above. The convention for the test method naming + is: `test_` where is the name of the test class in + this module. + """ + def runner(self): + themod = self.__module__ + test_clazz_name = self.id().split('.')[-1].split('_')[-1] + # don't specify a given test, it's an entire class that needs running + self.subprocess_test_runner(test_module=themod, + test_class=test_clazz_name, + timeout=self._TIMEOUT) + + def test_TestParforBasic(self): + self.runner() + + def test_TestParforNumericalMisc(self): + self.runner() + + def test_TestParforNumPy(self): + self.runner() + + def test_TestParfors(self): + self.runner() + + def test_TestParforsBitMask(self): + self.runner() + + def test_TestParforsDiagnostics(self): + self.runner() + + def test_TestParforsLeaks(self): + self.runner() + + def test_TestParforsMisc(self): + self.runner() + + def test_TestParforsOptions(self): + self.runner() + + def test_TestParforsSlice(self): + self.runner() + + def test_TestParforsVectorizer(self): + self.runner() + + def test_TestPrangeBasic(self): + self.runner() + + def test_TestPrangeSpecific(self): + self.runner() + + +x86_only = unittest.skipIf(platform.machine() not in ('i386', 'x86_64'), 'x86 only test') + +_GLOBAL_INT_FOR_TESTING1 = 17 +_GLOBAL_INT_FOR_TESTING2 = 5 + +TestNamedTuple = namedtuple('TestNamedTuple', ('part0', 'part1')) + + +def null_comparer(a, b): + """ + Used with check_arq_equality to indicate that we do not care + whether the value of the parameter at the end of the function + has a particular value. 
+ """ + pass + + +@needs_subprocess +class TestParforsBase(TestCase): + """ + Base class for testing parfors. + Provides functions for compilation and three way comparison between + python functions, njit'd functions and parfor njit'd functions. + """ + + _numba_parallel_test_ = False + + def _compile_this(self, func, sig, **flags): + # This method originally used `compile_isolated` which returns a + # "CompileResult", hence this does the same. + return njit(sig, **flags)(func).overloads[sig] + + def compile_parallel(self, func, sig): + return self._compile_this(func, sig, parallel=True) + + def compile_parallel_fastmath(self, func, sig): + return self._compile_this(func, sig, parallel=True, fastmath=True) + + def compile_njit(self, func, sig): + return self._compile_this(func, sig) + + def compile_all(self, pyfunc, *args, **kwargs): + sig = tuple([numba.typeof(x) for x in args]) + + # compile the prange injected function + cpfunc = self.compile_parallel(pyfunc, sig) + + # compile a standard njit of the original function + cfunc = self.compile_njit(pyfunc, sig) + + return cfunc, cpfunc + + def check_parfors_vs_others(self, pyfunc, cfunc, cpfunc, *args, **kwargs): + """ + Checks python, njit and parfor impls produce the same result. + + Arguments: + pyfunc - the python function to test + cfunc - CompilerResult from njit of pyfunc + cpfunc - CompilerResult from njit(parallel=True) of pyfunc + args - arguments for the function being tested + Keyword Arguments: + scheduler_type - 'signed', 'unsigned' or None, default is None. + Supply in cases where the presence of a specific + scheduler is to be asserted. + fastmath_pcres - a fastmath parallel compile result, if supplied + will be run to make sure the result is correct + check_arg_equality - some functions need to check that a + parameter is modified rather than a certain + value returned. 
If this keyword argument + is supplied, it should be a list of + comparison functions such that the i'th + function in the list is used to compare the + i'th parameter of the njit and parallel=True + functions against the i'th parameter of the + standard Python function, asserting if they + differ. The length of this list must be equal + to the number of parameters to the function. + The null comparator is available for use + when you do not desire to test if some + particular parameter is changed. + Remaining kwargs are passed to np.testing.assert_almost_equal + """ + scheduler_type = kwargs.pop('scheduler_type', None) + check_fastmath = kwargs.pop('check_fastmath', None) + fastmath_pcres = kwargs.pop('fastmath_pcres', None) + check_scheduling = kwargs.pop('check_scheduling', True) + check_args_for_equality = kwargs.pop('check_arg_equality', None) + + def copy_args(*args): + if not args: + return tuple() + new_args = [] + for x in args: + if isinstance(x, np.ndarray): + new_args.append(x.copy('k')) + elif isinstance(x, np.number): + new_args.append(x.copy()) + elif isinstance(x, numbers.Number): + new_args.append(x) + elif x is None: + new_args.append(x) + elif isinstance(x, tuple): + new_args.append(copy.deepcopy(x)) + elif isinstance(x, list): + new_args.append(x[:]) + elif isinstance(x, Dict): + new_args.append(copy.copy(x)) + elif isinstance(x, List): + new_args.append(copy.copy(x)) + else: + raise ValueError('Unsupported argument type encountered') + return tuple(new_args) + + # python result + py_args = copy_args(*args) + py_expected = pyfunc(*py_args) + + # njit result + njit_args = copy_args(*args) + njit_output = cfunc.entry_point(*njit_args) + + # parfor result + parfor_args = copy_args(*args) + parfor_output = cpfunc.entry_point(*parfor_args) + + if check_args_for_equality is None: + np.testing.assert_almost_equal(njit_output, py_expected, **kwargs) + np.testing.assert_almost_equal(parfor_output, py_expected, **kwargs) + 
self.assertEqual(type(njit_output), type(parfor_output)) + else: + assert(len(py_args) == len(check_args_for_equality)) + for pyarg, njitarg, parforarg, argcomp in zip( + py_args, njit_args, parfor_args, check_args_for_equality): + argcomp(njitarg, pyarg, **kwargs) + argcomp(parforarg, pyarg, **kwargs) + + if check_scheduling: + self.check_scheduling(cpfunc, scheduler_type) + + # if requested check fastmath variant + if fastmath_pcres is not None: + parfor_fastmath_output = fastmath_pcres.entry_point(*copy_args(*args)) + np.testing.assert_almost_equal(parfor_fastmath_output, py_expected, + **kwargs) + + def check(self, pyfunc, *args, **kwargs): + """Checks that pyfunc compiles for *args under parallel=True and njit + and asserts that all version execute and produce the same result""" + cfunc, cpfunc = self.compile_all(pyfunc, *args) + self.check_parfors_vs_others(pyfunc, cfunc, cpfunc, *args, **kwargs) + + def check_variants(self, impl, arg_gen, **kwargs): + """Run self.check(impl, ...) on array data generated from arg_gen. + """ + for args in arg_gen(): + with self.subTest(list(map(typeof, args))): + self.check(impl, *args, **kwargs) + + def count_parfors_variants(self, impl, arg_gen, **kwargs): + """Run self.countParfors(impl, ...) on array types generated from + arg_gen. + """ + for args in arg_gen(): + with self.subTest(list(map(typeof, args))): + argtys = tuple(map(typeof, args)) + # At least one parfors + self.assertGreaterEqual(countParfors(impl, argtys), 1) + + def check_scheduling(self, cres, scheduler_type): + # make sure parfor set up scheduling + scheduler_str = '@do_scheduling' + if scheduler_type is not None: + if scheduler_type in ['signed', 'unsigned']: + scheduler_str += '_' + scheduler_type + else: + msg = "Unknown scheduler_type specified: %s" + raise ValueError(msg % scheduler_type) + + self.assertIn(scheduler_str, cres.library.get_llvm_str()) + + def gen_linspace(self, n, ct): + """Make *ct* sample 1D arrays of length *n* using np.linspace(). 
+ """ + def gen(): + yield np.linspace(0, 1, n) + yield np.linspace(2, 1, n) + yield np.linspace(1, 2, n) + + src = cycle(gen()) + return [next(src) for i in range(ct)] + + def gen_linspace_variants(self, ct): + """Make 1D, 2D, 3D variants of the data in C and F orders + """ + # 1D + yield self.gen_linspace(10, ct=ct) + + # 2D + arr2ds = [x.reshape((2, 3)) + for x in self.gen_linspace(n=2 * 3, ct=ct)] + yield arr2ds + # Fortran order + yield [np.asfortranarray(x) for x in arr2ds] + + # 3D + arr3ds = [x.reshape((2, 3, 4)) + for x in self.gen_linspace(n=2 * 3 * 4, ct=ct)] + yield arr3ds + # Fortran order + yield [np.asfortranarray(x) for x in arr3ds] + + def _filter_mod(self, mod, magicstr, checkstr=None): + """ helper function to filter out modules by name""" + filt = [x for x in mod if magicstr in x.name] + if checkstr is not None: + for x in filt: + assert checkstr in str(x) + return filt + + def _get_gufunc_modules(self, cres, magicstr, checkstr=None): + """ gets the gufunc LLVM Modules""" + _modules = [x for x in cres.library._codegen._engine._ee._modules] + # make sure to only use modules that are actually used by cres and + # aren't just in the EE by virtue of shared compilation context. + potential_matches = self._filter_mod(_modules, magicstr, + checkstr=checkstr) + + lib_asm = cres.library.get_asm_str() + ret = [] + for mod in potential_matches: + if mod.name in lib_asm: + ret.append(mod) + return ret + + def _get_gufunc_info(self, cres, fn): + """ helper for gufunc IR/asm generation""" + # get the gufunc modules + magicstr = '__numba_parfor_gufunc' + gufunc_mods = self._get_gufunc_modules(cres, magicstr) + x = dict() + for mod in gufunc_mods: + x[mod.name] = fn(mod) + return x + + def _get_gufunc_ir(self, cres): + """ + Returns the IR of the gufuncs used as parfor kernels + as a dict mapping the gufunc name to its IR. 
+ + Arguments: + cres - a CompileResult from `njit(parallel=True, ...)` + """ + return self._get_gufunc_info(cres, str) + + def _get_gufunc_asm(self, cres): + """ + Returns the assembly of the gufuncs used as parfor kernels + as a dict mapping the gufunc name to its assembly. + + Arguments: + cres - a CompileResult from `njit(parallel=True, ...)` + """ + tm = cres.library._codegen._tm + def emit_asm(mod): + return str(tm.emit_assembly(mod)) + return self._get_gufunc_info(cres, emit_asm) + + def assert_fastmath(self, pyfunc, sig): + """ + Asserts that the fastmath flag has some effect in that suitable + instructions are now labelled as `fast`. Whether LLVM can actually do + anything to optimise better now the derestrictions are supplied is + another matter! + + Arguments: + pyfunc - a function that contains operations with parallel semantics + sig - the type signature of pyfunc + """ + + cres = self.compile_parallel_fastmath(pyfunc, sig) + _ir = self._get_gufunc_ir(cres) + + def _get_fast_instructions(ir): + splitted = ir.splitlines() + fast_inst = [] + for x in splitted: + m = re.search(r'\bfast\b', x) # \b for wholeword + if m is not None: + fast_inst.append(x) + return fast_inst + + def _assert_fast(instrs): + ops = ('fadd', 'fsub', 'fmul', 'fdiv', 'frem', 'fcmp', 'call') + for inst in instrs: + count = 0 + for op in ops: + match = op + ' fast' + if match in inst: + count += 1 + self.assertTrue(count > 0) + + for name, guir in _ir.items(): + inst = _get_fast_instructions(guir) + _assert_fast(inst) + + +def blackscholes_impl(sptprice, strike, rate, volatility, timev): + # blackscholes example + logterm = np.log(sptprice / strike) + powterm = 0.5 * volatility * volatility + den = volatility * np.sqrt(timev) + d1 = (((rate + powterm) * timev) + logterm) / den + d2 = d1 - den + NofXd1 = 0.5 + 0.5 * 2.0 * d1 + NofXd2 = 0.5 + 0.5 * 2.0 * d2 + futureValue = strike * np.exp(- rate * timev) + c1 = futureValue * NofXd2 + call = sptprice * NofXd1 - c1 + put = call - 
def blackscholes_impl(sptprice, strike, rate, volatility, timev):
    """Black-Scholes-style put pricing kernel used as a parfors workload.

    Arguments are scalars or 1D float arrays (elementwise semantics);
    returns the put price(s).  Note the 0.5 + 0.5*2.0*d terms are a
    simplified stand-in — presumably for the normal CDF — so this is a
    compiler workload, not a faithful finance model.
    """
    # blackscholes example
    logterm = np.log(sptprice / strike)
    powterm = 0.5 * volatility * volatility
    den = volatility * np.sqrt(timev)
    d1 = (((rate + powterm) * timev) + logterm) / den
    d2 = d1 - den
    NofXd1 = 0.5 + 0.5 * 2.0 * d1
    NofXd2 = 0.5 + 0.5 * 2.0 * d2
    futureValue = strike * np.exp(- rate * timev)
    c1 = futureValue * NofXd2
    call = sptprice * NofXd1 - c1
    put = call - futureValue + sptprice
    return put


def lr_impl(Y, X, w, iterations):
    """Logistic regression gradient-descent kernel (mutates and returns w).

    Y: labels (n,), X: features (n, d), w: weights (d,).
    """
    # logistic regression example
    for i in range(iterations):
        w -= np.dot(((1.0 / (1.0 + np.exp(-Y * np.dot(X, w))) - 1.0) * Y), X)
    return w


def example_kmeans_test(A, numCenter, numIter, init_centroids):
    """K-means clustering kernel over A (N, D); returns final centroids."""
    centroids = init_centroids
    N, D = A.shape

    for l in range(numIter):
        # Euclidean distance of each point to each centroid.
        dist = np.array([[math.sqrt(np.sum((A[i, :] - centroids[j, :])**2))
                          for j in range(numCenter)] for i in range(N)])
        labels = np.array([dist[i, :].argmin() for i in range(N)])

        # Recompute each centroid as the mean of its assigned points.
        centroids = np.array([[np.sum(A[labels == i, j]) / np.sum(labels == i)
                               for j in range(D)] for i in range(numCenter)])

    return centroids


def get_optimized_numba_ir(test_func, args, **kws):
    """Run the numba frontend plus the full parfor pass pipeline on
    `test_func` at argument types `args`.

    Returns (test_ir, tp) where `test_ir` is the transformed function IR
    and `tp` is the TestPipeline holding typemap/calltypes state.
    Keyword arguments, if any, become the ParallelOptions config.
    """
    typingctx = cpu_target.typing_context
    targetctx = cpu_target.target_context
    test_ir = compiler.run_frontend(test_func)
    if kws:
        options = cpu.ParallelOptions(kws)
    else:
        options = cpu.ParallelOptions(True)

    tp = TestPipeline(typingctx, targetctx, args, test_ir)

    typingctx.refresh()
    targetctx.refresh()

    inline_pass = inline_closurecall.InlineClosureCallPass(tp.state.func_ir,
                                                           options,
                                                           typed=True)
    inline_pass.run()

    rewrites.rewrite_registry.apply('before-inference', tp.state)

    tp.state.typemap, tp.state.return_type, tp.state.calltypes, _ = \
        typed_passes.type_inference_stage(tp.state.typingctx,
                                          tp.state.targetctx,
                                          tp.state.func_ir,
                                          tp.state.args, None)

    type_annotations.TypeAnnotation(
        func_ir=tp.state.func_ir,
        typemap=tp.state.typemap,
        calltypes=tp.state.calltypes,
        lifted=(),
        lifted_from=None,
        args=tp.state.args,
        return_type=tp.state.return_type,
        html_output=config.HTML)

    diagnostics = numba.parfors.parfor.ParforDiagnostics()

    preparfor_pass = numba.parfors.parfor.PreParforPass(
        tp.state.func_ir, tp.state.typemap, tp.state.calltypes,
        tp.state.typingctx, tp.state.targetctx, options,
        swapped=diagnostics.replaced_fns)
    preparfor_pass.run()

    rewrites.rewrite_registry.apply('after-inference', tp.state)

    flags = compiler.Flags()
    # The three parfor passes must run in this order: discovery/transform,
    # then fusion, then pre-lowering.
    parfor_pass = numba.parfors.parfor.ParforPass(
        tp.state.func_ir, tp.state.typemap, tp.state.calltypes,
        tp.state.return_type, tp.state.typingctx, tp.state.targetctx,
        options, flags, tp.state.metadata, diagnostics=diagnostics)
    parfor_pass.run()
    parfor_pass = numba.parfors.parfor.ParforFusionPass(
        tp.state.func_ir, tp.state.typemap, tp.state.calltypes,
        tp.state.return_type, tp.state.typingctx, tp.state.targetctx,
        options, flags, tp.state.metadata, diagnostics=diagnostics)
    parfor_pass.run()
    parfor_pass = numba.parfors.parfor.ParforPreLoweringPass(
        tp.state.func_ir, tp.state.typemap, tp.state.calltypes,
        tp.state.return_type, tp.state.typingctx, tp.state.targetctx,
        options, flags, tp.state.metadata, diagnostics=diagnostics)
    parfor_pass.run()
    test_ir._definitions = build_definitions(test_ir.blocks)

    return test_ir, tp


def countParfors(test_func, args, **kws):
    """Count top-level Parfor nodes in the optimized IR of `test_func`."""
    test_ir, tp = get_optimized_numba_ir(test_func, args, **kws)
    ret_count = 0

    for block in test_ir.blocks.values():
        for inst in block.body:
            if isinstance(inst, numba.parfors.parfor.Parfor):
                ret_count += 1

    return ret_count


def countArrays(test_func, args, **kws):
    """Count distinct array-typed assignment targets (incl. inside parfors)."""
    test_ir, tp = get_optimized_numba_ir(test_func, args, **kws)
    return _count_arrays_inner(test_ir.blocks, tp.state.typemap)


def get_init_block_size(test_func, args, **kws):
    """Sum the number of statements in all parfor init blocks."""
    test_ir, tp = get_optimized_numba_ir(test_func, args, **kws)
    blocks = test_ir.blocks

    ret_count = 0

    for block in blocks.values():
        for inst in block.body:
            if isinstance(inst, numba.parfors.parfor.Parfor):
                ret_count += len(inst.init_block.body)

    return ret_count


def _count_arrays_inner(blocks, typemap):
    """Recursive helper for countArrays: walks parfor bodies too."""
    ret_count = 0
    arr_set = set()

    for block in blocks.values():
        for inst in block.body:
            if isinstance(inst, numba.parfors.parfor.Parfor):
                # Recurse into the parfor's init block plus its loop body.
                parfor_blocks = inst.loop_body.copy()
                parfor_blocks[0] = inst.init_block
                ret_count += _count_arrays_inner(parfor_blocks, typemap)
            if (isinstance(inst, ir.Assign)
                    and isinstance(typemap[inst.target.name],
                                   types.ArrayCompatible)):
                arr_set.add(inst.target.name)

    ret_count += len(arr_set)
    return ret_count
def countArrayAllocs(test_func, args, **kws):
    # Count numpy array allocations ('empty' / 'empty_inferred' calls) in the
    # optimized IR of `test_func`, including those nested inside parfors.
    test_ir, tp = get_optimized_numba_ir(test_func, args, **kws)
    ret_count = 0

    for block in test_ir.blocks.values():
        ret_count += _count_array_allocs_inner(test_ir, block)

    return ret_count


def _count_array_allocs_inner(func_ir, block):
    # Recursive helper for countArrayAllocs: descends into parfor init blocks
    # and loop bodies before checking the statement itself.
    ret_count = 0
    for inst in block.body:
        if isinstance(inst, numba.parfors.parfor.Parfor):
            ret_count += _count_array_allocs_inner(func_ir, inst.init_block)
            for b in inst.loop_body.values():
                ret_count += _count_array_allocs_inner(func_ir, b)

        # An allocation is a call to numpy 'empty' or to numba's internal
        # 'empty_inferred'.
        if (isinstance(inst, ir.Assign) and isinstance(inst.value, ir.Expr)
                and inst.value.op == 'call'
                and (guard(find_callname, func_ir, inst.value) == ('empty', 'numpy')
                     or guard(find_callname, func_ir, inst.value)
                     == ('empty_inferred', 'numba.np.unsafe.ndarray'))):
            ret_count += 1

    return ret_count


def countNonParforArrayAccesses(test_func, args, **kws):
    # Count array getitem/setitem accesses whose index is NOT derived from a
    # parfor index variable (i.e. accesses parfors did not absorb).
    test_ir, tp = get_optimized_numba_ir(test_func, args, **kws)
    return _count_non_parfor_array_accesses_inner(test_ir, test_ir.blocks,
                                                  tp.state.typemap)


def _count_non_parfor_array_accesses_inner(f_ir, blocks, typemap, parfor_indices=None):
    # Recursive helper for countNonParforArrayAccesses.  `parfor_indices`
    # accumulates parfor index variable names (and their aliases) as the walk
    # descends; note the set is shared (mutated) across recursion levels.
    ret_count = 0
    if parfor_indices is None:
        parfor_indices = set()

    for label, block in blocks.items():
        for stmt in block.body:
            if isinstance(stmt, numba.parfors.parfor.Parfor):
                parfor_indices.add(stmt.index_var.name)
                parfor_blocks = stmt.loop_body.copy()
                parfor_blocks[0] = stmt.init_block
                ret_count += _count_non_parfor_array_accesses_inner(
                    f_ir, parfor_blocks, typemap, parfor_indices)

            # getitem
            elif (is_getitem(stmt) and isinstance(typemap[stmt.value.value.name],
                    types.ArrayCompatible) and not _uses_indices(
                    f_ir, index_var_of_get_setitem(stmt), parfor_indices)):
                ret_count += 1

            # setitem
            elif (is_setitem(stmt) and isinstance(typemap[stmt.target.name],
                    types.ArrayCompatible) and not _uses_indices(
                    f_ir, index_var_of_get_setitem(stmt), parfor_indices)):
                ret_count += 1

            # find parfor_index aliases
            elif (isinstance(stmt, ir.Assign) and
                    isinstance(stmt.value, ir.Var) and
                    stmt.value.name in parfor_indices):
                parfor_indices.add(stmt.target.name)

    return ret_count


def _uses_indices(f_ir, index, index_set):
    # True if `index` is (or, for a build_tuple index, contains) a variable
    # from `index_set`.
    if index.name in index_set:
        return True

    ind_def = guard(get_definition, f_ir, index)
    if isinstance(ind_def, ir.Expr) and ind_def.op == 'build_tuple':
        varnames = set(v.name for v in ind_def.items)
        return len(varnames & index_set) != 0

    return False


class TestPipeline(object):
    # Minimal stand-in compiler pipeline state holder used by
    # get_optimized_numba_ir; the passes read/write `self.state`.
    def __init__(self, typingctx, targetctx, args, test_ir):
        self.state = compiler.StateDict()
        self.state.typingctx = typingctx
        self.state.targetctx = targetctx
        self.state.args = args
        self.state.func_ir = test_ir
        self.state.typemap = None
        self.state.return_type = None
        self.state.calltypes = None
        self.state.metadata = {}
@skip_parfors_unsupported
class TestParforBasic(TestParforsBase):
    """Smoke tests for the parfors transforms. These tests check the most basic
    functionality"""

    def __init__(self, *args):
        TestParforsBase.__init__(self, *args)
        # these are used in the mass of simple tests
        m = np.reshape(np.arange(12.), (3, 4))
        self.simple_args = [np.arange(3.), np.arange(4.), m, m.T]

    def test_simple01(self):
        # 0-d result: no parfor should be generated, so check() must fail to
        # find the scheduler symbol.
        def test_impl():
            return np.ones(())
        with self.assertRaises(AssertionError) as raises:
            self.check(test_impl)
        self.assertIn("\'@do_scheduling\' not found", str(raises.exception))

    def test_simple02(self):
        def test_impl():
            return np.ones((1,))
        self.check(test_impl)

    def test_simple03(self):
        def test_impl():
            return np.ones((1, 2))
        self.check(test_impl)

    def test_simple04(self):
        def test_impl():
            return np.ones(1)
        self.check(test_impl)

    def test_simple07(self):
        def test_impl():
            return np.ones((1, 2), dtype=np.complex128)
        self.check(test_impl)

    def test_simple08(self):
        def test_impl():
            return np.ones((1, 2)) + np.ones((1, 2))
        self.check(test_impl)

    def test_simple09(self):
        def test_impl():
            return np.ones((1, 1))
        self.check(test_impl)

    def test_simple10(self):
        def test_impl():
            return np.ones((0, 0))
        self.check(test_impl)

    def test_simple11(self):
        def test_impl():
            return np.ones((10, 10)) + 1.
        self.check(test_impl)

    def test_simple12(self):
        def test_impl():
            return np.ones((10, 10)) + np.complex128(1.)
        self.check(test_impl)

    def test_simple13(self):
        # Scalar-only work: no parfor expected.
        def test_impl():
            return np.complex128(1.)
        with self.assertRaises(AssertionError) as raises:
            self.check(test_impl)
        self.assertIn("\'@do_scheduling\' not found", str(raises.exception))

    def test_simple14(self):
        def test_impl():
            return np.ones((10, 10))[0::20]
        self.check(test_impl)

    def test_simple15(self):
        def test_impl(v1, v2, m1, m2):
            return v1 + v1
        self.check(test_impl, *self.simple_args)

    def test_simple16(self):
        def test_impl(v1, v2, m1, m2):
            return m1 + m1
        self.check(test_impl, *self.simple_args)

    def test_simple17(self):
        def test_impl(v1, v2, m1, m2):
            return m2 + v1
        self.check(test_impl, *self.simple_args)

    @needs_lapack
    def test_simple18(self):
        def test_impl(v1, v2, m1, m2):
            return m1.T + np.linalg.svd(m2)[1]
        self.check(test_impl, *self.simple_args)

    @needs_blas
    def test_simple19(self):
        def test_impl(v1, v2, m1, m2):
            return np.dot(m1, v2)
        self.check(test_impl, *self.simple_args)

    @needs_blas
    def test_simple20(self):
        def test_impl(v1, v2, m1, m2):
            return np.dot(m1, m2)
        # gemm is left to BLAS
        with self.assertRaises(AssertionError) as raises:
            self.check(test_impl, *self.simple_args)
        self.assertIn("\'@do_scheduling\' not found", str(raises.exception))

    @needs_blas
    def test_simple21(self):
        def test_impl(v1, v2, m1, m2):
            return np.dot(v1, v1)
        self.check(test_impl, *self.simple_args)

    def test_simple22(self):
        def test_impl(v1, v2, m1, m2):
            return np.sum(v1 + v1)
        self.check(test_impl, *self.simple_args)

    def test_simple23(self):
        def test_impl(v1, v2, m1, m2):
            x = 2 * v1
            y = 2 * v1
            return 4 * np.sum(x**2 + y**2 < 1) / 10
        self.check(test_impl, *self.simple_args)

    def test_simple24(self):
        def test_impl():
            n = 20
            A = np.ones((n, n))
            b = np.arange(n)
            return np.sum(A[:, b])
        self.check(test_impl)

    @disabled_test
    def test_simple_operator_15(self):
        """same as corresponding test_simple_ case but using operator.add"""
        def test_impl(v1, v2, m1, m2):
            return operator.add(v1, v1)

        self.check(test_impl, *self.simple_args)

    @disabled_test
    def test_simple_operator_16(self):
        def test_impl(v1, v2, m1, m2):
            return operator.add(m1, m1)

        self.check(test_impl, *self.simple_args)

    @disabled_test
    def test_simple_operator_17(self):
        def test_impl(v1, v2, m1, m2):
            return operator.add(m2, v1)

        self.check(test_impl, *self.simple_args)

    def test_inplace_alias(self):
        # issue7201
        def test_impl(a):
            a += 1
            a[:] = 3

        def comparer(a, b):
            np.testing.assert_equal(a, b)

        # The kernel mutates its argument and returns nothing, so compare the
        # argument itself via check_arg_equality.
        x = np.ones(1)
        self.check(test_impl, x, check_arg_equality=[comparer])


@skip_parfors_unsupported
class TestParforNumericalMisc(TestParforsBase):
    """ Miscellaneous 'classical' numerical tests """

    def test_pi(self):
        # Monte-Carlo pi estimate; loose tolerance (decimal=1) because the
        # parallel RNG streams differ from the sequential one.
        def test_impl(n):
            x = 2 * np.random.ranf(n) - 1
            y = 2 * np.random.ranf(n) - 1
            return 4 * np.sum(x**2 + y**2 < 1) / n

        self.check(test_impl, 100000, decimal=1)
        self.assertEqual(countParfors(test_impl, (types.int64, )), 1)
        self.assertEqual(countArrays(test_impl, (types.intp,)), 0)

    def test_blackscholes(self):
        # blackscholes takes 5 1D float array args
        args = (numba.float64[:], ) * 5
        self.assertEqual(countParfors(blackscholes_impl, args), 1)

    @needs_blas
    def test_logistic_regression(self):
        args = (numba.float64[:], numba.float64[:, :], numba.float64[:],
                numba.int64)
        self.assertEqual(countParfors(lr_impl, args), 2)
        self.assertEqual(countArrayAllocs(lr_impl, args), 1)

    def test_kmeans(self):
        np.random.seed(0)
        N = 1024
        D = 10
        centers = 3
        A = np.random.ranf((N, D))
        init_centroids = np.random.ranf((centers, D))
        self.check(example_kmeans_test, A, centers, 3, init_centroids,
                   decimal=1)
        # TODO: count parfors after k-means fusion is working
        # requires recursive parfor counting
        arg_typs = (types.Array(types.float64, 2, 'C'), types.intp, types.intp,
                    types.Array(types.float64, 2, 'C'))
        self.assertEqual(
            countNonParforArrayAccesses(example_kmeans_test, arg_typs), 0)
@skip_parfors_unsupported
class TestParforNumPy(TestParforsBase):
    """Tests NumPy functionality under parfors"""

    @needs_blas
    def test_mvdot(self):
        def test_impl(a, v):
            return np.dot(a, v)

        A = np.linspace(0, 1, 20).reshape(2, 10)
        v = np.linspace(2, 1, 10)

        self.check(test_impl, A, v)

    def test_fuse_argmin_argmax_max_min(self):
        # The reduction and the sum should fuse into a single parfor.
        for op in [np.argmin, np.argmax, np.min, np.max]:
            def test_impl(n):
                A = np.ones(n)
                C = op(A)
                B = A.sum()
                return B + C
            self.check(test_impl, 256)
            self.assertEqual(countParfors(test_impl, (types.int64, )), 1)
            self.assertEqual(countArrays(test_impl, (types.intp,)), 0)

    def test_np_random_func_direct_import(self):
        def test_impl(n):
            A = randn(n)
            return A[0]
        self.assertEqual(countParfors(test_impl, (types.int64, )), 1)

    def test_arange(self):
        # test with stop only
        def test_impl1(n):
            return np.arange(n)
        # start and stop

        def test_impl2(s, n):
            return np.arange(s, n)
        # start, step, stop

        def test_impl3(s, n, t):
            return np.arange(s, n, t)

        for arg in [11, 128, 30.0, complex(4, 5), complex(5, 4)]:
            self.check(test_impl1, arg)
            self.check(test_impl2, 2, arg)
            self.check(test_impl3, 2, arg, 2)

    def test_arange_dtype(self):
        # test with stop only
        def test_impl1(n):
            return np.arange(n, dtype=np.float32)
        # start and stop

        def test_impl2(s, n):
            return np.arange(s, n, dtype=np.float32)
        # start, step, stop

        def test_impl3(s, n, t):
            return np.arange(s, n, t, dtype=np.float32)

        for arg in [11, 128, 30.0]:
            self.check(test_impl1, arg)
            self.check(test_impl2, 2, arg)
            self.check(test_impl3, 2, arg, 2)

    def test_linspace(self):
        # without num
        def test_impl1(start, stop):
            return np.linspace(start, stop)
        # with num

        def test_impl2(start, stop, num):
            return np.linspace(start, stop, num)

        for arg in [11, 128, 30.0, complex(4, 5), complex(5, 4)]:
            self.check(test_impl1, 2, arg)
            self.check(test_impl2, 2, arg, 30)

    def test_mean(self):
        def test_impl(A):
            return A.mean()
        N = 100
        A = np.random.ranf(N)
        B = np.random.randint(10, size=(N, 3))
        self.check(test_impl, A)
        self.check(test_impl, B)
        self.assertEqual(countParfors(test_impl, (types.Array(types.float64, 1, 'C'), )), 1)
        self.assertEqual(countParfors(test_impl, (types.Array(types.float64, 2, 'C'), )), 1)

        # Test variants
        data_gen = lambda: self.gen_linspace_variants(1)
        self.check_variants(test_impl, data_gen)
        self.count_parfors_variants(test_impl, data_gen)

    def test_var(self):
        def test_impl(A):
            return A.var()
        N = 100
        A = np.random.ranf(N)
        B = np.random.randint(10, size=(N, 3))
        C = A + 1j * A
        self.check(test_impl, A)
        self.check(test_impl, B)
        self.check(test_impl, C)
        self.assertEqual(countParfors(test_impl, (types.Array(types.float64, 1, 'C'), )), 2)
        self.assertEqual(countParfors(test_impl, (types.Array(types.float64, 2, 'C'), )), 2)

        # Test variants
        data_gen = lambda: self.gen_linspace_variants(1)
        self.check_variants(test_impl, data_gen)
        self.count_parfors_variants(test_impl, data_gen)

    def test_std(self):
        def test_impl(A):
            return A.std()
        N = 100
        A = np.random.ranf(N)
        B = np.random.randint(10, size=(N, 3))
        C = A + 1j * A
        self.check(test_impl, A)
        self.check(test_impl, B)
        self.check(test_impl, C)
        # Fixed: previously the 1-D argty was asserted twice and the 2-D case
        # was never checked (cf. test_var, which checks both).
        argty = (types.Array(types.float64, 1, 'C'),)
        self.assertEqual(countParfors(test_impl, argty), 2)
        argty = (types.Array(types.float64, 2, 'C'),)
        self.assertEqual(countParfors(test_impl, argty), 2)

        # Test variants
        data_gen = lambda: self.gen_linspace_variants(1)
        self.check_variants(test_impl, data_gen)
        self.count_parfors_variants(test_impl, data_gen)

    def test_random_parfor(self):
        """
        Test function with only a random call to make sure a random function
        like ranf is actually translated to a parfor.
        """
        def test_impl(n):
            A = np.random.ranf((n, n))
            return A
        self.assertEqual(countParfors(test_impl, (types.int64, )), 1)

    def test_randoms(self):
        def test_impl(n):
            A = np.random.standard_normal(size=(n, n))
            B = np.random.randn(n, n)
            C = np.random.normal(0.0, 1.0, (n, n))
            D = np.random.chisquare(1.0, (n, n))
            E = np.random.randint(1, high=3, size=(n, n))
            F = np.random.triangular(1, 2, 3, (n, n))
            return np.sum(A + B + C + D + E + F)

        n = 128
        cpfunc = self.compile_parallel(test_impl, (numba.typeof(n),))
        parfor_output = cpfunc.entry_point(n)
        py_output = test_impl(n)
        # check results within 5% since random numbers generated in parallel
        np.testing.assert_allclose(parfor_output, py_output, rtol=0.05)
        self.assertEqual(countParfors(test_impl, (types.int64, )), 1)

    def test_dead_randoms(self):
        # All the random arrays are dead; their parfors should be eliminated.
        def test_impl(n):
            A = np.random.standard_normal(size=(n, n))
            B = np.random.randn(n, n)
            C = np.random.normal(0.0, 1.0, (n, n))
            D = np.random.chisquare(1.0, (n, n))
            E = np.random.randint(1, high=3, size=(n, n))
            F = np.random.triangular(1, 2, 3, (n, n))
            return 3

        n = 128
        cpfunc = self.compile_parallel(test_impl, (numba.typeof(n),))
        parfor_output = cpfunc.entry_point(n)
        py_output = test_impl(n)
        self.assertEqual(parfor_output, py_output)
        self.assertEqual(countParfors(test_impl, (types.int64, )), 0)

    def test_min(self):
        def test_impl1(A):
            return A.min()

        def test_impl2(A):
            return np.min(A)

        n = 211
        A = np.random.ranf(n)
        B = np.random.randint(10, size=n).astype(np.int32)
        C = np.random.ranf((n, n))  # test multi-dimensional array
        D = np.array([np.inf, np.inf])
        self.check(test_impl1, A)
        self.check(test_impl1, B)
        self.check(test_impl1, C)
        self.check(test_impl1, D)
        self.check(test_impl2, A)
        self.check(test_impl2, B)
        self.check(test_impl2, C)
        self.check(test_impl2, D)

        # checks that 0d array input raises
        msg = ("zero-size array to reduction operation "
               "minimum which has no identity")
        for impl in (test_impl1, test_impl2):
            pcfunc = self.compile_parallel(impl, (types.int64[:],))
            with self.assertRaises(ValueError) as e:
                pcfunc.entry_point(np.array([], dtype=np.int64))
            self.assertIn(msg, str(e.exception))

        # Test variants
        data_gen = lambda: self.gen_linspace_variants(1)
        self.check_variants(test_impl1, data_gen)
        self.count_parfors_variants(test_impl1, data_gen)
        self.check_variants(test_impl2, data_gen)
        self.count_parfors_variants(test_impl2, data_gen)

    def test_max(self):
        def test_impl1(A):
            return A.max()

        def test_impl2(A):
            return np.max(A)

        n = 211
        A = np.random.ranf(n)
        B = np.random.randint(10, size=n).astype(np.int32)
        C = np.random.ranf((n, n))  # test multi-dimensional array
        D = np.array([-np.inf, -np.inf])
        self.check(test_impl1, A)
        self.check(test_impl1, B)
        self.check(test_impl1, C)
        self.check(test_impl1, D)
        self.check(test_impl2, A)
        self.check(test_impl2, B)
        self.check(test_impl2, C)
        self.check(test_impl2, D)

        # checks that 0d array input raises
        msg = ("zero-size array to reduction operation "
               "maximum which has no identity")
        for impl in (test_impl1, test_impl2):
            pcfunc = self.compile_parallel(impl, (types.int64[:],))
            with self.assertRaises(ValueError) as e:
                pcfunc.entry_point(np.array([], dtype=np.int64))
            self.assertIn(msg, str(e.exception))

        # Test variants
        data_gen = lambda: self.gen_linspace_variants(1)
        self.check_variants(test_impl1, data_gen)
        self.count_parfors_variants(test_impl1, data_gen)
        self.check_variants(test_impl2, data_gen)
        self.count_parfors_variants(test_impl2, data_gen)

    def test_argmax(self):
        def test_impl1(A):
            return A.argmax()

        def test_impl2(A):
            return np.argmax(A)

        n = 211
        A = np.array([1., 0., 3., 2., 3.])
        B = np.random.randint(10, size=n).astype(np.int32)
        C = np.random.ranf((n, n))  # test multi-dimensional array
        D = np.array([1., 0., np.nan, 2., 3.])
        self.check(test_impl1, A)
        self.check(test_impl1, B)
        self.check(test_impl1, C)
        self.check(test_impl1, D)
        self.check(test_impl2, A)
        self.check(test_impl2, B)
        self.check(test_impl2, C)
        self.check(test_impl2, D)

        # checks that 0d array input raises
        msg = 'attempt to get argmax of an empty sequence'
        for impl in (test_impl1, test_impl2):
            pcfunc = self.compile_parallel(impl, (types.int64[:],))
            with self.assertRaises(ValueError) as e:
                pcfunc.entry_point(np.array([], dtype=np.int64))
            self.assertIn(msg, str(e.exception))

        # Test variants
        data_gen = lambda: self.gen_linspace_variants(1)
        self.check_variants(test_impl1, data_gen)
        self.count_parfors_variants(test_impl1, data_gen)
        self.check_variants(test_impl2, data_gen)
        self.count_parfors_variants(test_impl2, data_gen)

    def test_argmin(self):
        def test_impl1(A):
            return A.argmin()

        def test_impl2(A):
            return np.argmin(A)

        n = 211
        A = np.array([1., 0., 2., 0., 3.])
        B = np.random.randint(10, size=n).astype(np.int32)
        C = np.random.ranf((n, n))  # test multi-dimensional array
        D = np.array([1., 0., np.nan, 0., 3.])
        self.check(test_impl1, A)
        self.check(test_impl1, B)
        self.check(test_impl1, C)
        self.check(test_impl1, D)
        self.check(test_impl2, A)
        self.check(test_impl2, B)
        self.check(test_impl2, C)
        self.check(test_impl2, D)

        # checks that 0d array input raises
        msg = 'attempt to get argmin of an empty sequence'
        for impl in (test_impl1, test_impl2):
            pcfunc = self.compile_parallel(impl, (types.int64[:],))
            with self.assertRaises(ValueError) as e:
                pcfunc.entry_point(np.array([], dtype=np.int64))
            self.assertIn(msg, str(e.exception))

        # Test variants
        data_gen = lambda: self.gen_linspace_variants(1)
        self.check_variants(test_impl1, data_gen)
        self.count_parfors_variants(test_impl1, data_gen)
        self.check_variants(test_impl2, data_gen)
        self.count_parfors_variants(test_impl2, data_gen)

    def test_ndarray_fill(self):
        def test_impl(x):
            x.fill(7.0)
            return x
        x = np.zeros(10)
        self.check(test_impl, x)
        argty = (types.Array(types.float64, 1, 'C'),)
        self.assertEqual(countParfors(test_impl, argty), 1)

    def test_ndarray_fill2d(self):
        def test_impl(x):
            x.fill(7.0)
            return x
        x = np.zeros((2, 2))
        self.check(test_impl, x)
        argty = (types.Array(types.float64, 2, 'C'),)
        self.assertEqual(countParfors(test_impl, argty), 1)

    def test_reshape_with_neg_one(self):
        # issue3314
        def test_impl(a, b):
            result_matrix = np.zeros((b, b, 1), dtype=np.float64)
            sub_a = a[0:b]
            a = sub_a.size
            b = a / 1
            z = sub_a.reshape(-1, 1)
            result_data = sub_a / z
            result_matrix[:, :, 0] = result_data
            return result_matrix

        a = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0,
                      7.0, 8.0, 9.0, 10.0, 11.0, 12.0])
        b = 3

        self.check(test_impl, a, b)

    def test_reshape_with_large_neg(self):
        # issue3314
        def test_impl(a, b):
            result_matrix = np.zeros((b, b, 1), dtype=np.float64)
            sub_a = a[0:b]
            a = sub_a.size
            b = a / 1
            z = sub_a.reshape(-1307, 1)
            result_data = sub_a / z
            result_matrix[:, :, 0] = result_data
            return result_matrix

        a = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0,
                      7.0, 8.0, 9.0, 10.0, 11.0, 12.0])
        b = 3

        self.check(test_impl, a, b)

    def test_reshape_with_too_many_neg_one(self):
        # issue3314
        with self.assertRaises(errors.UnsupportedRewriteError) as raised:
            @njit(parallel=True)
            def test_impl(a, b):
                rm = np.zeros((b, b, 1), dtype=np.float64)
                sub_a = a[0:b]
                a = sub_a.size
                b = a / 1
                z = sub_a.reshape(-1, -1)
                result_data = sub_a / z
                rm[:, :, 0] = result_data
                return rm

            a = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0,
                          7.0, 8.0, 9.0, 10.0, 11.0, 12.0])
            b = 3
            test_impl(a, b)

        msg = ("The reshape API may only include one negative argument.")
        self.assertIn(msg, str(raised.exception))

    def test_0d_array(self):
        def test_impl(n):
            return np.sum(n) + np.prod(n) + np.min(n) + np.max(n) + np.var(n)
        self.check(test_impl, np.array(7), check_scheduling=False)

    def test_real_imag_attr(self):
        # See issue 8012
        def test_impl(z):
            return np.sum(z.real ** 2 + z.imag ** 2)

        z = np.arange(5) * (1 + 1j)
        self.check(test_impl, z)
        self.assertEqual(countParfors(test_impl, (types.complex128[::1],)), 1)


class TestParforsUnsupported(TestCase):
    """Tests for unsupported use of parfors"""
    @unittest.skipIf(not _32bit, "Only impacts 32 bit hardware")
    @needs_blas
    def test_unsupported_combination_raises(self):
        """
        This test is in place until issues with the 'parallel'
        target on 32 bit hardware are fixed.
        """
        with self.assertRaises(errors.UnsupportedParforsError) as raised:
            @njit(parallel=True)
            def ddot(a, v):
                return np.dot(a, v)

            A = np.linspace(0, 1, 20).reshape(2, 10)
            v = np.linspace(2, 1, 10)
            ddot(A, v)

        msg = ("The 'parallel' target is not currently supported on 32 bit "
               "hardware")
        self.assertIn(msg, str(raised.exception))
+ return A[0] + n = 111 + self.check(test_impl, n) + + def test_size_assertion(self): + def test_impl(m, n): + A = np.ones(m) + B = np.ones(n) + return np.sum(A + B) + + self.check(test_impl, 10, 10) + with self.assertRaises(AssertionError) as raises: + cfunc = njit(parallel=True)(test_impl) + cfunc(10, 9) + msg = "Sizes of A, B do not match" + self.assertIn(msg, str(raises.exception)) + + def test_cfg(self): + # from issue #2477 + def test_impl(x, is_positive, N): + for i in numba.prange(2): + for j in range( i*N//2, (i+1)*N//2 ): + is_positive[j] = 0 + if x[j] > 0: + is_positive[j] = 1 + + return is_positive + + N = 100 + x = np.random.rand(N) + is_positive = np.zeros(N) + self.check(test_impl, x, is_positive, N) + + def test_reduce(self): + def test_impl(A): + init_val = 10 + return reduce(lambda a,b: min(a, b), A, init_val) + + n = 211 + A = np.random.ranf(n) + self.check(test_impl, A) + A = np.random.randint(10, size=n).astype(np.int32) + self.check(test_impl, A) + + # test checking the number of arguments for the reduce function + def test_impl(): + g = lambda x: x ** 2 + return reduce(g, np.array([1, 2, 3, 4, 5]), 2) + with self.assertTypingError(): + self.check(test_impl) + + # test checking reduction over bitarray masked arrays + n = 160 + A = np.random.randint(10, size=n).astype(np.int32) + def test_impl(A): + return np.sum(A[A>=3]) + self.check(test_impl, A) + # TODO: this should fuse + # self.assertTrue(countParfors(test_impl, (numba.float64[:],)) == 1) + + def test_impl(A): + B = A[:,0] + return np.sum(A[B>=3,1]) + self.check(test_impl, A.reshape((16,10))) + # TODO: this should also fuse + #self.assertTrue(countParfors(test_impl, (numba.float64[:,:],)) == 1) + + def test_impl(A): + B = A[:,0] + return np.sum(A[B>=3,1:2]) + self.check(test_impl, A.reshape((16,10))) + # this doesn't fuse due to mixed indices + self.assertEqual(countParfors(test_impl, (numba.float64[:,:],)), 2) + + def test_impl(A): + min_val = np.amin(A) + return A - min_val + 
self.check(test_impl, A) + # this doesn't fuse due to use of reduction variable + self.assertEqual(countParfors(test_impl, (numba.float64[:],)), 2) + + def test_use_of_reduction_var1(self): + def test_impl(): + acc = 0 + for i in prange(1): + acc = cmath.sqrt(acc) + return acc + + # checks that invalid use of reduction variable is detected + msg = ("Use of reduction variable acc in an unsupported reduction function.") + with self.assertRaises(ValueError) as e: + pcfunc = self.compile_parallel(test_impl, ()) + self.assertIn(msg, str(e.exception)) + + def test_unsupported_floordiv1(self): + def test_impl(): + acc = 100 + for i in prange(2): + acc //= 2 + return acc + + # checks that invalid use of ifloordiv reduction operator is detected + msg = ("Parallel floordiv reductions are not supported. " + "If all divisors are integers then a floordiv " + "reduction can in some cases be parallelized as " + "a multiply reduction followed by a floordiv of " + "the resulting product.") + with self.assertRaises(errors.NumbaValueError) as e: + pcfunc = self.compile_parallel(test_impl, ()) + self.assertIn(msg, str(e.exception)) + + def test_unsupported_xor1(self): + def test_impl(): + acc = 100 + for i in prange(2): + acc ^= i + 2 + return acc + + msg = ("Use of reduction variable acc in an unsupported reduction function.") + with self.assertRaises(ValueError) as e: + pcfunc = self.compile_parallel(test_impl, ()) + self.assertIn(msg, str(e.exception)) + + def test_parfor_array_access1(self): + # signed index of the prange generated by sum() should be replaced + # resulting in array A to be eliminated (see issue #2846) + def test_impl(n): + A = np.ones(n) + return A.sum() + + n = 211 + self.check(test_impl, n) + self.assertEqual(countArrays(test_impl, (types.intp,)), 0) + + def test_parfor_array_access2(self): + # in this test, the prange index has the same name (i) in two loops + # thus, i has multiple definitions and is harder to replace + def test_impl(n): + A = np.ones(n) + m = 
0 + n = 0 + for i in numba.prange(len(A)): + m += A[i] + + for i in numba.prange(len(A)): + if m == n: # access in another block + n += A[i] + + return m + n + + n = 211 + self.check(test_impl, n) + self.assertEqual(countNonParforArrayAccesses(test_impl, (types.intp,)), 0) + + def test_parfor_array_access3(self): + def test_impl(n): + A = np.ones(n, np.int64) + m = 0 + for i in numba.prange(len(A)): + m += A[i] + if m==2: + i = m + + n = 211 + with self.assertRaises(errors.UnsupportedRewriteError) as raises: + self.check(test_impl, n) + self.assertIn("Overwrite of parallel loop index", str(raises.exception)) + + @needs_blas + def test_parfor_array_access4(self): + # in this test, one index of a multi-dim access should be replaced + # np.dot parallel implementation produces this case + def test_impl(A, b): + return np.dot(A, b) + + n = 211 + d = 4 + A = np.random.ranf((n, d)) + b = np.random.ranf(d) + self.check(test_impl, A, b) + # make sure the parfor index is replaced in build_tuple of access to A + test_ir, tp = get_optimized_numba_ir( + test_impl, (types.Array(types.float64, 2, 'C'), + types.Array(types.float64, 1, 'C'))) + # this code should have one basic block after optimization + self.assertTrue(len(test_ir.blocks) == 1 and 0 in test_ir.blocks) + block = test_ir.blocks[0] + parfor_found = False + parfor = None + for stmt in block.body: + if isinstance(stmt, numba.parfors.parfor.Parfor): + parfor_found = True + parfor = stmt + + self.assertTrue(parfor_found) + build_tuple_found = False + # there should be only one build_tuple + for bl in parfor.loop_body.values(): + for stmt in bl.body: + if (isinstance(stmt, ir.Assign) + and isinstance(stmt.value, ir.Expr) + and stmt.value.op == 'build_tuple'): + build_tuple_found = True + self.assertTrue(parfor.index_var in stmt.value.items) + + self.assertTrue(build_tuple_found) + + def test_parfor_dtype_type(self): + # test array type replacement creates proper type + def test_impl(a): + for i in numba.prange(len(a)): + 
a[i] = a.dtype.type(0) + return a[4] + + a = np.ones(10) + self.check(test_impl, a) + + def test_parfor_array_access5(self): + # one dim is slice in multi-dim access + def test_impl(n): + X = np.ones((n, 3)) + y = 0 + for i in numba.prange(n): + y += X[i,:].sum() + return y + + n = 211 + self.check(test_impl, n) + self.assertEqual(countNonParforArrayAccesses(test_impl, (types.intp,)), 0) + + @disabled_test # Test itself is problematic, see #3155 + def test_parfor_hoist_setitem(self): + # Make sure that read of out is not hoisted. + def test_impl(out): + for i in prange(10): + out[0] = 2 * out[0] + return out[0] + + out = np.ones(1) + self.check(test_impl, out) + + @needs_blas + def test_parfor_generate_fuse(self): + # issue #2857 + def test_impl(N, D): + w = np.ones(D) + X = np.ones((N, D)) + Y = np.ones(N) + for i in range(3): + B = (-Y * np.dot(X, w)) + + return B + + n = 211 + d = 3 + self.check(test_impl, n, d) + self.assertEqual(countArrayAllocs(test_impl, (types.intp, types.intp)), 4) + self.assertEqual(countParfors(test_impl, (types.intp, types.intp)), 4) + + def test_ufunc_expr(self): + # issue #2885 + def test_impl(A, B): + return np.bitwise_and(A, B) + + A = np.ones(3, np.uint8) + B = np.ones(3, np.uint8) + B[1] = 0 + self.check(test_impl, A, B) + + def test_find_callname_intrinsic(self): + def test_impl(n): + A = unsafe_empty((n,)) + for i in range(n): + A[i] = i + 2.0 + return A + + # the unsafe allocation should be found even though it is imported + # as a different name + self.assertEqual(countArrayAllocs(test_impl, (types.intp,)), 1) + + def test_reduction_var_reuse(self): + # issue #3139 + def test_impl(n): + acc = 0 + for i in prange(n): + acc += 1 + + for i in prange(n): + acc += 2 + + return acc + self.check(test_impl, 16) + + def test_non_identity_initial(self): + # issue #7344 + def test_impl(A, cond): + s = 1 + for i in prange(A.shape[0]): + if cond[i]: + s += 1 + return s + self.check(test_impl, np.ones(10), np.ones(10).astype('bool')) + + 
def test_if_not_else_reduction(self): + # issue #7344 + def test_impl(A, cond): + s = 1 + t = 10 + for i in prange(A.shape[0]): + if cond[i]: + s += 1 + t += 1 + else: + s += 2 + return s + t + self.check(test_impl, np.ones(10), np.ones(10).astype('bool')) + + def test_two_d_array_reduction_reuse(self): + def test_impl(n): + shp = (13, 17) + size = shp[0] * shp[1] + result1 = np.zeros(shp, np.int_) + tmp = np.arange(size).reshape(shp) + + for i in numba.prange(n): + result1 += tmp + + for i in numba.prange(n): + result1 += tmp + + return result1 + + self.check(test_impl, 100) + + def test_one_d_array_reduction(self): + def test_impl(n): + result = np.zeros(1, np.int_) + + for i in numba.prange(n): + result += np.array([i], np.int_) + + return result + + self.check(test_impl, 100) + + def test_two_d_array_reduction(self): + def test_impl(n): + shp = (13, 17) + size = shp[0] * shp[1] + result1 = np.zeros(shp, np.int_) + tmp = np.arange(size).reshape(shp) + + for i in numba.prange(n): + result1 += tmp + + return result1 + + self.check(test_impl, 100) + + def test_two_d_array_reduction_with_float_sizes(self): + # result1 is float32 and tmp is float64. + # Tests reduction with differing dtypes. 
+ def test_impl(n): + shp = (2, 3) + result1 = np.zeros(shp, np.float32) + tmp = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).reshape(shp) + + for i in numba.prange(n): + result1 += tmp + + return result1 + + self.check(test_impl, 100) + + def test_two_d_array_reduction_prod(self): + def test_impl(n): + shp = (13, 17) + result1 = 2 * np.ones(shp, np.int_) + tmp = 2 * np.ones_like(result1) + + for i in numba.prange(n): + result1 *= tmp + + return result1 + + self.check(test_impl, 100) + + def test_three_d_array_reduction(self): + def test_impl(n): + shp = (3, 2, 7) + result1 = np.zeros(shp, np.int_) + + for i in numba.prange(n): + result1 += np.ones(shp, np.int_) + + return result1 + + self.check(test_impl, 100) + + def test_preparfor_canonicalize_kws(self): + # test canonicalize_array_math typing for calls with kw args + def test_impl(A): + return A.argsort() + 1 + + n = 211 + A = np.arange(n) + self.check(test_impl, A) + + def test_preparfor_datetime64(self): + # test array.dtype transformation for datetime64 + def test_impl(A): + return A.dtype + + A = np.empty(1, np.dtype('datetime64[ns]')) + cpfunc = self.compile_parallel(test_impl, (numba.typeof(A),)) + self.assertEqual(cpfunc.entry_point(A), test_impl(A)) + + def test_no_hoisting_with_member_function_call(self): + def test_impl(X): + n = X.shape[0] + acc = 0 + for i in prange(n): + R = {1, 2, 3} + R.add(i) + tmp = 0 + for x in R: + tmp += x + acc += tmp + return acc + + self.check(test_impl, np.random.ranf(128)) + + def test_array_compare_scalar(self): + """ issue3671: X != 0 becomes an arrayexpr with operator.ne. + That is turned into a parfor by devectorizing. Make sure + the return type of the devectorized operator.ne + on integer types works properly. 
+ """ + def test_impl(): + X = np.zeros(10, dtype=np.int_) + return X != 0 + + self.check(test_impl) + + def test_array_analysis_optional_def(self): + def test_impl(x, half): + size = len(x) + parr = x[0:size] + + if half: + parr = x[0:size//2] + + return parr.sum() + x = np.ones(20) + self.check(test_impl, x, True, check_scheduling=False) + + def test_prange_side_effects(self): + def test_impl(a, b): + data = np.empty(len(a), dtype=np.float64) + size = len(data) + for i in numba.prange(size): + data[i] = a[i] + for i in numba.prange(size): + data[i] = data[i] + b[i] + return data + + x = np.arange(10 ** 2, dtype=float) + y = np.arange(10 ** 2, dtype=float) + + self.check(test_impl, x, y) + self.assertEqual(countParfors(test_impl, + (types.Array(types.float64, 1, 'C'), + types.Array(types.float64, 1, 'C'))), 1) + + def test_tuple1(self): + def test_impl(a): + atup = (3, 4) + b = 7 + for i in numba.prange(len(a)): + a[i] += atup[0] + atup[1] + b + return a + + x = np.arange(10) + self.check(test_impl, x) + + def test_tuple2(self): + def test_impl(a): + atup = a.shape + b = 7 + for i in numba.prange(len(a)): + a[i] += atup[0] + b + return a + + x = np.arange(10) + self.check(test_impl, x) + + def test_tuple3(self): + def test_impl(a): + atup = (np.arange(10), 4) + b = 7 + for i in numba.prange(len(a)): + a[i] += atup[0][5] + atup[1] + b + return a + + x = np.arange(10) + self.check(test_impl, x) + + def test_namedtuple1(self): + def test_impl(a): + antup = TestNamedTuple(part0=3, part1=4) + b = 7 + for i in numba.prange(len(a)): + a[i] += antup.part0 + antup.part1 + b + return a + + x = np.arange(10) + self.check(test_impl, x) + + def test_namedtuple2(self): + TestNamedTuple2 = namedtuple('TestNamedTuple2', ('part0', 'part1')) + def test_impl(a): + antup = TestNamedTuple2(part0=3, part1=4) + b = 7 + for i in numba.prange(len(a)): + a[i] += antup.part0 + antup.part1 + b + return a + + x = np.arange(10) + self.check(test_impl, x) + + def test_namedtuple3(self): + # 
issue5872: test that a.y[:] = 5 is not removed as + # deadcode. + TestNamedTuple3 = namedtuple(f'TestNamedTuple3',['y']) + + def test_impl(a): + a.y[:] = 5 + + def comparer(a, b): + np.testing.assert_almost_equal(a.y, b.y) + + x = TestNamedTuple3(y=np.zeros(10)) + self.check(test_impl, x, check_arg_equality=[comparer]) + + def test_inplace_binop(self): + def test_impl(a, b): + b += a + return b + + X = np.arange(10) + 10 + Y = np.arange(10) + 100 + self.check(test_impl, X, Y) + self.assertEqual(countParfors(test_impl, + (types.Array(types.float64, 1, 'C'), + types.Array(types.float64, 1, 'C'))), 1) + + def test_tuple_concat(self): + # issue5383 + def test_impl(a): + n = len(a) + array_shape = n, n + indices = np.zeros(((1,) + array_shape + (1,)), dtype=np.uint64) + k_list = indices[0, :] + + for i, g in enumerate(a): + k_list[i, i] = i + return k_list + + x = np.array([1, 1]) + self.check(test_impl, x) + + def test_tuple_concat_with_reverse_slice(self): + # issue5383 + def test_impl(a): + n = len(a) + array_shape = n, n + indices = np.zeros(((1,) + array_shape + (1,))[:-1], + dtype=np.uint64) + k_list = indices[0, :] + + for i, g in enumerate(a): + k_list[i, i] = i + return k_list + + x = np.array([1, 1]) + self.check(test_impl, x) + + def test_array_tuple_concat(self): + # issue6399 + def test_impl(a): + S = (a,) + (a, a) + return S[0].sum() + + x = np.ones((3,3)) + self.check(test_impl, x) + + def test_high_dimension1(self): + # issue6749 + def test_impl(x): + return x * 5.0 + x = np.ones((2, 2, 2, 2, 2, 15)) + self.check(test_impl, x) + + def test_tuple_arg(self): + def test_impl(x, sz): + for i in numba.pndindex(sz): + x[i] = 1 + return x + sz = (10, 5) + self.check(test_impl, np.empty(sz), sz) + + def test_tuple_arg_not_whole_array(self): + def test_impl(x, sz): + for i in numba.pndindex(sz): + x[i] = 1 + return x + sz = (10, 5) + self.check(test_impl, np.zeros(sz), (10, 3)) + + def test_tuple_for_pndindex(self): + def test_impl(x): + sz = (10, 5) + for i in 
numba.pndindex(sz): + x[i] = 1 + return x + sz = (10, 5) + self.check(test_impl, np.zeros(sz)) + + def test_tuple_arg_literal(self): + def test_impl(x, first): + sz = (first, 5) + for i in numba.pndindex(sz): + x[i] = 1 + return x + sz = (10, 5) + self.check(test_impl, np.zeros(sz), 10) + + def test_tuple_of_literal_nonliteral(self): + # This test has to be done manually as the self.check uses + # compile_isolated and one function cannot "see" the other + + def test_impl(x, sz): + for i in numba.pndindex(sz): + x[i] = 1 + return x + + def call(x, fn): + return fn(x, (10, 3)) # Only want to iterate to the 3rd + + get_input = lambda: np.zeros((10, 10)) + expected = call(get_input(), test_impl) + + def check(dec): + f1 = dec(test_impl) + f2 = njit(call) # no parallel semantics in the caller + got = f2(get_input(), f1) + self.assertPreciseEqual(expected, got) + + for d in (njit, njit(parallel=True)): + check(d) + + def test_tuple_arg_1d(self): + def test_impl(x, sz): + for i in numba.pndindex(sz): + x[i] = 1 + return x + sz = (10,) + self.check(test_impl, np.zeros(sz), sz) + + def test_tuple_arg_1d_literal(self): + def test_impl(x): + sz = (10,) + for i in numba.pndindex(sz): + x[i] = 1 + return x + sz = (10,) + self.check(test_impl, np.zeros(sz)) + + def test_int_arg_pndindex(self): + def test_impl(x, sz): + for i in numba.pndindex(sz): + x[i] = 1 + return x + self.check(test_impl, np.zeros((10, 10)), 3) + + def test_prange_unknown_call1(self): + @register_jitable + def issue7854_proc(u, i, even, size): + for j in range((even + i + 1) % 2 + 1, size - 1, 2): + u[i, j] = u[i + 1, j] + 1 + + # issue7854 + # Forbid fusion in unanalyzable call inside prange. 
+ def test_impl(u, size): + for i in numba.prange(1, size - 1): + issue7854_proc(u, i, 0, size) + for i in numba.prange(1, size - 1): + issue7854_proc(u, i, 1, size) + return u + + size = 4 + u = np.zeros((size, size)) + cptypes = (numba.float64[:, ::1], types.int64) + self.assertEqual(countParfors(test_impl, cptypes), 2) + self.check(test_impl, u, size) + + def test_prange_index_calc1(self): + # Should forbid fusion due to cross-iteration dependency as + # detected by loop index calcuation (i+1) as array index. + def test_impl(u, size): + for i in numba.prange(1, size - 1): + for j in range((i + 1) % 2 + 1, size - 1, 2): + u[i, j] = u[i + 1, j] + 1 + for i in numba.prange(1, size - 1): + for j in range(i % 2 + 1, size - 1, 2): + u[i, j] = u[i + 1, j] + 1 + return u + + size = 4 + u = np.zeros((size, size)) + cptypes = (numba.float64[:, ::1], types.int64) + self.assertEqual(countParfors(test_impl, cptypes), 2) + self.check(test_impl, u, size) + + def test_prange_reverse_order1(self): + # Testing if reversed loop index usage as array index + # prevents fusion. + def test_impl(a, b, size): + for i in numba.prange(size): + for j in range(size): + a[i, j] = b[i, j] + 1 + for i in numba.prange(size): + for j in range(size): + b[j, i] = 3 + return a[0, 0] + b[0, 0] + + size = 10 + a = np.zeros((size, size)) + b = np.zeros((size, size)) + cptypes = (numba.float64[:, ::1], numba.float64[:, ::1], types.int64) + self.assertEqual(countParfors(test_impl, cptypes), 2) + self.check(test_impl, a, b, size) + + def test_prange_parfor_index_then_not(self): + # Testing if accessing an array first with a parfor index then + # without will prevent fusion. 
+ def test_impl(a, size): + b = 0 + for i in numba.prange(size): + a[i] = i + for i in numba.prange(size): + b += a[5] + return b + + size = 10 + a = np.zeros(size) + cptypes = (numba.float64[:], types.int64) + self.assertEqual(countParfors(test_impl, cptypes), 2) + self.check(test_impl, a, size) + + def test_prange_parfor_index_const_tuple_fusion(self): + # Testing if accessing a tuple with prange index + # and later with a constant will not prevent fusion. + def test_impl(a, tup, size): + acc = 0 + for i in numba.prange(size): + a[i] = i + tup[i] + for i in numba.prange(size): + acc += a[i] + tup[1] + return acc + + size = 10 + a = np.zeros(size) + b = tuple(a) + cptypes = (numba.float64[:], + types.containers.UniTuple(types.float64, size), + types.intp) + self.assertEqual(countParfors(test_impl, cptypes), 1) + self.check(test_impl, a, b, size) + + def test_prange_non_parfor_index_then_opposite(self): + # Testing if accessing an array first without a parfor index then + # with will prevent fusion. + def test_impl(a, b, size): + for i in numba.prange(size): + b[i] = a[5] + for i in numba.prange(size): + a[i] = i + # Need this to stop previous prange from being optimized away. + b[0] += a[0] + return b + + size = 10 + a = np.zeros(size) + b = np.zeros(size) + cptypes = (numba.float64[:], numba.float64[:], types.int64) + self.assertEqual(countParfors(test_impl, cptypes), 2) + self.check(test_impl, a, b, size) + + def test_prange_optional(self): + def test_impl(arr, pred=None): + for i in prange(1): + if pred is not None: + arr[i] = 0.0 + + arr = np.ones(10) + self.check(test_impl, arr, None, + check_arg_equality=[np.testing.assert_almost_equal, + lambda x, y: x == y]) + self.assertEqual(arr.sum(), 10.0) + + def test_untraced_value_tuple(self): + # This is a test for issue #6478. 
+ def test_impl(): + a = (1.2, 1.3) + return a[0] + + with self.assertRaises(AssertionError) as raises: + self.check(test_impl) + self.assertIn("\'@do_scheduling\' not found", str(raises.exception)) + + def test_recursive_untraced_value_tuple(self): + # This is a test for issue #6478. + def test_impl(): + a = ((1.2, 1.3),) + return a[0][0] + + with self.assertRaises(AssertionError) as raises: + self.check(test_impl) + self.assertIn("\'@do_scheduling\' not found", str(raises.exception)) + + def test_untraced_value_parfor(self): + # This is a test for issue #6478. + def test_impl(arr): + a = (1.2, 1.3) + n1 = len(arr) + arr2 = np.empty(n1, np.float64) + for i in prange(n1): + arr2[i] = arr[i] * a[0] + n2 = len(arr2) + arr3 = np.empty(n2, np.float64) + for j in prange(n2): + arr3[j] = arr2[j] - a[1] + total = 0.0 + n3 = len(arr3) + for k in prange(n3): + total += arr3[k] + return total + a[0] + + arg = (types.Array(types.int64, 1, 'C'), ) + self.assertEqual(countParfors(test_impl, arg), 1) + + arr = np.arange(10, dtype=np.int64) + self.check(test_impl, arr) + + def test_setitem_2d_one_replaced(self): + # issue7843 + def test_impl(x): + count = 0 + for n in range(x.shape[0]): + # Useless "if" necessary to trigger bug. 
+ if n: + n + x[count, :] = 1 + count += 1 + return x + + self.check(test_impl, np.zeros((3, 1))) + + def test_1array_control_flow(self): + # issue8146 + def test_impl(arr, flag1, flag2): + inv = np.arange(arr.size) + if flag1: + return inv.astype(np.float64) + if flag2: + ret = inv[inv] + else: + ret = inv[inv - 1] + return ret / arr.size + + arr = np.arange(100) + self.check(test_impl, arr, True, False) + self.check(test_impl, arr, True, True) + self.check(test_impl, arr, False, False) + + def test_2array_1_control_flow(self): + # issue8146 + def test_impl(arr, l, flag): + inv1 = np.arange(arr.size) + inv2 = np.arange(l, arr.size + l) + if flag: + ret = inv1[inv1] + else: + ret = inv1[inv1 - 1] + return ret / inv2 + + arr = np.arange(100) + self.check(test_impl, arr, 10, True) + self.check(test_impl, arr, 10, False) + + def test_2array_2_control_flow(self): + # issue8146 + def test_impl(arr, l, flag): + inv1 = np.arange(arr.size) + inv2 = np.arange(l, arr.size + l) + if flag: + ret1 = inv1[inv1] + ret2 = inv2[inv1] + else: + ret1 = inv1[inv1 - 1] + ret2 = inv2[inv1 - 1] + return ret1 / ret2 + + arr = np.arange(100) + self.check(test_impl, arr, 10, True) + self.check(test_impl, arr, 10, False) + + def test_issue8515(self): + # issue8515: an array is filled in the first prange and + # then accessed with c[i - 1] in the next prange which + # should prevent fusion with the previous prange. + def test_impl(n): + r = np.zeros(n, dtype=np.intp) + c = np.zeros(n, dtype=np.intp) + for i in prange(n): + for j in range(i): + c[i] += 1 + + for i in prange(n): + if i == 0: + continue + r[i] = c[i] - c[i - 1] + return r[1:] + + self.check(test_impl, 15) + self.assertEqual(countParfors(test_impl, (types.int64, )), 2) + + def test_issue9029(self): + # issue9029: too many parfors executed in one function + # overflowed the stack. + def test_impl(i1, i2): + N = 30 + S = 3 + a = np.empty((N,N)) + # The stack should overflow if there are 30*30*2 (# of parfors) + # iterations. 
+ for y in range(N): + for x in range(N): + values = np.ones(S) + v = values[0] + + p2 = np.empty(S) + for i in prange(i1, i2): + p2[i] = 1 + j = p2[0] + + a[y,x] = v + j + return a + + # We pass in 0 and 3 so that the function can't analyze the loop + # bounds on the prange to generate a signed loop whereas the + # np.ones will be an unsigned loop. + self.check(test_impl, 0, 3) + + def test_fusion_no_side_effects(self): + def test_impl(a, b): + X = np.ones(100) + b = math.ceil(b) + Y = np.ones(100) + c = int(max(a, b)) + return X + Y + c + self.check(test_impl, 3.7, 4.3) + self.assertEqual(countParfors(test_impl, (types.float64, types.float64)), 1) + + def test_issue9256_lower_sroa_conflict(self): + @njit(parallel=True) + def def_in_loop(x): + c = 0 + set_num_threads(1) + for i in prange(x): + c = i + return c + + self.assertEqual(def_in_loop(10), def_in_loop.py_func(10)) + + def test_issue9256_lower_sroa_conflict_variant1(self): + def def_in_loop(x): + c = x + set_num_threads(1) + for _i in prange(x): + if c: # forces 3 SSA versions + d = x + 4 + return c, d > 0 + + expected = def_in_loop(4) + self.assertEqual(expected, njit(parallel=False)(def_in_loop)(4)) + self.assertEqual(expected, njit(parallel=True)(def_in_loop)(4)) + + def test_issue9256_lower_sroa_conflict_variant2(self): + def def_in_loop(x): + c = x + set_num_threads(1) + for _i in prange(x): + if c: + for _j in range(x): # forces 4 SSA versions + d = x + 4 + return c, d > 0 + + expected = def_in_loop(4) + self.assertEqual(expected, njit(parallel=False)(def_in_loop)(4)) + self.assertEqual(expected, njit(parallel=True)(def_in_loop)(4)) + + @needs_lapack # use of np.linalg.solve + @skip_ppc64le_invalid_ctr_loop + def test_issue9490_non_det_ssa_problem(self): + # Test modified to include https://github.com/numba/numba/issues/9581 + # which is an issue with hoisting + cmd = [ + sys.executable, + "-m", + "numba.tests.parfor_iss9490_usecase", + ] + envs = { + **os.environ, + # Reproducer consistently fail 
with the following hashseed. + "PYTHONHASHSEED": "1", + # See https://github.com/numba/numba/issues/9501 + # for details of why num-thread pinning is needed. + "NUMBA_NUM_THREADS": "1", + } + try: + subp.check_output(cmd, env=envs, + stderr=subp.STDOUT, + encoding='utf-8') + except subp.CalledProcessError as e: + msg = f"subprocess failed with output:\n{e.output}" + self.fail(msg=msg) + + +@skip_parfors_unsupported +class TestParforsLeaks(MemoryLeakMixin, TestParforsBase): + def check(self, pyfunc, *args, **kwargs): + cfunc, cpfunc = self.compile_all(pyfunc, *args) + self.check_parfors_vs_others(pyfunc, cfunc, cpfunc, *args, **kwargs) + + def test_reduction(self): + # issue4299 + def test_impl(arr): + return arr.sum() + + arr = np.arange(10).astype(np.float64) + self.check(test_impl, arr) + + def test_multiple_reduction_vars(self): + + def test_impl(arr): + a = 0. + b = 1. + for i in prange(arr.size): + a += arr[i] + b += 1. / (arr[i] + 1) + return a * b + arr = np.arange(10).astype(np.float64) + self.check(test_impl, arr) + + +@skip_parfors_unsupported +class TestParforsSlice(TestParforsBase): + + def test_parfor_slice1(self): + def test_impl(a): + (n,) = a.shape + b = a[0:n-2] + a[1:n-1] + return b + + self.check(test_impl, np.ones(10)) + + def test_parfor_slice2(self): + def test_impl(a, m): + (n,) = a.shape + b = a[0:n-2] + a[1:m] + return b + + # runtime assertion should succeed + self.check(test_impl, np.ones(10), 9) + # next we expect failure + with self.assertRaises(AssertionError) as raises: + njit(parallel=True)(test_impl)(np.ones(10),10) + self.assertIn("do not match", str(raises.exception)) + + def test_parfor_slice3(self): + def test_impl(a): + (m,n) = a.shape + b = a[0:m-1,0:n-1] + a[1:m,1:n] + return b + + self.check(test_impl, np.ones((4,3))) + + def test_parfor_slice4(self): + def test_impl(a): + (m,n) = a.shape + b = a[:,0:n-1] + a[:,1:n] + return b + + self.check(test_impl, np.ones((4,3))) + + def test_parfor_slice5(self): + def test_impl(a): + 
(m,n) = a.shape + b = a[0:m-1,:] + a[1:m,:] + return b + + self.check(test_impl, np.ones((4,3))) + + def test_parfor_slice6(self): + def test_impl(a): + b = a.transpose() + c = a[1,:] + b[:,1] + return c + + self.check(test_impl, np.ones((4,3))) + + def test_parfor_slice7(self): + def test_impl(a): + b = a.transpose() + c = a[1,:] + b[1,:] + return c + + # runtime check should succeed + self.check(test_impl, np.ones((3,3))) + # next we expect failure + with self.assertRaises(AssertionError) as raises: + njit(parallel=True)(test_impl)(np.ones((3,4))) + self.assertIn("do not match", str(raises.exception)) + + @disabled_test + def test_parfor_slice8(self): + def test_impl(a): + (m,n) = a.shape + b = a.transpose() + b[1:m,1:n] = a[1:m,1:n] + return b + + self.check(test_impl, np.arange(9).reshape((3,3))) + + @disabled_test + def test_parfor_slice9(self): + def test_impl(a): + (m,n) = a.shape + b = a.transpose() + b[1:n,1:m] = a[:,1:m] + return b + + self.check(test_impl, np.arange(12).reshape((3,4))) + + @disabled_test + def test_parfor_slice10(self): + def test_impl(a): + (m,n) = a.shape + b = a.transpose() + b[2,1:m] = a[2,1:m] + return b + + self.check(test_impl, np.arange(9).reshape((3,3))) + + def test_parfor_slice11(self): + def test_impl(a): + (m,n,l) = a.shape + b = a.copy() + b[:,1,1:l] = a[:,2,1:l] + return b + + self.check(test_impl, np.arange(27).reshape((3,3,3))) + + def test_parfor_slice12(self): + def test_impl(a): + (m,n) = a.shape + b = a.copy() + b[1,1:-1] = a[0,:-2] + return b + + self.check(test_impl, np.arange(12).reshape((3,4))) + + def test_parfor_slice13(self): + def test_impl(a): + (m,n) = a.shape + b = a.copy() + c = -1 + b[1,1:c] = a[0,-n:c-1] + return b + + self.check(test_impl, np.arange(12).reshape((3,4))) + + def test_parfor_slice14(self): + def test_impl(a): + (m,n) = a.shape + b = a.copy() + b[1,:-1] = a[0,-3:4] + return b + + self.check(test_impl, np.arange(12).reshape((3,4))) + + def test_parfor_slice15(self): + def test_impl(a): + 
(m,n) = a.shape + b = a.copy() + b[1,-(n-1):] = a[0,-3:4] + return b + + self.check(test_impl, np.arange(12).reshape((3,4))) + + @disabled_test + def test_parfor_slice16(self): + """ This test is disabled because if n is larger than the array size + then n and n-1 will both be the end of the array and thus the + slices will in fact be of different sizes and unable to fuse. + """ + def test_impl(a, b, n): + assert(a.shape == b.shape) + a[1:n] = 10 + b[0:(n-1)] = 10 + return a * b + + self.check(test_impl, np.ones(10), np.zeros(10), 8) + args = (numba.float64[:], numba.float64[:], numba.int64) + self.assertEqual(countParfors(test_impl, args), 2) + + def test_parfor_slice17(self): + def test_impl(m, A): + B = np.zeros(m) + n = len(A) + B[-n:] = A + return B + + self.check(test_impl, 10, np.ones(10)) + + def test_parfor_slice18(self): + # issue 3534 + def test_impl(): + a = np.zeros(10) + a[1:8] = np.arange(0, 7) + y = a[3] + return y + + self.check(test_impl) + + def test_parfor_slice19(self): + # issues #3561 and #3554, empty slice binop + def test_impl(X): + X[:0] += 1 + return X + + self.check(test_impl, np.ones(10)) + + def test_parfor_slice20(self): + # issue #4075, slice size + def test_impl(): + a = np.ones(10) + c = a[1:] + s = len(c) + return s + + self.check(test_impl, check_scheduling=False) + + def test_parfor_slice21(self): + def test_impl(x1, x2): + x1 = x1.reshape(x1.size, 1) + x2 = x2.reshape(x2.size, 1) + return x1 >= x2[:-1, :] + + x1 = np.random.rand(5) + x2 = np.random.rand(6) + self.check(test_impl, x1, x2) + + def test_parfor_slice22(self): + def test_impl(x1, x2): + b = np.zeros((10,)) + for i in prange(1): + b += x1[:, x2] + return b + + x1 = np.zeros((10,7)) + x2 = np.array(4) + self.check(test_impl, x1, x2) + + def test_parfor_slice23(self): + # issue #4630 + def test_impl(x): + x[:0] = 2 + return x + + self.check(test_impl, np.ones(10)) + + def test_parfor_slice24(self): + def test_impl(m, A, n): + B = np.zeros(m) + C = B[n:] + C = 
A[:len(C)] + return B + + for i in range(-15, 15): + self.check(test_impl, 10, np.ones(10), i) + + def test_parfor_slice25(self): + def test_impl(m, A, n): + B = np.zeros(m) + C = B[:n] + C = A[:len(C)] + return B + + for i in range(-15, 15): + self.check(test_impl, 10, np.ones(10), i) + + def test_parfor_slice26(self): + def test_impl(a): + (n,) = a.shape + b = a.copy() + b[-(n-1):] = a[-3:4] + return b + + self.check(test_impl, np.arange(4)) + + def test_parfor_slice27(self): + # issue5601: tests array analysis of the slice with + # n_valid_vals of unknown size. + def test_impl(a): + n_valid_vals = 0 + + for i in prange(a.shape[0]): + if a[i] != 0: + n_valid_vals += 1 + + if n_valid_vals: + unused = a[:n_valid_vals] + + return 0 + + self.check(test_impl, np.arange(3)) + + def test_parfor_array_access_lower_slice(self): + for ts in [slice(1, 3, None), slice(2, None, None), slice(None, 2, -1), + slice(None, None, None), slice(None, None, -2)]: + + def test_impl(n): + X = np.arange(n * 4).reshape((n, 4)) + y = 0 + for i in numba.prange(n): + y += X[i, ts].sum() + return y + + n = 10 + self.check(test_impl, n) + + X = np.arange(n * 4).reshape((n, 4)) + + def test_impl(X): + y = 0 + for i in numba.prange(X.shape[0]): + y += X[i, ts].sum() + return y + + self.check(test_impl, X) + + +@skip_parfors_unsupported +class TestParforsOptions(TestParforsBase): + + def test_parfor_options(self): + def test_impl(a): + n = a.shape[0] + b = np.ones(n) + c = np.array([ i for i in range(n) ]) + b[:n] = a + b * c + for i in prange(n): + c[i] = b[i] * a[i] + return reduce(lambda x,y:x+y, c, 0) + + self.check(test_impl, np.ones(10)) + args = (numba.float64[:],) + # everything should fuse with default option + self.assertEqual(countParfors(test_impl, args), 1) + # with no fusion + self.assertEqual(countParfors(test_impl, args, fusion=False), 6) + # with no fusion, comprehension + self.assertEqual(countParfors(test_impl, args, fusion=False, + comprehension=False), 5) + #with no fusion, 
comprehension, setitem + self.assertEqual(countParfors(test_impl, args, fusion=False, + comprehension=False, setitem=False), 4) + # with no fusion, comprehension, prange + self.assertEqual(countParfors(test_impl, args, fusion=False, + comprehension=False, setitem=False, prange=False), 3) + # with no fusion, comprehension, prange, reduction + self.assertEqual(countParfors(test_impl, args, fusion=False, + comprehension=False, setitem=False, prange=False, + reduction=False), 2) + # with no fusion, comprehension, prange, reduction, numpy + self.assertEqual(countParfors(test_impl, args, fusion=False, + comprehension=False, setitem=False, prange=False, + reduction=False, numpy=False), 0) + + +@skip_parfors_unsupported +class TestParforsBitMask(TestParforsBase): + + def test_parfor_bitmask1(self): + def test_impl(a, n): + b = a > n + a[b] = 0 + return a + + self.check(test_impl, np.arange(10), 5) + + def test_parfor_bitmask2(self): + def test_impl(a, b): + a[b] = 0 + return a + + a = np.arange(10) + b = a > 5 + self.check(test_impl, a, b) + + def test_parfor_bitmask3(self): + def test_impl(a, b): + a[b] = a[b] + return a + + a = np.arange(10) + b = a > 5 + self.check(test_impl, a, b) + + def test_parfor_bitmask4(self): + def test_impl(a, b): + a[b] = (2 * a)[b] + return a + + a = np.arange(10) + b = a > 5 + self.check(test_impl, a, b) + + def test_parfor_bitmask5(self): + def test_impl(a, b): + a[b] = a[b] * a[b] + return a + + a = np.arange(10) + b = a > 5 + self.check(test_impl, a, b) + + def test_parfor_bitmask6(self): + def test_impl(a, b, c): + a[b] = c + return a + + a = np.arange(10) + b = a > 5 + c = np.zeros(sum(b)) + + # expect failure due to lack of parallelism + with self.assertRaises(AssertionError) as raises: + self.check(test_impl, a, b, c) + self.assertIn("\'@do_scheduling\' not found", str(raises.exception)) + + +@skip_parfors_unsupported +class TestParforsMisc(TestParforsBase): + """ + Tests miscellaneous parts of ParallelAccelerator use. 
+ """ + def test_no_warn_if_cache_set(self): + + def pyfunc(): + arr = np.ones(100) + for i in prange(arr.size): + arr[i] += i + return arr + + cfunc = njit(parallel=True, cache=True)(pyfunc) + + with warnings.catch_warnings(record=True) as raised_warnings: + warnings.simplefilter('always') + warnings.filterwarnings(action="ignore", + module="typeguard") + # Filter out warnings about TBB interface mismatch + warnings.filterwarnings(action='ignore', + message=r".*TBB_INTERFACE_VERSION.*", + category=numba.errors.NumbaWarning, + module=r'numba\.np\.ufunc\.parallel.*') + cfunc() + + self.assertEqual(len(raised_warnings), 0) + + # Make sure the dynamic globals flag is set + has_dynamic_globals = [cres.library.has_dynamic_globals + for cres in cfunc.overloads.values()] + self.assertEqual(has_dynamic_globals, [False]) + + def test_statement_reordering_respects_aliasing(self): + def impl(): + a = np.zeros(10) + a[1:8] = np.arange(0, 7) + print('a[3]:', a[3]) + print('a[3]:', a[3]) + return a + + cres = self.compile_parallel(impl, ()) + with captured_stdout() as stdout: + cres.entry_point() + for line in stdout.getvalue().splitlines(): + self.assertEqual('a[3]: 2.0', line) + + def test_parfor_ufunc_typing(self): + def test_impl(A): + return np.isinf(A) + + A = np.array([np.inf, 0.0]) + cfunc = njit(parallel=True)(test_impl) + # save global state + old_seq_flag = numba.parfors.parfor.sequential_parfor_lowering + try: + numba.parfors.parfor.sequential_parfor_lowering = True + np.testing.assert_array_equal(test_impl(A), cfunc(A)) + finally: + # recover global state + numba.parfors.parfor.sequential_parfor_lowering = old_seq_flag + + def test_init_block_dce(self): + # issue4690 + def test_impl(): + res = 0 + arr = [1,2,3,4,5] + numba.parfors.parfor.init_prange() + dummy = arr + for i in numba.prange(5): + res += arr[i] + return res + dummy[2] + + self.assertEqual(get_init_block_size(test_impl, ()), 0) + + def test_alias_analysis_for_parfor1(self): + def test_impl(): + acc = 0 
+ for _ in range(4): + acc += 1 + + data = np.zeros((acc,)) + return data + + self.check(test_impl) + + def test_no_state_change_in_gufunc_lowering_on_error(self): + # tests #5098, if there's an exception arising in gufunc lowering the + # sequential_parfor_lowering global variable should remain as False on + # stack unwind. + + BROKEN_MSG = 'BROKEN_MSG' + + @register_pass(mutates_CFG=True, analysis_only=False) + class BreakParfors(AnalysisPass): + _name = "break_parfors" + + def __init__(self): + AnalysisPass.__init__(self) + + def run_pass(self, state): + for blk in state.func_ir.blocks.values(): + for stmt in blk.body: + if isinstance(stmt, numba.parfors.parfor.Parfor): + # races should be a set(), that list is iterable + # permits it to get through to the + # _create_gufunc_for_parfor_body routine at which + # point it needs to be a set so e.g. set.difference + # can be computed, this therefore creates an error + # in the right location. + class Broken(list): + + def difference(self, other): + raise errors.LoweringError(BROKEN_MSG) + + stmt.races = Broken() + return True + + + class BreakParforsCompiler(CompilerBase): + + def define_pipelines(self): + pm = DefaultPassBuilder.define_nopython_pipeline(self.state) + pm.add_pass_after(BreakParfors, IRLegalization) + pm.finalize() + return [pm] + + + @njit(parallel=True, pipeline_class=BreakParforsCompiler) + def foo(): + x = 1 + for _ in prange(1): + x += 1 + return x + + # assert default state for global + self.assertFalse(numba.parfors.parfor.sequential_parfor_lowering) + + with self.assertRaises(errors.LoweringError) as raises: + foo() + + self.assertIn(BROKEN_MSG, str(raises.exception)) + + # assert state has not changed + self.assertFalse(numba.parfors.parfor.sequential_parfor_lowering) + + def test_issue_5098(self): + class DummyType(types.Opaque): + pass + + dummy_type = DummyType("my_dummy") + register_model(DummyType)(models.OpaqueModel) + + class Dummy(object): + pass + + @typeof_impl.register(Dummy) + 
def typeof_Dummy(val, c): + return dummy_type + + @unbox(DummyType) + def unbox_index(typ, obj, c): + return NativeValue(c.context.get_dummy_value()) + + @overload_method(DummyType, "method1", jit_options={"parallel":True}) + def _get_method1(obj, arr, func): + def _foo(obj, arr, func): + def baz(a, f): + c = a.copy() + c[np.isinf(a)] = np.nan + return f(c) + + length = len(arr) + output_arr = np.empty(length, dtype=np.float64) + for i in prange(length): + output_arr[i] = baz(arr[i], func) + for i in prange(length - 1): + output_arr[i] += baz(arr[i], func) + return output_arr + return _foo + + @njit + def bar(v): + return v.mean() + + @njit + def test1(d): + return d.method1(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), bar) + + save_state = numba.parfors.parfor.sequential_parfor_lowering + self.assertFalse(save_state) + try: + test1(Dummy()) + self.assertFalse(numba.parfors.parfor.sequential_parfor_lowering) + finally: + # always set the sequential_parfor_lowering state back to the + # original state + numba.parfors.parfor.sequential_parfor_lowering = save_state + + def test_oversized_tuple_as_arg_to_kernel(self): + + @njit(parallel=True) + def oversize_tuple(idx): + big_tup = (1,2,3,4) + z = 0 + for x in prange(10): + z += big_tup[idx] + return z + + with override_env_config('NUMBA_PARFOR_MAX_TUPLE_SIZE', '3'): + with self.assertRaises(errors.UnsupportedParforsError) as raises: + oversize_tuple(0) + + errstr = str(raises.exception) + self.assertIn("Use of a tuple", errstr) + self.assertIn("in a parallel region", errstr) + + def test_issue5167(self): + + def ndvi_njit(img_nir, img_red): + fillvalue = 0 + out_img = np.full(img_nir.shape, fillvalue, dtype=img_nir.dtype) + dims = img_nir.shape + for y in prange(dims[0]): + for x in prange(dims[1]): + out_img[y, x] = ((img_nir[y, x] - img_red[y, x]) / + (img_nir[y, x] + img_red[y, x])) + return out_img + + tile_shape = (4, 4) + array1 = np.random.uniform(low=1.0, high=10000.0, size=tile_shape) + array2 = 
np.random.uniform(low=1.0, high=10000.0, size=tile_shape) + self.check(ndvi_njit, array1, array2) + + def test_issue5065(self): + + def reproducer(a, dist, dist_args): + result = np.zeros((a.shape[0], a.shape[0]), dtype=np.float32) + for i in prange(a.shape[0]): + for j in range(i + 1, a.shape[0]): + d = dist(a[i], a[j], *dist_args) + result[i, j] = d + result[j, i] = d + return result + + @njit + def euclidean(x, y): + result = 0.0 + for i in range(x.shape[0]): + result += (x[i] - y[i]) ** 2 + return np.sqrt(result) + + a = np.random.random(size=(5, 2)) + + got = njit(parallel=True)(reproducer)(a.copy(), euclidean,()) + expected = reproducer(a.copy(), euclidean,()) + + np.testing.assert_allclose(got, expected) + + def test_issue5001(self): + + def test_numba_parallel(myarray): + result = [0] * len(myarray) + for i in prange(len(myarray)): + result[i] = len(myarray[i]) + return result + + myarray = (np.empty(100),np.empty(50)) + self.check(test_numba_parallel, myarray) + + def test_issue3169(self): + + @njit + def foo(grids): + pass + + @njit(parallel=True) + def bar(grids): + for x in prange(1): + foo(grids) + + # returns nothing, just check it compiles + bar(([1],) * 2) + + @disabled_test + def test_issue4846(self): + + mytype = namedtuple("mytype", ("a", "b")) + + def outer(mydata): + for k in prange(3): + inner(k, mydata) + return mydata.a + + @njit(nogil=True) + def inner(k, mydata): + f = (k, mydata.a) + g = (k, mydata.b) + + mydata = mytype(a="a", b="b") + + self.check(outer, mydata) + + def test_issue3748(self): + + def test1b(): + x = (1, 2, 3, 4, 5) + a = 0 + for i in prange(len(x)): + a += x[i] + return a + + self.check(test1b,) + + def test_issue5277(self): + + def parallel_test(size, arr): + for x in prange(size[0]): + for y in prange(size[1]): + arr[y][x] = x * 4.5 + y + return arr + + size = (10, 10) + arr = np.zeros(size, dtype=int) + + self.check(parallel_test, size, arr) + + def test_issue5570_ssa_races(self): + @njit(parallel=True) + def foo(src, 
method, out): + for i in prange(1): + for j in range(1): + out[i, j] = 1 + if method: + out += 1 + return out + + src = np.zeros((5,5)) + method = 57 + out = np.zeros((2, 2)) + + self.assertPreciseEqual( + foo(src, method, out), + foo.py_func(src, method, out) + ) + + def test_issue6095_numpy_max(self): + @njit(parallel=True) + def find_maxima_3D_jit(args): + package = args + for index in range(0, 10): + z_stack = package[index, :, :] + return np.max(z_stack) + + np.random.seed(0) + args = np.random.random((10, 10, 10)) + self.assertPreciseEqual( + find_maxima_3D_jit(args), + find_maxima_3D_jit.py_func(args), + ) + + def test_issue5942_1(self): + # issue5942: tests statement reordering of + # aliased arguments. + def test_impl(gg, gg_next): + gs = gg.shape + d = gs[0] + for i_gg in prange(d): + gg_next[i_gg, :] = gg[i_gg, :] + gg_next[i_gg, 0] += 1 + + return gg_next + + d = 4 + k = 2 + + gg = np.zeros((d, k), dtype = np.int32) + gg_next = np.zeros((d, k), dtype = np.int32) + self.check(test_impl, gg, gg_next) + + def test_issue5942_2(self): + # issue5942: tests statement reordering + def test_impl(d, k): + gg = np.zeros((d, k), dtype = np.int32) + gg_next = np.zeros((d, k), dtype = np.int32) + + for i_gg in prange(d): + for n in range(k): + gg[i_gg, n] = i_gg + gg_next[i_gg, :] = gg[i_gg, :] + gg_next[i_gg, 0] += 1 + + return gg_next + + d = 4 + k = 2 + + self.check(test_impl, d, k) + + @skip_unless_scipy + def test_issue6102(self): + # The problem is originally observed on Python3.8 because of the + # changes in how loops are represented in 3.8 bytecode. 
+ @njit(parallel=True) + def f(r): + for ir in prange(r.shape[0]): + dist = np.inf + tr = np.array([0, 0, 0], dtype=np.float32) + for i in [1, 0, -1]: + dist_t = np.linalg.norm(r[ir, :] + i) + if dist_t < dist: + dist = dist_t + tr = np.array([i, i, i], dtype=np.float32) + r[ir, :] += tr + return r + + r = np.array([[0., 0., 0.], [0., 0., 1.]]) + self.assertPreciseEqual(f(r), f.py_func(r)) + + def test_issue6774(self): + + def test_impl(): + n = 5 + na_mask = np.ones((n,)) + result = np.empty((n - 1,)) + for i in prange(len(result)): + result[i] = np.sum(na_mask[i:i + 1]) + return result + + self.check(test_impl) + + def test_issue4963_globals(self): + def test_impl(): + buf = np.zeros((_GLOBAL_INT_FOR_TESTING1, _GLOBAL_INT_FOR_TESTING2)) + return buf + self.check(test_impl) + + def test_issue4963_freevars(self): + _FREEVAR_INT_FOR_TESTING1 = 17 + _FREEVAR_INT_FOR_TESTING2 = 5 + def test_impl(): + buf = np.zeros((_FREEVAR_INT_FOR_TESTING1, _FREEVAR_INT_FOR_TESTING2)) + return buf + self.check(test_impl) + + def test_issue_9182_recursion_error(self): + from numba.types import ListType, Tuple, intp + + @numba.njit + def _sink(x): + pass + + + @numba.njit(cache=False, parallel=True) + def _ground_node_rule( + clauses, + nodes, + ): + for piter in prange(len(nodes)): + for clause in clauses: + clause_type = clause[0] + clause_variables = clause[2] + if clause_type == 0: + clause_var_1 = clause_variables[0] + elif len(clause_variables) == 2: + clause_var_1, clause_var_2 = ( + clause_variables[0], + clause_variables[1], + ) + + elif len(clause_variables) == 4: + pass + + if clause_type == 1: + _sink(clause_var_1) + _sink(clause_var_2) + + _ground_node_rule.compile( + ( + ListType(Tuple([intp, intp, ListType(intp)])), + ListType(intp), + ) + ) + + def test_lookup_cycle_detection(self): + # This test is added due to a bug discovered in the PR 9244 patch. + # The cyclic detection was incorrectly flagging cycles. 
+ @njit(parallel=True) + def foo(): + # The following `acc` variable is used in the `lookup()` function + # in parfor's reduction code. + acc = 0 + for n in prange(1): + for i in range(1): + for j in range(1): + acc += 1 + return acc + + self.assertEqual(foo(), foo.py_func()) + + def test_issue_9678_build_map(self): + def issue_9678(num_nodes): + out = 0 + for inode_uint in numba.prange(num_nodes): + inode = numba.int64(inode_uint) + p = {inode: 0.0} # mainly this build_map bytecode here + for _ in range(5): + p[inode] += 1 # and here + out += p[inode] + return out + + num_nodes = 12 + issue_9678_serial = numba.jit(parallel=False)(issue_9678) + issue_9678_parallel = numba.jit(parallel=True)(issue_9678) + self.assertEqual(issue_9678_serial(num_nodes), + issue_9678_parallel(num_nodes)) + + +@skip_parfors_unsupported +class TestParforsDiagnostics(TestParforsBase): + + def check(self, pyfunc, *args, **kwargs): + cfunc, cpfunc = self.compile_all(pyfunc, *args) + self.check_parfors_vs_others(pyfunc, cfunc, cpfunc, *args, **kwargs) + + def assert_fusion_equivalence(self, got, expected): + a = self._fusion_equivalent(got) + b = self._fusion_equivalent(expected) + self.assertEqual(a, b) + + def _fusion_equivalent(self, thing): + # parfors indexes the Parfors class instance id's from wherever the + # internal state happens to be. To assert fusion equivalence we just + # check that the relative difference between fusion adjacency lists + # is the same. 
For example: + # {3: [2, 1]} is the same as {13: [12, 11]} + # this function strips the indexing etc out returning something suitable + # for checking equivalence + new = defaultdict(list) + min_key = min(thing.keys()) + for k in sorted(thing.keys()): + new[k - min_key] = [x - min_key for x in thing[k]] + return new + + def assert_diagnostics(self, diagnostics, parfors_count=None, + fusion_info=None, nested_fusion_info=None, + replaced_fns=None, hoisted_allocations=None): + if parfors_count is not None: + self.assertEqual(parfors_count, diagnostics.count_parfors()) + if fusion_info is not None: + self.assert_fusion_equivalence(fusion_info, diagnostics.fusion_info) + if nested_fusion_info is not None: + self.assert_fusion_equivalence(nested_fusion_info, + diagnostics.nested_fusion_info) + if replaced_fns is not None: + repl = diagnostics.replaced_fns.values() + for x in replaced_fns: + for replaced in repl: + if replaced[0] == x: + break + else: + msg = "Replacement for %s was not found. Had %s" % (x, repl) + raise AssertionError(msg) + + if hoisted_allocations is not None: + hoisted_allocs = diagnostics.hoisted_allocations() + self.assertEqual(hoisted_allocations, len(hoisted_allocs)) + + # just make sure that the dump() function doesn't have an issue! 
+ with captured_stdout(): + for x in range(1, 5): + diagnostics.dump(x) + + def test_array_expr(self): + def test_impl(): + n = 10 + a = np.ones(n) + b = np.zeros(n) + return a + b + + self.check(test_impl,) + cpfunc = self.compile_parallel(test_impl, ()) + diagnostics = cpfunc.metadata['parfor_diagnostics'] + self.assert_diagnostics(diagnostics, parfors_count=1, + fusion_info = {3: [4, 5]}) + + def test_prange(self): + def test_impl(): + n = 10 + a = np.empty(n) + for i in prange(n): + a[i] = i * 10 + return a + + self.check(test_impl,) + cpfunc = self.compile_parallel(test_impl, ()) + diagnostics = cpfunc.metadata['parfor_diagnostics'] + self.assert_diagnostics(diagnostics, parfors_count=1) + + def test_user_varname(self): + """make sure original user variable name is used in fusion info + """ + def test_impl(): + n = 10 + x = np.ones(n) + a = np.sin(x) + b = np.cos(a * a) + acc = 0 + for i in prange(n - 2): + for j in prange(n - 1): + acc += b[i] + b[j + 1] + return acc + + self.check(test_impl,) + cpfunc = self.compile_parallel(test_impl, ()) + diagnostics = cpfunc.metadata['parfor_diagnostics'] + # make sure original 'n' variable name is used in fusion report for loop + # dimension mismatch + self.assertTrue( + any("slice(0, n, 1)" in r.message for r in diagnostics.fusion_reports)) + + def test_nested_prange(self): + def test_impl(): + n = 10 + a = np.empty((n, n)) + for i in prange(n): + for j in prange(n): + a[i, j] = i * 10 + j + return a + + self.check(test_impl,) + cpfunc = self.compile_parallel(test_impl, ()) + diagnostics = cpfunc.metadata['parfor_diagnostics'] + self.assert_diagnostics(diagnostics, parfors_count=2, + nested_fusion_info={2: [1]}) + + def test_function_replacement(self): + def test_impl(): + n = 10 + a = np.ones(n) + b = np.argmin(a) + return b + + self.check(test_impl,) + cpfunc = self.compile_parallel(test_impl, ()) + diagnostics = cpfunc.metadata['parfor_diagnostics'] + self.assert_diagnostics(diagnostics, parfors_count=1, + 
fusion_info={2: [3]}, + replaced_fns = [('argmin', 'numpy'),]) + + def test_reduction(self): + def test_impl(): + n = 10 + a = np.ones(n + 1) # prevent fusion + acc = 0 + for i in prange(n): + acc += a[i] + return acc + + self.check(test_impl,) + cpfunc = self.compile_parallel(test_impl, ()) + diagnostics = cpfunc.metadata['parfor_diagnostics'] + self.assert_diagnostics(diagnostics, parfors_count=2) + + def test_reduction_binop(self): + def test_impl(): + n = 10 + a = np.ones(n + 1) # prevent fusion + acc = 0 + for i in prange(n): + acc = acc - a[i] + return acc + + self.check(test_impl,) + cpfunc = self.compile_parallel(test_impl, ()) + diagnostics = cpfunc.metadata['parfor_diagnostics'] + self.assert_diagnostics(diagnostics, parfors_count=2) + + def test_setitem(self): + def test_impl(): + n = 10 + a = np.ones(n) + a[:] = 7 + return a + + self.check(test_impl,) + cpfunc = self.compile_parallel(test_impl, ()) + diagnostics = cpfunc.metadata['parfor_diagnostics'] + self.assert_diagnostics(diagnostics, parfors_count=1) + + def test_allocation_hoisting(self): + def test_impl(): + n = 10 + m = 5 + acc = 0 + for i in prange(n): + temp = np.zeros((m,)) # the np.empty call should get hoisted + for j in range(m): + temp[j] = i + acc += temp[-1] + return acc + + self.check(test_impl,) + cpfunc = self.compile_parallel(test_impl, ()) + diagnostics = cpfunc.metadata['parfor_diagnostics'] + self.assert_diagnostics(diagnostics, hoisted_allocations=1) + + +class TestPrangeBase(TestParforsBase): + + def generate_prange_func(self, pyfunc, patch_instance): + """ + This function does the actual code augmentation to enable the explicit + testing of `prange` calls in place of `range`. 
+ """ + pyfunc_code = pyfunc.__code__ + + prange_names = list(pyfunc_code.co_names) + + if patch_instance is None: + # patch all instances, cheat by just switching + # range for prange + assert 'range' in pyfunc_code.co_names + prange_names = tuple([x if x != 'range' else 'prange' + for x in pyfunc_code.co_names]) + new_code = bytes(pyfunc_code.co_code) + else: + # patch specified instances... + # find where 'range' is in co_names + range_idx = pyfunc_code.co_names.index('range') + range_locations = [] + # look for LOAD_GLOBALs that point to 'range' + for instr in dis.Bytecode(pyfunc_code): + if instr.opname == 'LOAD_GLOBAL': + if _fix_LOAD_GLOBAL_arg(instr.arg) == range_idx: + range_locations.append(instr.offset + 1) + # add in 'prange' ref + prange_names.append('prange') + prange_names = tuple(prange_names) + prange_idx = len(prange_names) - 1 + if utils.PYVERSION in ((3, 11), (3, 12), (3, 13)): + # this is the inverse of _fix_LOAD_GLOBAL_arg + prange_idx = 1 + (prange_idx << 1) + elif utils.PYVERSION in ((3, 10),): + pass + else: + raise NotImplementedError(utils.PYVERSION) + new_code = bytearray(pyfunc_code.co_code) + assert len(patch_instance) <= len(range_locations) + # patch up the new byte code + for i in patch_instance: + idx = range_locations[i] + new_code[idx] = prange_idx + new_code = bytes(new_code) + + # create code object with prange mutation + prange_code = pyfunc_code.replace(co_code=new_code, + co_names=prange_names) + + # get function + pfunc = pytypes.FunctionType(prange_code, globals()) + + return pfunc + + def prange_tester(self, pyfunc, *args, **kwargs): + """ + The `prange` tester + This is a hack. It basically switches out range calls for prange. + It does this by copying the live code object of a function + containing 'range' then copying the .co_names and mutating it so + that 'range' is replaced with 'prange'. It then creates a new code + object containing the mutation and instantiates a function to contain + it. 
At this point three results are created: + 1. The result of calling the original python function. + 2. The result of calling a njit compiled version of the original + python function. + 3. The result of calling a njit(parallel=True) version of the mutated + function containing `prange`. + The three results are then compared and the `prange` based function's + llvm_ir is inspected to ensure the scheduler code is present. + + Arguments: + pyfunc - the python function to test + args - data arguments to pass to the pyfunc under test + + Keyword Arguments: + patch_instance - iterable containing which instances of `range` to + replace. If not present all instance of `range` are + replaced. + scheduler_type - 'signed', 'unsigned' or None, default is None. + Supply in cases where the presence of a specific + scheduler is to be asserted. + check_fastmath - if True then a check will be performed to ensure the + IR contains instructions labelled with 'fast' + check_fastmath_result - if True then a check will be performed to + ensure the result of running with fastmath + on matches that of the pyfunc + Remaining kwargs are passed to np.testing.assert_almost_equal + + + Example: + def foo(): + acc = 0 + for x in range(5): + for y in range(10): + acc +=1 + return acc + + # calling as + prange_tester(foo) + # will test code equivalent to + # def foo(): + # acc = 0 + # for x in prange(5): # <- changed + # for y in prange(10): # <- changed + # acc +=1 + # return acc + + # calling as + prange_tester(foo, patch_instance=[1]) + # will test code equivalent to + # def foo(): + # acc = 0 + # for x in range(5): # <- outer loop (0) unchanged + # for y in prange(10): # <- inner loop (1) changed + # acc +=1 + # return acc + + """ + patch_instance = kwargs.pop('patch_instance', None) + check_fastmath = kwargs.pop('check_fastmath', False) + check_fastmath_result = kwargs.pop('check_fastmath_result', False) + + pfunc = self.generate_prange_func(pyfunc, patch_instance) + + # Compile functions + 
# compile a standard njit of the original function + sig = tuple([numba.typeof(x) for x in args]) + cfunc = self.compile_njit(pyfunc, sig) + + # compile the prange injected function + with warnings.catch_warnings(record=True) as raised_warnings: + warnings.simplefilter('always') + cpfunc = self.compile_parallel(pfunc, sig) + + # if check_fastmath is True then check fast instructions + if check_fastmath: + self.assert_fastmath(pfunc, sig) + + # if check_fastmath_result is True then compile a function + # so that the parfors checker can assert the result is ok. + if check_fastmath_result: + fastcpfunc = self.compile_parallel_fastmath(pfunc, sig) + kwargs = dict({'fastmath_pcres': fastcpfunc}, **kwargs) + + self.check_parfors_vs_others(pyfunc, cfunc, cpfunc, *args, **kwargs) + return raised_warnings + + +@skip_parfors_unsupported +class TestPrangeBasic(TestPrangeBase): + """ Tests Prange """ + + def test_prange01(self): + def test_impl(): + n = 4 + A = np.zeros(n) + for i in range(n): + A[i] = 2.0 * i + return A + self.prange_tester(test_impl, scheduler_type='unsigned', + check_fastmath=True) + + def test_prange02(self): + def test_impl(): + n = 4 + A = np.zeros(n - 1) + for i in range(1, n): + A[i - 1] = 2.0 * i + return A + self.prange_tester(test_impl, scheduler_type='unsigned', + check_fastmath=True) + + def test_prange03(self): + def test_impl(): + s = 10 + for i in range(10): + s += 2 + return s + self.prange_tester(test_impl, scheduler_type='unsigned', + check_fastmath=True) + + def test_prange03mul(self): + def test_impl(): + s = 3 + for i in range(10): + s *= 2 + return s + self.prange_tester(test_impl, scheduler_type='unsigned', + check_fastmath=True) + + def test_prange03sub(self): + def test_impl(): + s = 100 + for i in range(10): + s -= 2 + return s + self.prange_tester(test_impl, scheduler_type='unsigned', + check_fastmath=True) + + def test_prange03div(self): + def test_impl(): + s = 10 + for i in range(10): + s /= 2 + return s + 
self.prange_tester(test_impl, scheduler_type='unsigned', + check_fastmath=True) + + def test_prange04(self): + def test_impl(): + a = 2 + b = 3 + A = np.empty(4) + for i in range(4): + if i == a: + A[i] = b + else: + A[i] = 0 + return A + self.prange_tester(test_impl, scheduler_type='unsigned', + check_fastmath=True) + + def test_prange05(self): + def test_impl(): + n = 4 + A = np.ones((n), dtype=np.float64) + s = 0 + for i in range(1, n - 1, 1): + s += A[i] + return s + self.prange_tester(test_impl, scheduler_type='unsigned', + check_fastmath=True) + + def test_prange06(self): + def test_impl(): + n = 4 + A = np.ones((n), dtype=np.float64) + s = 0 + for i in range(1, 1, 1): + s += A[i] + return s + self.prange_tester(test_impl, scheduler_type='unsigned', + check_fastmath=True) + + def test_prange07(self): + def test_impl(): + n = 4 + A = np.ones((n), dtype=np.float64) + s = 0 + for i in range(n, 1): + s += A[i] + return s + self.prange_tester(test_impl, scheduler_type='unsigned', + check_fastmath=True) + + def test_prange08(self): + def test_impl(): + n = 4 + A = np.ones((n)) + acc = 0 + for i in range(len(A)): + for j in range(len(A)): + acc += A[i] + return acc + self.prange_tester(test_impl, scheduler_type='unsigned', + check_fastmath=True) + + def test_prange08_1(self): + def test_impl(): + n = 4 + A = np.ones((n)) + acc = 0 + for i in range(4): + for j in range(4): + acc += A[i] + return acc + self.prange_tester(test_impl, scheduler_type='unsigned', + check_fastmath=True) + + def test_prange09(self): + def test_impl(): + n = 4 + acc = 0 + for i in range(n): + for j in range(n): + acc += 1 + return acc + # patch inner loop to 'prange' + self.prange_tester(test_impl, patch_instance=[1], + scheduler_type='unsigned', + check_fastmath=True) + + def test_prange10(self): + def test_impl(): + n = 4 + acc2 = 0 + for j in range(n): + acc1 = 0 + for i in range(n): + acc1 += 1 + acc2 += acc1 + return acc2 + # patch outer loop to 'prange' + self.prange_tester(test_impl, 
patch_instance=[0], + scheduler_type='unsigned', + check_fastmath=True) + + @unittest.skip("list append is not thread-safe yet (#2391, #2408)") + def test_prange11(self): + def test_impl(): + n = 4 + return [np.sin(j) for j in range(n)] + self.prange_tester(test_impl, scheduler_type='unsigned', + check_fastmath=True) + + def test_prange12(self): + def test_impl(): + acc = 0 + n = 4 + X = np.ones(n) + for i in range(-len(X)): + acc += X[i] + return acc + self.prange_tester(test_impl, scheduler_type='unsigned', + check_fastmath=True) + + def test_prange13(self): + def test_impl(n): + acc = 0 + for i in range(n): + acc += 1 + return acc + self.prange_tester(test_impl, np.int32(4), scheduler_type='unsigned', + check_fastmath=True) + + def test_prange14(self): + def test_impl(A): + s = 3 + for i in range(len(A)): + s += A[i]*2 + return s + # this tests reduction detection well since the accumulated variable + # is initialized before the parfor and the value accessed from the array + # is updated before accumulation + self.prange_tester(test_impl, np.random.ranf(4), + scheduler_type='unsigned', + check_fastmath=True) + + def test_prange15(self): + # from issue 2587 + # test parfor type inference when there is multi-dimensional indexing + def test_impl(N): + acc = 0 + for i in range(N): + x = np.ones((1, 1)) + acc += x[0, 0] + return acc + self.prange_tester(test_impl, 1024, scheduler_type='unsigned', + check_fastmath=True) + + # Tests for negative ranges + def test_prange16(self): + def test_impl(N): + acc = 0 + for i in range(-N, N): + acc += 2 + return acc + self.prange_tester(test_impl, 1024, scheduler_type='signed', + check_fastmath=True) + + def test_prange17(self): + def test_impl(N): + acc = 0 + X = np.ones(N) + for i in range(-N, N): + acc += X[i] + return acc + self.prange_tester(test_impl, 9, scheduler_type='signed', + check_fastmath=True) + + def test_prange18(self): + def test_impl(N): + acc = 0 + X = np.ones(N) + for i in range(-N, 5): + acc += X[i] + for j 
in range(-4, N): + acc += X[j] + return acc + self.prange_tester(test_impl, 9, scheduler_type='signed', + check_fastmath=True) + + def test_prange19(self): + def test_impl(N): + acc = 0 + M = N + 4 + X = np.ones((N, M)) + for i in range(-N, N): + for j in range(-M, M): + acc += X[i, j] + return acc + self.prange_tester(test_impl, 9, scheduler_type='signed', + check_fastmath=True) + + def test_prange20(self): + def test_impl(N): + acc = 0 + X = np.ones(N) + for i in range(-1, N): + acc += X[i] + return acc + self.prange_tester(test_impl, 9, scheduler_type='signed', + check_fastmath=True) + + def test_prange21(self): + def test_impl(N): + acc = 0 + for i in range(-3, -1): + acc += 3 + return acc + self.prange_tester(test_impl, 9, scheduler_type='signed', + check_fastmath=True) + + def test_prange22(self): + def test_impl(): + a = 0 + b = 3 + A = np.empty(4) + for i in range(-2, 2): + if i == a: + A[i] = b + elif i < 1: + A[i] = -1 + else: + A[i] = 7 + return A + self.prange_tester(test_impl, scheduler_type='signed', + check_fastmath=True, check_fastmath_result=True) + + def test_prange23(self): + # test non-contig input + def test_impl(A): + for i in range(len(A)): + A[i] = i + return A + A = np.zeros(32)[::2] + self.prange_tester(test_impl, A, scheduler_type='unsigned', + check_fastmath=True, check_fastmath_result=True) + + def test_prange24(self): + # test non-contig input, signed range + def test_impl(A): + for i in range(-len(A), 0): + A[i] = i + return A + A = np.zeros(32)[::2] + self.prange_tester(test_impl, A, scheduler_type='signed', + check_fastmath=True, check_fastmath_result=True) + + def test_prange25(self): + def test_impl(A): + n = len(A) + buf = [np.zeros_like(A) for _ in range(n)] + for i in range(n): + buf[i] = A + i + return buf + A = np.ones((10,)) + self.prange_tester(test_impl, A, patch_instance=[1], + scheduler_type='unsigned', check_fastmath=True, + check_fastmath_result=True) + + cpfunc = self.compile_parallel(test_impl, (numba.typeof(A),)) + 
diagnostics = cpfunc.metadata['parfor_diagnostics'] + hoisted_allocs = diagnostics.hoisted_allocations() + self.assertEqual(len(hoisted_allocs), 0) + + def test_prange26(self): + def test_impl(A): + B = A[::3] + for i in range(len(B)): + B[i] = i + return A + A = np.zeros(32)[::2] + self.prange_tester(test_impl, A, scheduler_type='unsigned', + check_fastmath=True, check_fastmath_result=True) + + def test_prange27(self): + # issue5597: usedef error in parfor + def test_impl(a, b, c): + for j in range(b[0]-1): + for k in range(2): + z = np.abs(a[c-1:c+1]) + return 0 + + # patch inner loop to 'prange' + self.prange_tester(test_impl, + np.arange(20), + np.asarray([4,4,4,4,4,4,4,4,4,4]), + 0, + patch_instance=[1], + scheduler_type='unsigned', + check_fastmath=True) + + def test_prange28(self): + # issue7105: label conflict in nested parfor + def test_impl(x, y): + out = np.zeros(len(y)) + for idx in range(0, len(y)): + i0 = y[idx, 0] + i1 = y[idx, 1] + Pt1 = x[i0] + Pt2 = x[i1] + v = Pt1 - Pt2 + vl2 = v[0] + v[1] + out[idx] = vl2 + return out + + X = np.array([[-1., -1.], + [-1., 1.], + [ 0., 0.], + [ 1., -1.], + [ 1., 0.], + [ 1., 1.]]) + + Y = np.array([[0, 1], + [1, 2], + [2, 3], + [3, 4], + [4, 5]]) + + self.prange_tester(test_impl, X, Y, scheduler_type='unsigned', + check_fastmath=True, check_fastmath_result=True) + + def test_prange29(self): + # issue7630: SSA renaming in prange header + def test_impl(flag): + result = 0 + if flag: + for i in range(1): + result += 1 + else: + for i in range(1): + result -= 3 + return result + + self.prange_tester(test_impl, True) + self.prange_tester(test_impl, False) + + def test_prange30(self): + # issue7675: broadcast setitem + def test_impl(x, par, numthreads): + n_par = par.shape[0] + n_x = len(x) + result = np.zeros((n_par, n_x), dtype=np.float64) + chunklen = (len(x) + numthreads - 1) // numthreads + + for i in range(numthreads): + start = i * chunklen + stop = (i + 1) * chunklen + result[:, start:stop] = x[start:stop] * 
par[:] + + return result + + x = np.array(np.arange(0, 6, 1.0)) + par = np.array([1.0, 2.0, 3.0]) + + self.prange_tester(test_impl, x, par, 2) + + +@register_jitable +def test_call_hoisting_outcall(a,b): + return (a, b) + + +@skip_parfors_unsupported +class TestPrangeSpecific(TestPrangeBase): + """ Tests specific features/problems found under prange""" + + def test_prange_two_instances_same_reduction_var(self): + # issue4922 - multiple uses of same reduction variable + def test_impl(n): + c = 0 + for i in range(n): + c += 1 + if i > 10: + c += 1 + return c + self.prange_tester(test_impl, 9) + + def test_prange_conflicting_reduction_ops(self): + def test_impl(n): + c = 0 + for i in range(n): + c += 1 + if i > 10: + c *= 1 + return c + + with self.assertRaises(errors.UnsupportedError) as raises: + self.prange_tester(test_impl, 9) + msg = ('Reduction variable c has multiple conflicting reduction ' + 'operators.') + self.assertIn(msg, str(raises.exception)) + + def test_prange_two_conditional_reductions(self): + # issue6414 + def test_impl(): + A = B = 0 + for k in range(1): + if k == 2: + A += 1 + else: + x = np.zeros((1, 1)) + if x[0, 0]: + B += 1 + return A, B + self.prange_tester(test_impl) + + def test_prange_nested_reduction1(self): + def test_impl(): + A = 0 + for k in range(1): + for i in range(1): + if i == 0: + A += 1 + return A + self.prange_tester(test_impl) + + @disabled_test + def test_check_error_model(self): + def test_impl(): + n = 32 + A = np.zeros(n) + for i in range(n): + A[i] = 1 / i # div-by-zero when i = 0 + return A + + with self.assertRaises(ZeroDivisionError) as raises: + test_impl() + + # compile parallel functions + pfunc = self.generate_prange_func(test_impl, None) + pcres = self.compile_parallel(pfunc, ()) + pfcres = self.compile_parallel_fastmath(pfunc, ()) + + # should raise + with self.assertRaises(ZeroDivisionError) as raises: + pcres.entry_point() + + # should not raise + result = pfcres.entry_point() + self.assertEqual(result[0], 
np.inf) + + def test_check_alias_analysis(self): + # check alias analysis reports ok + def test_impl(A): + for i in range(len(A)): + B = A[i] + B[:] = 1 + return A + A = np.zeros(32).reshape(4, 8) + self.prange_tester(test_impl, A, scheduler_type='unsigned', + check_fastmath=True, check_fastmath_result=True) + pfunc = self.generate_prange_func(test_impl, None) + sig = tuple([numba.typeof(A)]) + cres = self.compile_parallel_fastmath(pfunc, sig) + _ir = self._get_gufunc_ir(cres) + for k, v in _ir.items(): + for line in v.splitlines(): + # get the fn definition line + if 'define' in line and k in line: + # there should only be 2x noalias, one on each of the first + # 2 args (retptr, excinfo). + # Note: used to be 3x no noalias, but env arg is dropped. + self.assertEqual(line.count('noalias'), 2) + break + + def test_prange_raises_invalid_step_size(self): + def test_impl(N): + acc = 0 + for i in range(0, N, 2): + acc += 2 + return acc + + with self.assertRaises(errors.UnsupportedRewriteError) as raises: + self.prange_tester(test_impl, 1024) + msg = 'Only constant step size of 1 is supported for prange' + self.assertIn(msg, str(raises.exception)) + + def test_prange_fastmath_check_works(self): + # this function will benefit from `fastmath`, the div will + # get optimised to a multiply by reciprocal and the accumulator + # then becomes an fmadd: A = A + i * 0.5 + def test_impl(): + n = 128 + A = 0 + for i in range(n): + A += i / 2.0 + return A + self.prange_tester(test_impl, scheduler_type='unsigned', + check_fastmath=True) + pfunc = self.generate_prange_func(test_impl, None) + cres = self.compile_parallel_fastmath(pfunc, ()) + ir = self._get_gufunc_ir(cres) + _id = '%[A-Z_0-9]?(.[0-9]+)+[.]?[i]?' 
+ recipr_str = r'\s+%s = fmul fast double %s, 5.000000e-01' + reciprocal_inst = re.compile(recipr_str % (_id, _id)) + fadd_inst = re.compile(r'\s+%s = fadd fast double %s, %s' + % (_id, _id, _id)) + # check there is something like: + # %.329 = fmul fast double %.325, 5.000000e-01 + # %.337 = fadd fast double %A.07, %.329 + found = False + for name, kernel in ir.items(): + # make sure to look at the kernel corresponding to the cres/pfunc + if name in cres.library.get_llvm_str(): + splitted = kernel.splitlines() + for i, x in enumerate(splitted): + if reciprocal_inst.match(x): + self.assertTrue(fadd_inst.match(splitted[i + 1])) + found = True + break + + self.assertTrue(found, "fast instruction pattern was not found.") + + def test_parfor_alias1(self): + def test_impl(n): + b = np.zeros((n, n)) + a = b[0] + for j in range(n): + a[j] = j + 1 + return b.sum() + self.prange_tester(test_impl, 4) + + def test_parfor_alias2(self): + def test_impl(n): + b = np.zeros((n, n)) + for i in range(n): + a = b[i] + for j in range(n): + a[j] = i + j + return b.sum() + self.prange_tester(test_impl, 4) + + def test_parfor_alias3(self): + def test_impl(n): + b = np.zeros((n, n, n)) + for i in range(n): + a = b[i] + for j in range(n): + c = a[j] + for k in range(n): + c[k] = i + j + k + return b.sum() + self.prange_tester(test_impl, 4) + + def test_parfor_race_1(self): + def test_impl(x, y): + for j in range(y): + k = x + return k + raised_warnings = self.prange_tester(test_impl, 10, 20) + warning_obj = raised_warnings[0] + expected_msg = ("Variable k used in parallel loop may be written to " + "simultaneously by multiple workers and may result " + "in non-deterministic or unintended results.") + self.assertIn(expected_msg, str(warning_obj.message)) + + def test_nested_parfor_push_call_vars(self): + """ issue 3686: if a prange has something inside it that causes + a nested parfor to be generated and both the inner and outer + parfor use the same call variable defined outside the parfors 
+ then ensure that when that call variable is pushed into the + parfor that the call variable isn't duplicated with the same + name resulting in a redundant type lock. + """ + def test_impl(): + B = 0 + f = np.negative + for i in range(1): + this_matters = f(1.) + B += f(np.zeros(1,))[0] + for i in range(2): + this_matters = f(1.) + B += f(np.zeros(1,))[0] + + return B + self.prange_tester(test_impl) + + def test_copy_global_for_parfor(self): + """ issue4903: a global is copied next to a parfor so that + it can be inlined into the parfor and thus not have to be + passed to the parfor (i.e., an unsupported function type). + This global needs to be renamed in the block into which + it is copied. + """ + def test_impl(zz, tc): + lh = np.zeros(len(tc)) + lc = np.zeros(len(tc)) + for i in range(1): + nt = tc[i] + for t in range(nt): + lh += np.exp(zz[i, t]) + for t in range(nt): + lc += np.exp(zz[i, t]) + return lh, lc + + m = 2 + zz = np.ones((m, m, m)) + tc = np.ones(m, dtype=np.int_) + self.prange_tester(test_impl, zz, tc, patch_instance=[0]) + + def test_multiple_call_getattr_object(self): + def test_impl(n): + B = 0 + f = np.negative + for i in range(1): + this_matters = f(1.0) + B += f(n) + + return B + self.prange_tester(test_impl, 1.0) + + def test_argument_alias_recarray_field(self): + # Test for issue4007. + def test_impl(n): + for i in range(len(n)): + n.x[i] = 7.0 + return n + X1 = np.zeros(10, dtype=[('x', float), ('y', int), ]) + X2 = np.zeros(10, dtype=[('x', float), ('y', int), ]) + X3 = np.zeros(10, dtype=[('x', float), ('y', int), ]) + v1 = X1.view(np.recarray) + v2 = X2.view(np.recarray) + v3 = X3.view(np.recarray) + + # Numpy doesn't seem to support almost equal on recarray. + # So, we convert to list and use assertEqual instead. 
+ python_res = list(test_impl(v1)) + njit_res = list(njit(test_impl)(v2)) + pa_func = njit(test_impl, parallel=True) + pa_res = list(pa_func(v3)) + self.assertEqual(python_res, njit_res) + self.assertEqual(python_res, pa_res) + + def test_mutable_list_param(self): + """ issue3699: test that mutable variable to call in loop + is not hoisted. The call in test_impl forces a manual + check here rather than using prange_tester. + """ + @njit + def list_check(X): + """ If the variable X is hoisted in the test_impl prange + then subsequent list_check calls would return increasing + values. + """ + ret = X[-1] + a = X[-1] + 1 + X.append(a) + return ret + def test_impl(n): + for i in prange(n): + X = [100] + a = list_check(X) + return a + python_res = test_impl(10) + njit_res = njit(test_impl)(10) + pa_func = njit(test_impl, parallel=True) + pa_res = pa_func(10) + self.assertEqual(python_res, njit_res) + self.assertEqual(python_res, pa_res) + + def test_list_comprehension_prange(self): + # issue4569 + def test_impl(x): + return np.array([len(x[i]) for i in range(len(x))]) + x = [np.array([1,2,3], dtype=int),np.array([1,2], dtype=int)] + self.prange_tester(test_impl, x) + + def test_ssa_false_reduction(self): + # issue5698 + # SSA for h creates assignments to h that make it look like a + # reduction variable except that it lacks an associated + # reduction operator. Test here that h is excluded as a + # reduction variable. + def test_impl(image, a, b): + empty = np.zeros(image.shape) + for i in range(image.shape[0]): + r = image[i][0] / 255.0 + if a == 0: + h = 0 + if b == 0: + h = 0 + empty[i] = [h, h, h] + return empty + + image = np.zeros((3, 3), dtype=np.int32) + self.prange_tester(test_impl, image, 0, 0) + + def test_list_setitem_hoisting(self): + # issue5979 + # Don't hoist list initialization if list item set. 
+ def test_impl(): + n = 5 + a = np.empty(n, dtype=np.int64) + for k in range(5): + X = [0] + X[0] = 1 + a[k] = X[0] + return a + + self.prange_tester(test_impl) + + def test_tuple_hoisting(self): + # issue9529 + def test_impl(inputs): + outputs = [(Dict.empty(key_type=types.int64, value_type=types.float64), np.zeros(1)) for _ in range(len(inputs))] + for i in range(len(inputs)): + y = inputs[i] + out = np.zeros(1) + out[0] = i + outputs[i] = (inputs[i], out) + return outputs[0][1][0] + + N = config.NUMBA_NUM_THREADS + 1 + self.prange_tester(test_impl, [Dict.empty(key_type=types.int64, value_type=types.float64) for i in range(N)], patch_instance=[1]) + + def test_call_hoisting(self): + # issue9529 + def test_impl(inputs): + outputs = [(Dict.empty(key_type=types.int64, value_type=types.float64), np.zeros(1)) for _ in range(len(inputs))] + for i in range(len(inputs)): + y = inputs[i] + out = np.zeros(1) + out[0] = i + outputs[i] = test_call_hoisting_outcall(inputs[i], out) + return outputs[0][1][0] + + N = config.NUMBA_NUM_THREADS + 1 + self.prange_tester(test_impl, [Dict.empty(key_type=types.int64, value_type=types.float64) for i in range(N)], patch_instance=[1]) + + def test_record_array_setitem(self): + # issue6704 + state_dtype = np.dtype([('var', np.int32)]) + + def test_impl(states): + for i in range(1): + states[i]['var'] = 1 + + def comparer(a, b): + assert(a[0]['var'] == b[0]['var']) + + self.prange_tester(test_impl, + np.zeros(shape=1, dtype=state_dtype), + check_arg_equality=[comparer]) + + def test_record_array_setitem_yield_array(self): + state_dtype = np.dtype([('x', np.intp)]) + + def test_impl(states): + n = states.size + for i in range(states.size): + states["x"][i] = 7 + i + return states + + states = np.zeros(10, dtype=state_dtype) + + def comparer(a, b): + np.testing.assert_equal(a, b) + + self.prange_tester(test_impl, + states, + check_arg_equality=[comparer]) + + def test_issue7501(self): + def test_impl(size, case): + result = np.zeros((size,)) 
+ if case == 1: + for i in range(size): + result[i] += 1 + else: + for i in range(size): + result[i] += 2 + return result[0] + + self.prange_tester(test_impl, 3, 1) + + def test_kde_example(self): + def test_impl(X): + # KDE example + b = 0.5 + points = np.array([-1.0, 2.0, 5.0]) + N = points.shape[0] + n = X.shape[0] + exps = 0 + for i in range(n): + p = X[i] + d = (-(p - points)**2) / (2 * b**2) + m = np.min(d) + exps += m - np.log(b * N) + np.log(np.sum(np.exp(d - m))) + return exps + + n = 128 + X = np.random.ranf(n) + self.prange_tester(test_impl, X) + + @skip_parfors_unsupported + def test_issue7578(self): + def test_impl(x): + A = np.zeros_like(x) + tmp = np.cos(x) # this can be any 1-arity ufunc + + for i in range(len(x)): + A[i] = tmp.sum() + + return A + + x = np.arange(10.) + self.prange_tester(test_impl, x) + +@skip_parfors_unsupported +class TestParforChunksizing(TestCase): + """ + Tests chunksize handling in ParallelAccelerator. + """ + _numba_parallel_test_ = False + + def setUp(self): + set_parallel_chunksize(0) + + def tearDown(self): + set_parallel_chunksize(0) + + def test_python_parallel_chunksize_basic(self): + # Test basic chunksize operations outside njit. + self.assertEqual(get_parallel_chunksize(), 0) + set_parallel_chunksize(8) + self.assertEqual(get_parallel_chunksize(), 8) + set_parallel_chunksize(0) + self.assertEqual(get_parallel_chunksize(), 0) + + def test_python_with_chunksize(self): + # Test "with parallel_chunksize" outside njit. + self.assertEqual(get_parallel_chunksize(), 0) + with parallel_chunksize(8): + self.assertEqual(get_parallel_chunksize(), 8) + self.assertEqual(get_parallel_chunksize(), 0) + + def test_njit_parallel_chunksize_basic(self): + # Test basic chunksize operations inside njit. 
+ @njit + def get_cs(): + return get_parallel_chunksize() + + @njit + def set_cs(x): + return set_parallel_chunksize(x) + + self.assertEqual(get_cs(), 0) + set_cs(8) + self.assertEqual(get_cs(), 8) + set_cs(0) + self.assertEqual(get_cs(), 0) + + def test_njit_with_chunksize(self): + # Test "with parallel_chunksize" inside njit. + @njit + def test_impl(x): + cs1 = get_parallel_chunksize() + with parallel_chunksize(8): + cs2 = get_parallel_chunksize() + cs3 = get_parallel_chunksize() + return cs1, cs2, cs3 + + cs1, cs2, cs3 = test_impl(8) + + self.assertEqual(cs1, 0) + self.assertEqual(cs2, 8) + self.assertEqual(cs3, 0) + + def test_all_iterations_reset_chunksize(self): + """ Test that all the iterations get run if you set the + chunksize. Also check that the chunksize that each + worker thread sees has been reset to 0. """ + + @njit(parallel=True) + def test_impl(cs, n): + res = np.zeros(n) + inner_cs = np.full(n, -13) + with numba.parallel_chunksize(cs): + for i in numba.prange(n): + inner_cs[i] = numba.get_parallel_chunksize() + res[i] = 13 + return res, inner_cs + + # Test a variety of array and chunk sizes. + # 1000 is a round number, 997 is prime, 943 is product of two + # primes, 961 is square of a prime. + for j in [1000, 997, 943, 961]: + for i in range(15): + res, inner_cs = test_impl(i+1, j) + self.assertTrue(np.all(res == 13)) + self.assertTrue(np.all(inner_cs == 0)) + + def test_njit_parallel_chunksize_negative(self): + # Test negative set_parallel_chunksize inside njit. + with self.assertRaises(ValueError) as raised: + @njit + def neg_test(): + set_parallel_chunksize(-1) + + neg_test() + + msg = "chunksize must be greater than or equal to zero" + self.assertIn(msg, str(raised.exception)) + + def test_python_parallel_chunksize_negative(self): + # Test negative set_parallel_chunksize outside njit. 
+ with self.assertRaises(ValueError) as raised: + set_parallel_chunksize(-1) + + msg = "chunksize must be greater than or equal to zero" + self.assertIn(msg, str(raised.exception)) + + def test_njit_parallel_chunksize_invalid_type(self): + with self.assertRaises(errors.TypingError) as raised: + @njit + def impl(): + set_parallel_chunksize('invalid_type') + + impl() + + msg = "The parallel chunksize must be an integer" + self.assertIn(msg, str(raised.exception)) + + def test_python_parallel_chunksize_invalid_type(self): + with self.assertRaises(TypeError) as raised: + set_parallel_chunksize('invalid_type') + + msg = "The parallel chunksize must be an integer" + self.assertIn(msg, str(raised.exception)) + + +@skip_parfors_unsupported +@x86_only +class TestParforsVectorizer(TestPrangeBase): + + # env mutating test + _numba_parallel_test_ = False + + def get_gufunc_asm(self, func, schedule_type, *args, **kwargs): + + fastmath = kwargs.pop('fastmath', False) + cpu_name = kwargs.pop('cpu_name', 'skylake-avx512') + assertions = kwargs.pop('assertions', True) + # force LLVM to use zmm registers for vectorization + # https://reviews.llvm.org/D67259 + cpu_features = kwargs.pop('cpu_features', '-prefer-256-bit') + + env_opts = {'NUMBA_CPU_NAME': cpu_name, + 'NUMBA_CPU_FEATURES': cpu_features, + } + + overrides = [] + for k, v in env_opts.items(): + overrides.append(override_env_config(k, v)) + + with overrides[0], overrides[1]: + sig = tuple([numba.typeof(x) for x in args]) + pfunc_vectorizable = self.generate_prange_func(func, None) + if fastmath == True: + cres = self.compile_parallel_fastmath(pfunc_vectorizable, sig) + else: + cres = self.compile_parallel(pfunc_vectorizable, sig) + + # get the gufunc asm + asm = self._get_gufunc_asm(cres) + + if assertions: + schedty = re.compile(r'call\s+\w+\*\s+@do_scheduling_(\w+)\(') + matches = schedty.findall(cres.library.get_llvm_str()) + self.assertGreaterEqual(len(matches), 1) # at least 1 parfor call + 
self.assertEqual(matches[0], schedule_type) + self.assertNotEqual(asm, {}) + + return asm + + @linux_only + @TestCase.run_test_in_subprocess + def test_vectorizer_fastmath_asm(self): + """ This checks that if fastmath is set and the underlying hardware + is suitable, and the function supplied is amenable to fastmath based + vectorization, that the vectorizer actually runs. + """ + + # This function will benefit from `fastmath` if run on a suitable + # target. The vectorizer should unwind the loop and generate + # packed dtype=double add and sqrt instructions. + def will_vectorize(A): + n = len(A) + acc = 0 + for i in range(n): + acc += np.sqrt(i) + return acc + + arg = np.zeros(10) + + fast_asm = self.get_gufunc_asm(will_vectorize, 'unsigned', arg, + fastmath=True) + slow_asm = self.get_gufunc_asm(will_vectorize, 'unsigned', arg, + fastmath=False) + for v in fast_asm.values(): + # should unwind and call vector sqrt then vector add + # all on packed doubles using zmm's + self.assertTrue('vaddpd' in v) + self.assertTrue('vsqrtpd' in v or '__svml_sqrt' in v) + self.assertTrue('zmm' in v) + + for v in slow_asm.values(): + # vector variants should not be present + self.assertTrue('vaddpd' not in v) + self.assertTrue('vsqrtpd' not in v) + # check scalar variant is present + self.assertTrue('vsqrtsd' in v and '__svml_sqrt' not in v) + self.assertTrue('vaddsd' in v) + # check no zmm addressing is present + self.assertTrue('zmm' not in v) + + @linux_only + @TestCase.run_test_in_subprocess(envvars={'NUMBA_BOUNDSCHECK': '0'}) + def test_unsigned_refusal_to_vectorize(self): + """ This checks that if fastmath is set and the underlying hardware + is suitable, and the function supplied is amenable to fastmath based + vectorization, that the vectorizer actually runs. 
+ """ + + def will_not_vectorize(A): + n = len(A) + for i in range(-n, 0): + A[i] = np.sqrt(A[i]) + return A + + def will_vectorize(A): + n = len(A) + for i in range(n): + A[i] = np.sqrt(A[i]) + return A + + arg = np.zeros(10) + + # Boundschecking breaks vectorization + self.assertFalse(config.BOUNDSCHECK) + novec_asm = self.get_gufunc_asm(will_not_vectorize, 'signed', arg, + fastmath=True) + + vec_asm = self.get_gufunc_asm(will_vectorize, 'unsigned', arg, + fastmath=True) + + for v in novec_asm.values(): + # vector variant should not be present + self.assertTrue('vsqrtpd' not in v) + # check scalar variant is present + self.assertTrue('vsqrtsd' in v) + # check no zmm addressing is present + self.assertTrue('zmm' not in v) + + for v in vec_asm.values(): + # should unwind and call vector sqrt then vector mov + # all on packed doubles using zmm's + self.assertTrue('vsqrtpd' in v or '__svml_sqrt' in v) + self.assertTrue('vmovupd' in v) + self.assertTrue('zmm' in v) + + @linux_only + # needed as 32bit doesn't have equivalent signed/unsigned instruction + # generation for this function + @TestCase.run_test_in_subprocess(envvars={'NUMBA_BOUNDSCHECK': '0'}) + def test_signed_vs_unsigned_vec_asm(self): + """ This checks vectorization for signed vs unsigned variants of a + trivial accumulator, the only meaningful difference should be the + presence of signed vs. unsigned unpack instructions (for the + induction var). + """ + def signed_variant(): + n = 4096 + A = 0. + for i in range(-n, 0): + A += i + return A + + def unsigned_variant(): + n = 4096 + A = 0. 
+ for i in range(n): + A += i + return A + + # Boundschecking breaks the diff check below because of the pickled exception + self.assertFalse(config.BOUNDSCHECK) + signed_asm = self.get_gufunc_asm(signed_variant, 'signed', + fastmath=True) + unsigned_asm = self.get_gufunc_asm(unsigned_variant, 'unsigned', + fastmath=True) + + def strip_instrs(asm): + acc = [] + for x in asm.splitlines(): + spd = x.strip() + # filter out anything that isn't a trivial instruction + # and anything with the gufunc id as it contains an address + if spd != '' and not (spd.startswith('.') + or spd.startswith('_') + or spd.startswith('"') + or '__numba_parfor_gufunc' in spd): + acc.append(re.sub('[\t]', '', spd)) + return acc + + for k, v in signed_asm.items(): + signed_instr = strip_instrs(v) + break + + for k, v in unsigned_asm.items(): + unsigned_instr = strip_instrs(v) + break + + from difflib import SequenceMatcher as sm + # make sure that the only difference in instruction (if there is a + # difference) is the char 'u'. For example: + # vcvtsi2sdq vs. vcvtusi2sdq + self.assertEqual(len(signed_instr), len(unsigned_instr)) + for a, b in zip(signed_instr, unsigned_instr): + if a == b: + continue + else: + s = sm(lambda x: x == '\t', a, b) + ops = s.get_opcodes() + for op in ops: + if op[0] == 'insert': + self.assertEqual(b[op[-2]:op[-1]], 'u') + + +@skip_parfors_unsupported +class TestParforReductionSetNumThreads(TestCase): + """Test execution correctness on reductions with set_num_threads. 
+ """ + def test_add(self): + N = config.NUMBA_NUM_THREADS + M = 2 * N + mask = N - 1 + + @njit(parallel=True) + def udt(nthreads): + acc = 0 + set_num_threads(nthreads) + for i in prange(M): + local_mask = 1 + i % mask + set_num_threads(local_mask) + gnt = get_num_threads() + acc += gnt + return acc + + expect = udt.py_func(mask) + got = udt(mask) + self.assertPreciseEqual(expect, got) + + def test_mul(self): + # This min will prevent larger thread counts from generating + # overflow in the loop below. + N = min(4, config.NUMBA_NUM_THREADS) + M = 2 * N + mask = N - 1 + + @njit(parallel=True) + def udt(nthreads): + acc = 1 + set_num_threads(nthreads) + for i in prange(M): + local_mask = 1 + i % mask + set_num_threads(local_mask) + gnt = get_num_threads() + acc *= gnt + return acc + + expect = udt.py_func(mask) + got = udt(mask) + self.assertPreciseEqual(expect, got) + + def test_max(self): + N = config.NUMBA_NUM_THREADS + M = 2 * N + mask = N - 1 + + @njit(parallel=True) + def udt(nthreads): + acc = 1 + set_num_threads(nthreads) + for i in prange(M): + local_mask = 1 + i % mask + set_num_threads(local_mask) + gnt = get_num_threads() + acc = max(acc, gnt) + return acc + + expect = udt.py_func(mask) + got = udt(mask) + self.assertPreciseEqual(expect, got) + + +@skip_parfors_unsupported +class TestDiagnosticEnvVar(TestCase): + @TestCase.run_test_in_subprocess() + def test_diagnostics_env_var1(self): + os.environ['NUMBA_PARALLEL_DIAGNOSTICS']='4' + with captured_stdout() as stdout: + @njit(parallel=True) + def impl(): + n = 100 + b = np.zeros((n), dtype=np.float64) + for i in prange(n): + b[i] = 1 + return b + + impl() + the_output = stdout.getvalue() + self.assertIn("Parallel Accelerator Optimizing", the_output) + + +if __name__ == "__main__": + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_parfors_caching.py b/venv/lib/python3.10/site-packages/numba/tests/test_parfors_caching.py new file mode 100644 index 
0000000000000000000000000000000000000000..87ee3c0e1a2deb35ce4ef84f4aa67ff313f3d973 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_parfors_caching.py @@ -0,0 +1,83 @@ +import os.path +import subprocess +import sys + +import numpy as np + +from numba.tests.support import skip_parfors_unsupported +from .test_caching import DispatcherCacheUsecasesTest + + +@skip_parfors_unsupported +class TestParforsCache(DispatcherCacheUsecasesTest): + here = os.path.dirname(__file__) + usecases_file = os.path.join(here, "parfors_cache_usecases.py") + modname = "parfors_caching_test_fodder" + + def run_test(self, fname, num_funcs=1): + mod = self.import_module() + self.check_pycache(0) + f = getattr(mod, fname) + ary = np.ones(10) + # The result of these functions is derived from e.g. out of order + # accumulation so allclose should be fine. + np.testing.assert_allclose(f(ary), f.py_func(ary)) + + dynamic_globals = [cres.library.has_dynamic_globals + for cres in f.overloads.values()] + [cres] = f.overloads.values() + self.assertEqual(dynamic_globals, [False]) + # For each cached func, there're 2 entries (index + data) + self.check_pycache(num_funcs * 2) + + self.run_in_separate_process() + + def test_arrayexprs(self): + f = 'arrayexprs_case' + self.run_test(f) + + def test_prange(self): + f = 'prange_case' + self.run_test(f) + + def test_caller(self): + f = 'caller_case' + # num_funcs=3 because, there's the `caller_case()` which calls + # the `prange_case()` and `arrayexprs_case()` + self.run_test(f, num_funcs=3) + + +@skip_parfors_unsupported +class TestParforsCacheChangingThreads(DispatcherCacheUsecasesTest): + # NOTE: This test is checking issue #7518, that thread counts are not + # baked into cached objects. 
+ + here = os.path.dirname(__file__) + usecases_file = os.path.join(here, "parfors_cache_usecases.py") + modname = "parfors_caching_test_fodder" + + def run_in_separate_process(self, thread_count): + # Cached functions can be run from a distinct process. + code = """if 1: + import sys + + sys.path.insert(0, %(tempdir)r) + mod = __import__(%(modname)r) + mod.self_run() + """ % dict(tempdir=self.tempdir, modname=self.modname) + + new_env = {**os.environ, "NUMBA_NUM_THREADS" : str(thread_count)} + popen = subprocess.Popen([sys.executable, "-c", code], + stdout=subprocess.PIPE, stderr=subprocess.PIPE, + env=new_env) + out, err = popen.communicate() + if popen.returncode != 0: + raise AssertionError(f"process failed with code {popen.returncode}:" + f"stderr follows\n{err.decode()}\n") + + def test_caching(self): + self.check_pycache(0) + self.run_in_separate_process(1) + self.check_pycache(3 * 2) # ran 3 functions, 2 entries each + self.run_in_separate_process(2) + self.check_pycache(3 * 2) # ran 3 functions, 2 entries each diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_parfors_passes.py b/venv/lib/python3.10/site-packages/numba/tests/test_parfors_passes.py new file mode 100644 index 0000000000000000000000000000000000000000..2e8c23fe6ca75236b1acd5faf9bf76953d8840dd --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_parfors_passes.py @@ -0,0 +1,676 @@ +""" +Tests for sub-components of parfors. +These tests are aimed to produce a good-enough coverage of parfor passes +so that refactoring on these passes are easier with faster testing turnaround. 
+""" +import unittest +from functools import reduce + +import numpy as np + +from numba import njit, typeof, prange, pndindex +import numba.parfors.parfor +from numba.core import ( + rewrites, + typed_passes, + untyped_passes, + inline_closurecall, + compiler, + cpu, + errors +) +from numba.core.registry import cpu_target +from numba.tests.support import (TestCase, is_parfors_unsupported) + + +class MyPipeline(object): + def __init__(self, typingctx, targetctx, args, test_ir): + self.state = compiler.StateDict() + self.state.typingctx = typingctx + self.state.targetctx = targetctx + self.state.args = args + self.state.func_ir = test_ir + self.state.typemap = None + self.state.return_type = None + self.state.calltypes = None + self.state.metadata = {} + + +class BaseTest(TestCase): + @classmethod + def _run_parfor(cls, test_func, args, swap_map=None): + # TODO: refactor this with get_optimized_numba_ir() where this is + # copied from + typingctx = cpu_target.typing_context + targetctx = cpu_target.target_context + test_ir = compiler.run_frontend(test_func) + options = cpu.ParallelOptions(True) + + tp = MyPipeline(typingctx, targetctx, args, test_ir) + + typingctx.refresh() + targetctx.refresh() + + inline_pass = inline_closurecall.InlineClosureCallPass( + tp.state.func_ir, options, typed=True + ) + inline_pass.run() + + rewrites.rewrite_registry.apply("before-inference", tp.state) + + untyped_passes.ReconstructSSA().run_pass(tp.state) + + ( + tp.state.typemap, + tp.state.return_type, + tp.state.calltypes, + _ + ) = typed_passes.type_inference_stage( + tp.state.typingctx, tp.state.targetctx, tp.state.func_ir, + tp.state.args, None + ) + + typed_passes.PreLowerStripPhis().run_pass(tp.state) + + diagnostics = numba.parfors.parfor.ParforDiagnostics() + + preparfor_pass = numba.parfors.parfor.PreParforPass( + tp.state.func_ir, + tp.state.typemap, + tp.state.calltypes, + tp.state.typingctx, + tp.state.targetctx, + options, + swapped=diagnostics.replaced_fns, + 
replace_functions_map=swap_map, + ) + preparfor_pass.run() + + rewrites.rewrite_registry.apply("after-inference", tp.state) + return tp, options, diagnostics, preparfor_pass + + @classmethod + def run_parfor_sub_pass(cls, test_func, args): + tp, options, diagnostics, _ = cls._run_parfor(test_func, args) + + flags = compiler.Flags() + parfor_pass = numba.parfors.parfor.ParforPass( + tp.state.func_ir, + tp.state.typemap, + tp.state.calltypes, + tp.state.return_type, + tp.state.typingctx, + tp.state.targetctx, + options, + flags, + tp.state.metadata, + diagnostics=diagnostics, + ) + parfor_pass._pre_run() + # Run subpass + sub_pass = cls.sub_pass_class(parfor_pass) + sub_pass.run(parfor_pass.func_ir.blocks) + + return sub_pass + + @classmethod + def run_parfor_pre_pass(cls, test_func, args, swap_map=None): + tp, options, diagnostics, preparfor_pass = cls._run_parfor( + test_func, args, swap_map + ) + return preparfor_pass + + def _run_parallel(self, func, *args, **kwargs): + cfunc = njit(parallel=True)(func) + expect = func(*args, **kwargs) + got = cfunc(*args, **kwargs) + return expect, got + + def run_parallel(self, func, *args, **kwargs): + if is_parfors_unsupported: + # Skip + return + expect, got = self._run_parallel(func, *args, **kwargs) + self.assertPreciseEqual(expect, got) + + def run_parallel_check_output_array(self, func, *args, **kwargs): + if is_parfors_unsupported: + # Skip + return + expect, got = self._run_parallel(func, *args, **kwargs) + # Don't match the value, just the return type. 
must return array + self.assertIsInstance(expect, np.ndarray) + self.assertIsInstance(got, np.ndarray) + self.assertEqual(expect.shape, got.shape) + + def check_records(self, records): + for rec in records: + self.assertIsInstance(rec["new"], numba.parfors.parfor.Parfor) + + +class TestConvertSetItemPass(BaseTest): + sub_pass_class = numba.parfors.parfor.ConvertSetItemPass + + def test_setitem_full_slice(self): + def test_impl(): + n = 10 + a = np.ones(n) + a[:] = 7 + return a + + sub_pass = self.run_parfor_sub_pass(test_impl, ()) + self.assertEqual(len(sub_pass.rewritten), 1) + [record] = sub_pass.rewritten + self.assertEqual(record["reason"], "slice") + self.check_records(sub_pass.rewritten) + + self.run_parallel(test_impl) + + def test_setitem_slice_stop_bound(self): + def test_impl(): + n = 10 + a = np.ones(n) + a[:5] = 7 + return a + + sub_pass = self.run_parfor_sub_pass(test_impl, ()) + self.assertEqual(len(sub_pass.rewritten), 1) + [record] = sub_pass.rewritten + self.assertEqual(record["reason"], "slice") + self.check_records(sub_pass.rewritten) + + self.run_parallel(test_impl) + + def test_setitem_slice_start_bound(self): + def test_impl(): + n = 10 + a = np.ones(n) + a[4:] = 7 + return a + + sub_pass = self.run_parfor_sub_pass(test_impl, ()) + self.assertEqual(len(sub_pass.rewritten), 1) + [record] = sub_pass.rewritten + self.assertEqual(record["reason"], "slice") + self.check_records(sub_pass.rewritten) + + self.run_parallel(test_impl) + + def test_setitem_gather_if_scalar(self): + def test_impl(): + n = 10 + a = np.ones(n) + b = np.ones_like(a, dtype=np.bool_) + a[b] = 7 + return a + + sub_pass = self.run_parfor_sub_pass(test_impl, ()) + self.assertEqual(len(sub_pass.rewritten), 1) + [record] = sub_pass.rewritten + self.assertEqual(record["reason"], "masked_assign_broadcast_scalar") + self.check_records(sub_pass.rewritten) + + self.run_parallel(test_impl) + + def test_setitem_gather_if_array(self): + def test_impl(): + n = 10 + a = np.ones(n) + b = 
np.ones_like(a, dtype=np.bool_) + c = np.ones_like(a) + a[b] = c[b] + return a + + sub_pass = self.run_parfor_sub_pass(test_impl, ()) + self.assertEqual(len(sub_pass.rewritten), 1) + [record] = sub_pass.rewritten + self.assertEqual(record["reason"], "masked_assign_array") + self.check_records(sub_pass.rewritten) + + self.run_parallel(test_impl) + + +class TestConvertNumpyPass(BaseTest): + sub_pass_class = numba.parfors.parfor.ConvertNumpyPass + + def check_numpy_allocators(self, fn): + def test_impl(): + n = 10 + a = fn(n) + return a + + sub_pass = self.run_parfor_sub_pass(test_impl, ()) + self.assertEqual(len(sub_pass.rewritten), 1) + [record] = sub_pass.rewritten + self.assertEqual(record["reason"], "numpy_allocator") + self.check_records(sub_pass.rewritten) + + self.run_parallel(test_impl) + + def check_numpy_random(self, fn): + def test_impl(): + n = 10 + a = fn(n) + return a + + sub_pass = self.run_parfor_sub_pass(test_impl, ()) + self.assertEqual(len(sub_pass.rewritten), 1) + [record] = sub_pass.rewritten + self.assertEqual(record["reason"], "numpy_allocator") + self.check_records(sub_pass.rewritten) + + self.run_parallel_check_output_array(test_impl) + + def test_numpy_allocators(self): + fns = [np.ones, np.zeros] + for fn in fns: + with self.subTest(fn.__name__): + self.check_numpy_allocators(fn) + + def test_numpy_random(self): + fns = [np.random.random] + for fn in fns: + with self.subTest(fn.__name__): + self.check_numpy_random(fn) + + def test_numpy_arrayexpr(self): + def test_impl(a, b): + return a + b + + a = b = np.ones(10) + + args = (a, b) + argtypes = [typeof(x) for x in args] + + sub_pass = self.run_parfor_sub_pass(test_impl, argtypes) + self.assertEqual(len(sub_pass.rewritten), 1) + [record] = sub_pass.rewritten + self.assertEqual(record["reason"], "arrayexpr") + self.check_records(sub_pass.rewritten) + + self.run_parallel(test_impl, *args) + + def test_numpy_arrayexpr_ufunc(self): + def test_impl(a, b): + return np.sin(-a) + np.float64(1) / 
np.sqrt(b) + + a = b = np.ones(10) + + args = (a, b) + argtypes = [typeof(x) for x in args] + + sub_pass = self.run_parfor_sub_pass(test_impl, argtypes) + self.assertEqual(len(sub_pass.rewritten), 1) + [record] = sub_pass.rewritten + self.assertEqual(record["reason"], "arrayexpr") + self.check_records(sub_pass.rewritten) + + self.run_parallel(test_impl, *args) + + def test_numpy_arrayexpr_boardcast(self): + def test_impl(a, b): + return a + b + np.array(1) + + a = np.ones(10) + b = np.ones((3, 10)) + + args = (a, b) + argtypes = [typeof(x) for x in args] + + sub_pass = self.run_parfor_sub_pass(test_impl, argtypes) + self.assertEqual(len(sub_pass.rewritten), 1) + [record] = sub_pass.rewritten + self.assertEqual(record["reason"], "arrayexpr") + self.check_records(sub_pass.rewritten) + + self.run_parallel(test_impl, *args) + + def test_numpy_arrayexpr_reshaped(self): + def test_impl(a, b): + a = a.reshape(1, a.size) # shape[0] is now constant + return a + b + + a = np.ones(10) + b = np.ones(10) + + args = (a, b) + argtypes = [typeof(x) for x in args] + + sub_pass = self.run_parfor_sub_pass(test_impl, argtypes) + self.assertEqual(len(sub_pass.rewritten), 1) + [record] = sub_pass.rewritten + self.assertEqual(record["reason"], "arrayexpr") + self.check_records(sub_pass.rewritten) + + self.run_parallel(test_impl, *args) + + +class TestConvertReducePass(BaseTest): + sub_pass_class = numba.parfors.parfor.ConvertReducePass + + def test_reduce_max_basic(self): + def test_impl(arr): + return reduce(lambda x, y: max(x, y), arr, 0.0) + + x = np.ones(10) + args = (x,) + argtypes = [typeof(x) for x in args] + + sub_pass = self.run_parfor_sub_pass(test_impl, argtypes) + self.assertEqual(len(sub_pass.rewritten), 1) + [record] = sub_pass.rewritten + self.assertEqual(record["reason"], "reduce") + self.check_records(sub_pass.rewritten) + + self.run_parallel(test_impl, *args) + + def test_reduce_max_masked(self): + def test_impl(arr): + return reduce(lambda x, y: max(x, y), arr[arr > 
5], 0.0) + + x = np.ones(10) + args = (x,) + argtypes = [typeof(x) for x in args] + + sub_pass = self.run_parfor_sub_pass(test_impl, argtypes) + self.assertEqual(len(sub_pass.rewritten), 1) + [record] = sub_pass.rewritten + self.assertEqual(record["reason"], "reduce") + self.check_records(sub_pass.rewritten) + + self.run_parallel(test_impl, *args) + + +class TestConvertLoopPass(BaseTest): + sub_pass_class = numba.parfors.parfor.ConvertLoopPass + + def test_prange_reduce_simple(self): + def test_impl(): + n = 20 + c = 0 + for i in prange(n): + c += i + return c + + sub_pass = self.run_parfor_sub_pass(test_impl, ()) + self.assertEqual(len(sub_pass.rewritten), 1) + [record] = sub_pass.rewritten + self.assertEqual(record["reason"], "loop") + self.check_records(sub_pass.rewritten) + + self.run_parallel(test_impl) + + def test_prange_map_simple(self): + def test_impl(): + n = 20 + arr = np.ones(n) + for i in prange(n): + arr[i] += i + return arr + + sub_pass = self.run_parfor_sub_pass(test_impl, ()) + self.assertEqual(len(sub_pass.rewritten), 1) + [record] = sub_pass.rewritten + self.assertEqual(record["reason"], "loop") + self.check_records(sub_pass.rewritten) + + self.run_parallel(test_impl) + + def test_prange_two_args(self): + def test_impl(): + n = 20 + arr = np.ones(n) + for i in prange(3, n): + arr[i] += i + return arr + + sub_pass = self.run_parfor_sub_pass(test_impl, ()) + self.assertEqual(len(sub_pass.rewritten), 1) + [record] = sub_pass.rewritten + self.assertEqual(record["reason"], "loop") + self.check_records(sub_pass.rewritten) + + self.run_parallel(test_impl) + + def test_prange_three_args(self): + def test_impl(): + n = 20 + arr = np.ones(n) + for i in prange(3, n, 2): + arr[i] += i + return arr + + with self.assertRaises(errors.UnsupportedRewriteError) as raises: + self.run_parfor_sub_pass(test_impl, ()) + self.assertIn( + "Only constant step size of 1 is supported for prange", + str(raises.exception), + ) + + def test_prange_map_inner_loop(self): + def 
test_impl(): + n = 20 + arr = np.ones((n, n)) + for i in prange(n): + for j in range(i): + arr[i, j] += i + j * n + return arr + + sub_pass = self.run_parfor_sub_pass(test_impl, ()) + self.assertEqual(len(sub_pass.rewritten), 1) + [record] = sub_pass.rewritten + self.assertEqual(record["reason"], "loop") + self.check_records(sub_pass.rewritten) + + self.run_parallel(test_impl) + + def test_prange_map_nested_prange(self): + def test_impl(): + n = 20 + arr = np.ones((n, n)) + for i in prange(n): + for j in prange(i): + arr[i, j] += i + j * n + return arr + + sub_pass = self.run_parfor_sub_pass(test_impl, ()) + self.assertEqual(len(sub_pass.rewritten), 2) + self.check_records(sub_pass.rewritten) + for record in sub_pass.rewritten: + self.assertEqual(record["reason"], "loop") + + self.run_parallel(test_impl) + + def test_prange_map_none_index(self): + def test_impl(): + n = 20 + arr = np.ones(n) + for i in prange(n): + inner = arr[i : i + 1] + inner[()] += 1 + return arr + + sub_pass = self.run_parfor_sub_pass(test_impl, ()) + self.assertEqual(len(sub_pass.rewritten), 1) + self.check_records(sub_pass.rewritten) + [record] = sub_pass.rewritten + self.assertEqual(record["reason"], "loop") + + self.run_parallel(test_impl) + + def test_prange_map_overwrite_index(self): + def test_impl(): + n = 20 + arr = np.ones(n) + for i in prange(n): + i += 1 + arr[i - 1] = i + return arr + + with self.assertRaises(errors.UnsupportedRewriteError) as raises: + self.run_parfor_sub_pass(test_impl, ()) + self.assertIn( + "Overwrite of parallel loop index", + str(raises.exception), + ) + + def test_init_prange(self): + def test_impl(): + n = 20 + arr = np.ones(n) + numba.parfors.parfor.init_prange() + val = 0 + for i in numba.parfors.parfor.internal_prange(len(arr)): + val += arr[i] + return val + + sub_pass = self.run_parfor_sub_pass(test_impl, ()) + self.assertEqual(len(sub_pass.rewritten), 1) + self.check_records(sub_pass.rewritten) + [record] = sub_pass.rewritten + 
self.assertEqual(record["reason"], "loop") + + self.run_parallel(test_impl) + + def test_pndindex(self): + def test_impl(): + n = 20 + arr = np.ones((n, n)) + val = 0 + for idx in pndindex(arr.shape): + val += idx[0] * idx[1] + return val + + sub_pass = self.run_parfor_sub_pass(test_impl, ()) + self.assertEqual(len(sub_pass.rewritten), 1) + self.check_records(sub_pass.rewritten) + + [record] = sub_pass.rewritten + self.assertEqual(record["reason"], "loop") + + self.run_parallel(test_impl) + + def test_numpy_sum(self): + def test_impl(arr): + return np.sum(arr) + + shape = 11, 13 + arr = np.arange(np.prod(shape)).reshape(shape) + args = (arr,) + argtypes = [typeof(x) for x in args] + + sub_pass = self.run_parfor_sub_pass(test_impl, argtypes) + self.assertEqual(len(sub_pass.rewritten), 1) + [record] = sub_pass.rewritten + self.assertEqual(record["reason"], "loop") + self.check_records(sub_pass.rewritten) + self.run_parallel(test_impl, *args) + + def test_numpy_sum_bool_array_masked(self): + def test_impl(arr): + sliced = arr[:, 0] + return np.sum(arr[sliced >= 3, 1:2]) + + shape = 11, 13 + arr = np.arange(np.prod(shape)).reshape(shape) + args = (arr,) + argtypes = [typeof(x) for x in args] + + sub_pass = self.run_parfor_sub_pass(test_impl, argtypes) + self.assertEqual(len(sub_pass.rewritten), 1) + [record] = sub_pass.rewritten + self.assertEqual(record["reason"], "loop") + self.check_records(sub_pass.rewritten) + self.run_parallel(test_impl, *args) + + def test_numpy_sum_int_array_masked(self): + def test_impl(arr): + sel = np.arange(arr.shape[1]) + return np.sum(arr[:, sel]) + + shape = 11, 13 + arr = np.arange(np.prod(shape)).reshape(shape) + args = (arr,) + argtypes = [typeof(x) for x in args] + + sub_pass = self.run_parfor_sub_pass(test_impl, argtypes) + # 1 for arange; 1 for sum + self.assertEqual(len(sub_pass.rewritten), 2) + for record in sub_pass.rewritten: + self.assertEqual(record["reason"], "loop") + self.check_records(sub_pass.rewritten) + 
self.run_parallel(test_impl, *args) + + def test_numpy_fill_method(self): + def test_impl(arr): + arr.fill(3) + return arr + + shape = 11, 13 + arr = np.arange(np.prod(shape)).reshape(shape) + args = (arr,) + argtypes = [typeof(x) for x in args] + + sub_pass = self.run_parfor_sub_pass(test_impl, argtypes) + # 1 for arange; 1 for sum + self.assertEqual(len(sub_pass.rewritten), 1) + [record] = sub_pass.rewritten + self.assertEqual(record["reason"], "loop") + self.check_records(sub_pass.rewritten) + self.run_parallel(test_impl, *args) + + +class TestPreParforPass(BaseTest): + class sub_pass_class: + def __init__(self, pass_states): + pass + + def run(self, blocks): + pass + + def test_dtype_conversion(self): + # array.dtype are converted to np.dtype(array) in the PreParforPass + def test_impl(a): + b = np.ones(20, dtype=a.dtype) + return b + + arr = np.arange(10) + args = (arr,) + argtypes = [typeof(x) for x in args] + + pre_pass = self.run_parfor_pre_pass(test_impl, argtypes) + self.assertEqual(pre_pass.stats["replaced_func"], 0) + self.assertEqual(pre_pass.stats["replaced_dtype"], 1) + self.run_parallel(test_impl, *args) + + def test_sum_replacement(self): + def test_impl(a): + return np.sum(a) + + arr = np.arange(10) + args = (arr,) + argtypes = [typeof(x) for x in args] + + pre_pass = self.run_parfor_pre_pass(test_impl, argtypes) + self.assertEqual(pre_pass.stats["replaced_func"], 1) + self.assertEqual(pre_pass.stats["replaced_dtype"], 0) + self.run_parallel(test_impl, *args) + + def test_replacement_map(self): + def test_impl(a): + return np.sum(a) + + arr = np.arange(10) + args = (arr,) + argtypes = [typeof(x) for x in args] + + swap_map = numba.parfors.parfor.swap_functions_map.copy() + swap_map.pop(("sum", "numpy")) + pre_pass = self.run_parfor_pre_pass(test_impl, argtypes, swap_map) + self.assertEqual(pre_pass.stats["replaced_func"], 0) + self.assertEqual(pre_pass.stats["replaced_dtype"], 0) + self.run_parallel(test_impl, *args) + + +if __name__ == 
"__main__": + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_pipeline.py b/venv/lib/python3.10/site-packages/numba/tests/test_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..5a04da1bb69bc2c71c711452c4438a3337e2a779 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_pipeline.py @@ -0,0 +1,143 @@ +from numba.core.compiler import Compiler, DefaultPassBuilder +from numba.core.compiler_machinery import (FunctionPass, AnalysisPass, + register_pass) +from numba.core.untyped_passes import InlineInlinables +from numba.core.typed_passes import IRLegalization +from numba import jit, objmode, njit, cfunc +from numba.core import types, postproc, errors +from numba.core.ir import FunctionIR +from numba.tests.support import TestCase + + +class TestCustomPipeline(TestCase): + def setUp(self): + super(TestCustomPipeline, self).setUp() + + # Define custom pipeline class + class CustomPipeline(Compiler): + custom_pipeline_cache = [] + + def compile_extra(self, func): + # Store the compiled function + self.custom_pipeline_cache.append(func) + return super(CustomPipeline, self).compile_extra(func) + + def compile_ir(self, func_ir, *args, **kwargs): + # Store the compiled function + self.custom_pipeline_cache.append(func_ir) + return super(CustomPipeline, self).compile_ir( + func_ir, *args, **kwargs) + + self.pipeline_class = CustomPipeline + + def test_jit_custom_pipeline(self): + self.assertListEqual(self.pipeline_class.custom_pipeline_cache, []) + + @jit(pipeline_class=self.pipeline_class) + def foo(x): + return x + + self.assertEqual(foo(4), 4) + self.assertListEqual(self.pipeline_class.custom_pipeline_cache, + [foo.py_func]) + + def test_cfunc_custom_pipeline(self): + self.assertListEqual(self.pipeline_class.custom_pipeline_cache, []) + + @cfunc(types.int64(types.int64), pipeline_class=self.pipeline_class) + def foo(x): + return x + + self.assertEqual(foo(4), 4) + 
self.assertListEqual(self.pipeline_class.custom_pipeline_cache, + [foo.__wrapped__]) + + def test_objmode_custom_pipeline(self): + self.assertListEqual(self.pipeline_class.custom_pipeline_cache, []) + + @jit(pipeline_class=self.pipeline_class) + def foo(x): + with objmode(x="intp"): + x += int(0x1) + return x + + arg = 123 + self.assertEqual(foo(arg), arg + 1) + # Two items in the list. + self.assertEqual(len(self.pipeline_class.custom_pipeline_cache), 2) + # First item is the `foo` function + first = self.pipeline_class.custom_pipeline_cache[0] + self.assertIs(first, foo.py_func) + # Second item is a FunctionIR of the obj-lifted function + second = self.pipeline_class.custom_pipeline_cache[1] + self.assertIsInstance(second, FunctionIR) + + +class TestPassManagerFunctionality(TestCase): + + def _create_pipeline_w_del(self, base=None, inject_after=None): + """ + Creates a new compiler pipeline with the _InjectDelsPass injected after + the pass supplied in kwarg 'inject_after'. + """ + self.assertTrue(inject_after is not None) + self.assertTrue(base is not None) + + @register_pass(mutates_CFG=False, analysis_only=False) + class _InjectDelsPass(base): + """ + This pass injects ir.Del nodes into the IR + """ + _name = "inject_dels_%s" % str(base) + + def __init__(self): + base.__init__(self) + + def run_pass(self, state): + pp = postproc.PostProcessor(state.func_ir) + pp.run(emit_dels=True) + return True + + class TestCompiler(Compiler): + + def define_pipelines(self): + pm = DefaultPassBuilder.define_nopython_pipeline(self.state) + pm.add_pass_after(_InjectDelsPass, inject_after) + pm.finalize() + return [pm] + + return TestCompiler + + def test_compiler_error_on_ir_del_from_functionpass(self): + new_compiler = self._create_pipeline_w_del(FunctionPass, + InlineInlinables) + + @njit(pipeline_class=new_compiler) + def foo(x): + return x + 1 + + with self.assertRaises(errors.CompilerError) as raises: + foo(10) + + errstr = str(raises.exception) + + self.assertIn("Illegal 
IR, del found at:", errstr) + self.assertIn("del x", errstr) + + def test_no_compiler_error_on_ir_del_after_legalization(self): + # Legalization should be the last FunctionPass to execute so it's fine + # for it to emit ir.Del nodes as no further FunctionPasses will run and + # therefore the checking routine in the PassManager won't execute. + # This test adds a new pass that is an AnalysisPass into the pipeline + # after legalisation, this pass will return with already existing dels + # in the IR but by virtue of it being an AnalysisPass the checking + # routine won't execute. + + new_compiler = self._create_pipeline_w_del(AnalysisPass, + IRLegalization) + + @njit(pipeline_class=new_compiler) + def foo(x): + return x + 1 + + self.assertTrue(foo(10), foo.py_func(10)) diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_polynomial.py b/venv/lib/python3.10/site-packages/numba/tests/test_polynomial.py new file mode 100644 index 0000000000000000000000000000000000000000..ecc8291f0502df6893a7216aede5195fec503f16 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_polynomial.py @@ -0,0 +1,577 @@ +import gc +from itertools import product + +import numpy as np +from numpy.polynomial import polynomial as poly +from numpy.polynomial import polyutils as pu + +from numba import jit, njit +from numba.tests.support import (TestCase, needs_lapack, + EnableNRTStatsMixin, MemoryLeakMixin) +from numba.core.errors import TypingError + + +def roots_fn(p): + return np.roots(p) + + +def polyadd(c1,c2): + return poly.polyadd(c1,c2) + + +def polysub(c1,c2): + return poly.polysub(c1,c2) + + +def polymul(c1,c2): + return poly.polymul(c1,c2) + + +def trimseq(seq): + return pu.trimseq(seq) + + +def polyasseries1(a): + res = pu.as_series(a) + return res + + +def polyasseries2(a, trim): + res = pu.as_series(a, trim) + return res + + +def polydiv(c1, c2): + res = poly.polydiv(c1, c2) + return res + + +def polyval2(x, c): + res = poly.polyval(x, c) + return res + + 
+def polyval3T(x, c): + res = poly.polyval(x, c, True) + return res + + +def polyval3F(x, c): + res = poly.polyval(x, c, False) + return res + + +def polyint(c, m=1): + res = poly.polyint(c, m) + return res + + +class TestPolynomialBase(EnableNRTStatsMixin, TestCase): + """ + Provides setUp and common data/error modes for testing polynomial functions. + """ + + # supported dtypes + dtypes = (np.float64, np.float32, np.complex128, np.complex64) + + def setUp(self): + # Collect leftovers from previous test cases before checking for leaks + gc.collect() + super(TestPolynomialBase, self).setUp() + + def assert_error(self, cfunc, args, msg, err=ValueError): + with self.assertRaises(err) as raises: + cfunc(*args) + self.assertIn(msg, str(raises.exception)) + + def assert_1d_input(self, cfunc, args): + msg = "Input must be a 1d array." + self.assert_error(cfunc, args, msg) + + +class TestPoly1D(TestPolynomialBase): + + def assert_no_domain_change(self, name, cfunc, args): + msg = name + "() argument must not cause a domain change." 
+ self.assert_error(cfunc, args, msg) + + @needs_lapack + def test_roots(self): + + cfunc = jit(nopython=True)(roots_fn) + + default_resolution = np.finfo(np.float64).resolution + + def check(a, **kwargs): + expected = roots_fn(a, **kwargs) + got = cfunc(a, **kwargs) + + # eigen decomposition used so type specific impl + # will be used in numba whereas a wide type impl + # will be used in numpy, so compare using a more + # fuzzy comparator + + if a.dtype in self.dtypes: + resolution = np.finfo(a.dtype).resolution + else: + # this is for integer types when roots() will cast to float64 + resolution = default_resolution + + np.testing.assert_allclose( + expected, + got, + rtol=10 * resolution, + atol=100 * resolution # zeros tend to be fuzzy + ) + + # Ensure proper resource management + with self.assertNoNRTLeak(): + cfunc(a, **kwargs) + + # test vectors in real space + # contrived examples to trip branches + r_vectors = ( + np.array([1]), + np.array([1, 3, 2]), + np.array([0, 0, 0]), + np.array([1, 6, 11, 6]), + np.array([0, 0, 0, 1, 3, 2]), + np.array([1, 1, 0, 0, 0]), + np.array([0, 0, 1, 0, 0, 0]) + ) + + # test loop real space + for v, dtype in \ + product(r_vectors, [np.int32, np.int64] + list(self.dtypes)): + a = v.astype(dtype) + check(a) + + c_vectors = ( + np.array([1 + 1j]), + np.array([1, 3 + 1j, 2]), + np.array([0, 0 + 0j, 0]), + np.array([1, 6 + 1j, 11, 6]), + np.array([0, 0, 0, 1 + 1j, 3, 2]), + np.array([1 + 1j, 1, 0, 0, 0]), + np.array([0, 0, 1 + 1j, 0, 0, 0]) + ) + + # test loop complex space + for v, dtype in product(c_vectors, self.dtypes[2:]): + a = v.astype(dtype) + check(a) + + # check input with dimension > 1 raises + self.assert_1d_input(cfunc, (np.arange(4.).reshape(2, 2),)) + + # check real input with complex roots raises + x = np.array([7., 2., 0., 1.]) + self.assert_no_domain_change("eigvals", cfunc, (x,)) + # but works fine if type conv to complex first + cfunc(x.astype(np.complex128)) + + +class TestPolynomial(MemoryLeakMixin, TestCase): 
+ + # + # tests for Polyutils functions + # + + def test_trimseq_basic(self): + pyfunc = trimseq + cfunc = njit(trimseq) + + def inputs(): + for i in range(5): + yield np.array([1] + [0] * i) + + for coefs in inputs(): + self.assertPreciseEqual(pyfunc(coefs), cfunc(coefs)) + + def test_trimseq_exception(self): + cfunc = njit(trimseq) + + self.disable_leak_check() + + with self.assertRaises(TypingError) as raises: + cfunc("abc") + self.assertIn('The argument "seq" must be array-like', + str(raises.exception)) + + with self.assertRaises(TypingError) as e: + cfunc(np.arange(10).reshape(5, 2)) + self.assertIn('Coefficient array is not 1-d', + str(e.exception)) + + with self.assertRaises(TypingError) as e: + cfunc((1, 2, 3, 0)) + self.assertIn('Unsupported type UniTuple(int64, 4) for argument "seq"', + str(e.exception)) + + def test_pu_as_series_basic(self): + pyfunc1 = polyasseries1 + cfunc1 = njit(polyasseries1) + pyfunc2 = polyasseries2 + cfunc2 = njit(polyasseries2) + + def inputs(): + yield np.arange(4) + yield np.arange(6).reshape((2,3)) + yield (1, np.arange(3), np.arange(2, dtype=np.float32)) + yield ([1, 2, 3, 4, 0], [1, 2, 3]) + yield ((0, 0, 1e-3, 0, 1e-5, 0, 0), (1, 2, 3, 4, 5, 6, 7)) + yield ((0, 0, 1e-3, 0, 1e-5, 0, 0), (1j, 2, 3j, 4j, 5, 6j, 7)) + yield (2, [1.1, 0.]) + yield ([1, 2, 3, 0], ) + yield ((1, 2, 3, 0), ) + yield (np.array([1, 2, 3, 0]), ) + yield [np.array([1, 2, 3, 0]), np.array([1, 2, 3, 0])] + yield [np.array([1,2,3]), ] + + for input in inputs(): + self.assertPreciseEqual(pyfunc1(input), cfunc1(input)) + self.assertPreciseEqual(pyfunc2(input, False), cfunc2(input, False)) + self.assertPreciseEqual(pyfunc2(input, True), cfunc2(input, True)) + + def test_pu_as_series_exception(self): + cfunc1 = njit(polyasseries1) + cfunc2 = njit(polyasseries2) + + self.disable_leak_check() + + with self.assertRaises(TypingError) as raises: + cfunc1("abc") + self.assertIn('The argument "alist" must be array-like', + str(raises.exception)) + + with 
self.assertRaises(TypingError) as raises: + cfunc2("abc", True) + self.assertIn('The argument "alist" must be array-like', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc2(np.arange(4), "abc") + self.assertIn('The argument "trim" must be boolean', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc1(([1, 2, 3], np.arange(16).reshape(4,4))) + self.assertIn('Coefficient array is not 1-d', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc1(np.arange(8).reshape((2, 2, 2))) + self.assertIn('Coefficient array is not 1-d', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc1([np.array([[1,2,3],[1,2,3]]), ]) + self.assertIn('Coefficient array is not 1-d', + str(raises.exception)) + + with self.assertRaises(ValueError) as raises: + cfunc1(np.array([[]], dtype=np.float64)) + self.assertIn('Coefficient array is empty', + str(raises.exception)) + + with self.assertRaises(ValueError) as raises: + cfunc1(([1, 2, 3], np.array([], dtype=np.float64), + np.array([1,2,1]))) + self.assertIn('Coefficient array is empty', + str(raises.exception)) + + def _test_polyarithm_basic(self, pyfunc, ignore_sign_on_zero=False): + # test suite containing tests for polyadd, polysub, polymul, polydiv + cfunc = njit(pyfunc) + + def inputs(): + # basic, taken from https://github.com/numpy/numpy/blob/48a8277855849be094a5979c48d9f5f1778ee4de/numpy/polynomial/tests/test_polynomial.py#L58-L123 # noqa: E501 + for i in range(5): + for j in range(5): + p1 = np.array([0] * i + [1]) + p2 = np.array([0] * j + [1]) + yield p1, p2 + # test lists, tuples, scalars + yield [1, 2, 3], [1, 2, 3] + yield [1, 2, 3], (1, 2, 3) + yield (1, 2, 3), [1, 2, 3] + yield [1, 2, 3], 3 + yield 3, (1, 2, 3) + # test different dtypes + yield np.array([1, 2, 3]), np.array([1.0, 2.0, 3.0]) + yield np.array([1j, 2j, 3j]), np.array([1.0, 2.0, 3.0]) + yield np.array([1, 2, 3]), np.array([1j, 2j, 3j]) + 
yield (1, 2, 3), 3.0 + yield (1, 2, 3), 3j + yield (1, 1e-3, 3), (1, 2, 3) + + for p1, p2 in inputs(): + self.assertPreciseEqual(pyfunc(p1,p2), cfunc(p1,p2), + ignore_sign_on_zero=ignore_sign_on_zero) + + def _test_polyarithm_exception(self, pyfunc): + # test suite containing tests for polyadd, polysub, polymul, polydiv + cfunc = njit(pyfunc) + + self.disable_leak_check() + + with self.assertRaises(TypingError) as raises: + cfunc("abc", np.array([1,2,3])) + self.assertIn('The argument "c1" must be array-like', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc(np.array([1,2,3]), "abc") + self.assertIn('The argument "c2" must be array-like', + str(raises.exception)) + + with self.assertRaises(TypingError) as e: + cfunc(np.arange(10).reshape(5, 2), np.array([1, 2, 3])) + self.assertIn('Coefficient array is not 1-d', + str(e.exception)) + + with self.assertRaises(TypingError) as e: + cfunc(np.array([1, 2, 3]), np.arange(10).reshape(5, 2)) + self.assertIn('Coefficient array is not 1-d', + str(e.exception)) + + def test_polyadd_basic(self): + self._test_polyarithm_basic(polyadd) + + def test_polyadd_exception(self): + self._test_polyarithm_exception(polyadd) + + def test_polysub_basic(self): + self._test_polyarithm_basic(polysub, ignore_sign_on_zero=True) + + def test_polysub_exception(self): + self._test_polyarithm_exception(polysub) + + def test_polymul_basic(self): + self._test_polyarithm_basic(polymul) + + def test_polymul_exception(self): + self._test_polyarithm_exception(polymul) + + def test_poly_polydiv_basic(self): + pyfunc = polydiv + cfunc = njit(polydiv) + self._test_polyarithm_basic(polydiv) + + def inputs(): + # Based on https://github.com/numpy/numpy/blob/160c16f055d4d2fce072004e286d8075b31955cd/numpy/polynomial/tests/test_polynomial.py#L99-L114 # noqa: E501 + # check scalar division + yield [2], [2] + yield [2, 2], [2] + # check rest. 
+ for i in range(5): + for j in range(5): + ci = [0] * i + [1, 2] + cj = [0] * j + [1, 2] + tgt = poly.polyadd(ci, cj) + yield tgt, ci + yield np.array([1,0,0,0,0,0,-1]), np.array([1,0,0,-1]) + + for c1, c2 in inputs(): + self.assertPreciseEqual(pyfunc(c1, c2), cfunc(c1, c2)) + + def test_poly_polydiv_exception(self): + self._test_polyarithm_exception(polydiv) + cfunc = njit(polydiv) + # Based on https://github.com/numpy/numpy/blob/160c16f055d4d2fce072004e286d8075b31955cd/numpy/polynomial/tests/test_polynomial.py#L97 # noqa: E501 + # check zero division + with self.assertRaises(ZeroDivisionError) as _: + cfunc([1], [0]) + + def test_poly_polyval_basic(self): + pyfunc2 = polyval2 + cfunc2 = njit(polyval2) + pyfunc3T = polyval3T + cfunc3T = njit(polyval3T) + pyfunc3F = polyval3F + cfunc3F = njit(polyval3F) + + def inputs(): + # Based on https://github.com/numpy/numpy/blob/160c16f055d4d2fce072004e286d8075b31955cd/numpy/polynomial/tests/test_polynomial.py#L137-L157 # noqa: E501 + # check empty input + yield np.array([], dtype=np.float64), [1] + yield 1, [1,2,3] + yield np.arange(4).reshape(2,2), [1,2,3] + # check normal input + for i in range(5): + yield np.linspace(-1, 1), [0] * i + [1] + yield np.linspace(-1, 1), [0, -1, 0, 1] + # check that shape is preserved + for i in range(3): + dims = [2] * i + x = np.zeros(dims) + yield x, [1] + yield x, [1, 0] + yield x, [1, 0, 0] + # Check that behaviour corresponds to tensor = False + yield np.array([1, 2]), np.arange(4).reshape(2,2) + yield [1, 2], np.arange(4).reshape(2,2) + + for x, c in inputs(): + self.assertPreciseEqual(pyfunc2(x, c), cfunc2(x, c)) + # test tensor argument + self.assertPreciseEqual(pyfunc3T(x, c), cfunc3T(x, c)) + self.assertPreciseEqual(pyfunc3F(x, c), cfunc3F(x, c)) + + def test_poly_polyval_exception(self): + cfunc2 = njit(polyval2) + cfunc3T = njit(polyval3T) + cfunc3F = njit(polyval3F) + + self.disable_leak_check() + + with self.assertRaises(TypingError) as raises: + cfunc2(3, "abc") + 
self.assertIn('The argument "c" must be array-like', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc2("abc", 3) + self.assertIn('The argument "x" must be array-like', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc2("abc", "def") + self.assertIn('The argument "x" must be array-like', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc3T(3, "abc") + self.assertIn('The argument "c" must be array-like', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc3T("abc", 3) + self.assertIn('The argument "x" must be array-like', + str(raises.exception)) + + @njit + def polyval3(x, c, tensor): + res = poly.polyval(x, c, tensor) + return res + with self.assertRaises(TypingError) as raises: + polyval3(3, 3, "abc") + self.assertIn('The argument "tensor" must be boolean', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc3F("abc", "def") + self.assertIn('The argument "x" must be array-like', + str(raises.exception)) + + def test_poly_polyint_basic(self): + pyfunc = polyint + cfunc = njit(polyint) + # basic + self.assertPreciseEqual(pyfunc([1,2,3]), cfunc([1,2,3])) + # Based on https://github.com/numpy/numpy/blob/160c16f055d4d2fce072004e286d8075b31955cd/numpy/polynomial/tests/test_polynomial.py#L314-L381 # noqa: E501 + # test integration of zero polynomial + for i in range(2, 5): + self.assertPreciseEqual(pyfunc([0], m=i), cfunc([0], m=i)) + + # check single integration with integration constant + for i in range(5): + pol = [0] * i + [1] + self.assertPreciseEqual(pyfunc(pol, m=1), pyfunc(pol, m=1)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + self.assertPreciseEqual(pyfunc(pol, m=j), cfunc(pol, m=j)) + + # test multidimensional arrays + c2 = np.array([[0,1], [0,2]]) + self.assertPreciseEqual(pyfunc(c2), cfunc(c2)) + c3 = 
np.arange(8).reshape((2,2,2)) + self.assertPreciseEqual(pyfunc(c3), cfunc(c3)) + + def test_poly_polyint_exception(self): + cfunc = njit(polyint) + + self.disable_leak_check() + + with self.assertRaises(TypingError) as raises: + cfunc("abc") + self.assertIn('The argument "c" must be array-like', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc(np.array([1,2,3]), "abc") + self.assertIn('The argument "m" must be an integer', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc(['a', 'b', 'c'], 1) + self.assertIn('Input dtype must be scalar.', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc(('a', 'b', 'c'), 1) + self.assertIn('Input dtype must be scalar.', + str(raises.exception)) + + # + # tests for Polynomial class + # + + def test_Polynomial_constructor(self): + + def pyfunc3(c, dom, win): + p = poly.Polynomial(c, dom, win) + return p + cfunc3 = njit(pyfunc3) + + def pyfunc1(c): + p = poly.Polynomial(c) + return p + cfunc1 = njit(pyfunc1) + list1 = (np.array([0, 1]), np.array([0., 1.])) + list2 = (np.array([0, 1]), np.array([0., 1.])) + list3 = (np.array([0, 1]), np.array([0., 1.])) + for c in list1: + for dom in list2: + for win in list3: + p1 = pyfunc3(c, dom, win) + p2 = cfunc3(c, dom, win) + q1 = pyfunc1(c) + q2 = cfunc1(c) + self.assertPreciseEqual(p1, p2) + self.assertPreciseEqual(p1.coef, p2.coef) + self.assertPreciseEqual(p1.domain, p2.domain) + self.assertPreciseEqual(p1.window, p2.window) + self.assertPreciseEqual(q1.coef, q2.coef) + self.assertPreciseEqual(q1.domain, q2.domain) + self.assertPreciseEqual(q1.window, q2.window) + + def test_Polynomial_exeption(self): + def pyfunc3(c, dom, win): + p = poly.Polynomial(c, dom, win) + return p + cfunc3 = njit(pyfunc3) + + self.disable_leak_check() + + input2 = np.array([1, 2]) + input3 = np.array([1, 2, 3]) + input2D = np.arange(4).reshape((2, 2)) + + with self.assertRaises(ValueError) as raises: + 
cfunc3(input2, input3, input2) + self.assertIn("Domain has wrong number of elements.", + str(raises.exception)) + + with self.assertRaises(ValueError) as raises: + cfunc3(input2, input2, input3) + self.assertIn("Window has wrong number of elements.", + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc3(input2D, input2, input2) + self.assertIn("Coefficient array is not 1-d", + str(raises.exception)) diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_practical_lowering_issues.py b/venv/lib/python3.10/site-packages/numba/tests/test_practical_lowering_issues.py new file mode 100644 index 0000000000000000000000000000000000000000..a069591946a845e3456d4499ca94845025e165c8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_practical_lowering_issues.py @@ -0,0 +1,212 @@ +""" +Tests for practical lowering specific errors. +""" + +import numpy as np +from numba import njit +from numba.core import types, ir +from numba.core.compiler import CompilerBase, DefaultPassBuilder +from numba.core.typed_passes import NopythonTypeInference +from numba.core.compiler_machinery import register_pass, FunctionPass + +from numba.tests.support import MemoryLeakMixin, TestCase + + +def issue7507_lround(a): + """Dummy function used in test""" + pass + + +class TestLowering(MemoryLeakMixin, TestCase): + def test_issue4156_loop_vars_leak(self): + """Test issues with zero-filling of refct'ed variables inside loops. + + Before the fix, the in-loop variables are always zero-filled at their + definition location. As a result, their state from the previous + iteration is erased. No decref is applied. To fix this, the + zero-filling must only happen once after the alloca at the function + entry block. The loop variables are technically defined once per + function (one alloca per definition per function), but semantically + defined once per assignment. 
Semantically, their lifetime stop only + when the variable is re-assigned or when the function ends. + """ + @njit + def udt(N): + sum_vec = np.zeros(3) + for n in range(N): + if n >= 0: + # `vec` would leak without the fix. + vec = np.ones(1) + if n >= 0: + sum_vec += vec[0] + + return sum_vec + + got = udt(4) + expect = udt.py_func(4) + self.assertPreciseEqual(got, expect) + + def test_issue4156_loop_vars_leak_variant1(self): + """Variant of test_issue4156_loop_vars_leak. + + Adding an outer loop. + """ + @njit + def udt(N): + sum_vec = np.zeros(3) + for x in range(N): + for y in range(N): + n = x + y + if n >= 0: + # `vec` would leak without the fix. + vec = np.ones(1) + if n >= 0: + sum_vec += vec[0] + + return sum_vec + + got = udt(4) + expect = udt.py_func(4) + self.assertPreciseEqual(got, expect) + + def test_issue4156_loop_vars_leak_variant2(self): + """Variant of test_issue4156_loop_vars_leak. + + Adding deeper outer loop. + """ + @njit + def udt(N): + sum_vec = np.zeros(3) + for z in range(N): + for x in range(N): + for y in range(N): + n = x + y + z + if n >= 0: + # `vec` would leak without the fix. + vec = np.ones(1) + if n >= 0: + sum_vec += vec[0] + + return sum_vec + + got = udt(4) + expect = udt.py_func(4) + self.assertPreciseEqual(got, expect) + + def test_issue4156_loop_vars_leak_variant3(self): + """Variant of test_issue4156_loop_vars_leak. + + Adding inner loop around allocation + """ + @njit + def udt(N): + sum_vec = np.zeros(3) + for z in range(N): + for x in range(N): + n = x + z + if n >= 0: + for y in range(N): + # `vec` would leak without the fix. + vec = np.ones(y) + if n >= 0: + sum_vec += vec[0] + + return sum_vec + + got = udt(4) + expect = udt.py_func(4) + self.assertPreciseEqual(got, expect) + + def test_issue4156_loop_vars_leak_variant4(self): + """Variant of test_issue4156_loop_vars_leak. 
+ + Interleaves loops and allocations + """ + @njit + def udt(N): + sum_vec = 0 + + for n in range(N): + vec = np.zeros(7) + for n in range(N): + z = np.zeros(7) + sum_vec += vec[0] + z[0] + + return sum_vec + + got = udt(4) + expect = udt.py_func(4) + self.assertPreciseEqual(got, expect) + + def test_issue_with_literal_in_static_getitem(self): + """Test an issue with literal type used as index of static_getitem + """ + + @register_pass(mutates_CFG=False, analysis_only=False) + class ForceStaticGetitemLiteral(FunctionPass): + + _name = "force_static_getitem_literal" + + def __init__(self): + FunctionPass.__init__(self) + + def run_pass(self, state): + repl = {} + # Force the static_getitem to have a literal type as + # index to replicate the problem. + for inst, sig in state.calltypes.items(): + if (isinstance(inst, ir.Expr) and + inst.op == 'static_getitem'): + [obj, idx] = sig.args + new_sig = sig.replace(args=(obj, + types.literal(inst.index))) + repl[inst] = new_sig + state.calltypes.update(repl) + return True + + class CustomPipeline(CompilerBase): + def define_pipelines(self): + pm = DefaultPassBuilder.define_nopython_pipeline(self.state) + pm.add_pass_after(ForceStaticGetitemLiteral, + NopythonTypeInference) + pm.finalize() + return [pm] + + @njit(pipeline_class=CustomPipeline) + def foo(arr): + return arr[4] # force static_getitem + + arr = np.arange(10) + got = foo(arr) + expect = foo.py_func(arr) + self.assertEqual(got, expect) + + def test_issue7507(self): + """ + Test a problem with BaseContext.get_function() because of changes + related to the new style error handling. 
+ """ + from numba.core.typing.templates import AbstractTemplate, infer_global + from numba.core.imputils import lower_builtin + + @infer_global(issue7507_lround) + class lroundTemplate(AbstractTemplate): + key = issue7507_lround + + def generic(self, args, kws): + signature = types.int64(types.float64) + + # insert a new builtin during the compilation process + @lower_builtin(issue7507_lround, types.float64) + def codegen(context, builder, sig, args): + # Simply truncate with the cast to integer. + return context.cast(builder, args[0], sig.args[0], + sig.return_type) + + return signature + + @njit('int64(float64)') + def foo(a): + return issue7507_lround(a) + + self.assertEqual(foo(3.4), 3) diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_print.py b/venv/lib/python3.10/site-packages/numba/tests/test_print.py new file mode 100644 index 0000000000000000000000000000000000000000..01b51145bc0dcc17b6d1f9728c837288d4076af3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_print.py @@ -0,0 +1,189 @@ +import sys + +import numpy as np + +import unittest +from numba import jit, njit +from numba.core import types, errors, utils +from numba.tests.support import (captured_stdout, TestCase, EnableNRTStatsMixin) + + +def print_value(x): + print(x) + +def print_array_item(arr, i): + print(arr[i].x) + +def print_values(a, b, c): + print(a, b, c) + +def print_empty(): + print() + +def print_string(x): + print(x, "hop!", 3.5) + +def print_vararg(a, b, c): + print(a, b, *c) + +def print_string_vararg(a, b, c): + print(a, "hop!", b, *c) + +def make_print_closure(x): + def print_closure(): + return x + return jit(nopython=True)(x) + + +class TestPrint(EnableNRTStatsMixin, TestCase): + + def check_values(self, typ, values): + cfunc = njit((typ,))(print_value) + for val in values: + with captured_stdout(): + cfunc(val) + self.assertEqual(sys.stdout.getvalue(), str(val) + '\n') + + def test_print_values(self): + """ + Test printing a single argument 
value. + """ + # Various scalars + self.check_values(types.int32, (1, -234)) + self.check_values(types.int64, (1, -234, + 123456789876543210, + -123456789876543210)) + self.check_values(types.uint64, (1, 234, + 123456789876543210, 2**63 + 123)) + self.check_values(types.boolean, (True, False)) + self.check_values(types.float64, (1.5, 100.0**10.0, float('nan'))) + self.check_values(types.complex64, (1+1j,)) + self.check_values(types.NPTimedelta('ms'), (np.timedelta64(100, 'ms'),)) + + cfunc = njit((types.float32,))(print_value) + with captured_stdout(): + cfunc(1.1) + # Float32 will lose precision + got = sys.stdout.getvalue() + expect = '1.10000002384' + self.assertTrue(got.startswith(expect)) + self.assertTrue(got.endswith('\n')) + + # Test array + arraytype = types.Array(types.int32, 1, 'C') + cfunc = njit((arraytype,))(print_value) + with captured_stdout(): + cfunc(np.arange(10, dtype=np.int32)) + self.assertEqual(sys.stdout.getvalue(), + '[0 1 2 3 4 5 6 7 8 9]\n') + + @unittest.skip("Issue with intermittent NRT leak, see #9355.") + def test_print_nrt_type(self): + # NOTE: this check is extracted from the above as it started + # intermittently leaking since the merge of #9330 (compile_isolated + # removal patch). It's not clear why this happens, see #9355 for + # thoughts/details. This test is skipped until it is resolved. 
+ + # NRT-enabled type + with self.assertNoNRTLeak(): + x = [1, 3, 5, 7] + with self.assertRefCount(x): + self.check_values(types.List(types.intp, reflected=True), (x,)) + + def test_print_array_item(self): + """ + Test printing a Numpy character sequence + """ + dtype = np.dtype([('x', 'S4')]) + arr = np.frombuffer(bytearray(range(1, 9)), dtype=dtype) + + pyfunc = print_array_item + cfunc = jit(nopython=True)(pyfunc) + for i in range(len(arr)): + with captured_stdout(): + cfunc(arr, i) + self.assertEqual(sys.stdout.getvalue(), str(arr[i]['x']) + '\n') + + def test_print_multiple_values(self): + pyfunc = print_values + cfunc = njit((types.intp,) * 3)(pyfunc) + with captured_stdout(): + cfunc(1, 2, 3) + self.assertEqual(sys.stdout.getvalue(), '1 2 3\n') + + def test_print_nogil(self): + pyfunc = print_values + cfunc = jit(nopython=True, nogil=True)(pyfunc) + with captured_stdout(): + cfunc(1, 2, 3) + self.assertEqual(sys.stdout.getvalue(), '1 2 3\n') + + def test_print_empty(self): + pyfunc = print_empty + cfunc = njit((),)(pyfunc) + with captured_stdout(): + cfunc() + self.assertEqual(sys.stdout.getvalue(), '\n') + + def test_print_strings(self): + pyfunc = print_string + cfunc = njit((types.intp,))(pyfunc) + with captured_stdout(): + cfunc(1) + self.assertEqual(sys.stdout.getvalue(), '1 hop! 3.5\n') + + def test_print_vararg(self): + # Test *args support for print(). This is desired since + # print() can use a dedicated IR node. + pyfunc = print_vararg + cfunc = jit(nopython=True)(pyfunc) + with captured_stdout(): + cfunc(1, (2, 3), (4, 5j)) + self.assertEqual(sys.stdout.getvalue(), '1 (2, 3) 4 5j\n') + + pyfunc = print_string_vararg + cfunc = jit(nopython=True)(pyfunc) + with captured_stdout(): + cfunc(1, (2, 3), (4, 5j)) + self.assertEqual(sys.stdout.getvalue(), '1 hop! 
(2, 3) 4 5j\n') + + def test_inner_fn_print(self): + @jit(nopython=True) + def foo(x): + print(x) + + @jit(nopython=True) + def bar(x): + foo(x) + foo('hello') + + # Printing an array requires the Env. + # We need to make sure the inner function can obtain the Env. + x = np.arange(5) + with captured_stdout(): + bar(x) + self.assertEqual(sys.stdout.getvalue(), '[0 1 2 3 4]\nhello\n') + + def test_print_w_kwarg_raises(self): + @jit(nopython=True) + def print_kwarg(): + print('x', flush=True) + + with self.assertRaises(errors.UnsupportedError) as raises: + print_kwarg() + expected = ("Numba's print() function implementation does not support " + "keyword arguments.") + self.assertIn(raises.exception.msg, expected) + + def test_print_no_truncation(self): + ''' See: https://github.com/numba/numba/issues/3811 + ''' + @jit(nopython=True) + def foo(): + print(''.join(['a'] * 10000)) + with captured_stdout(): + foo() + self.assertEqual(sys.stdout.getvalue(), ''.join(['a'] * 10000) + '\n') + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_profiler.py b/venv/lib/python3.10/site-packages/numba/tests/test_profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..727abb2775c76415e7e8df04b50b0e0c83511d60 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_profiler.py @@ -0,0 +1,142 @@ +import cProfile as profiler +import os +import pstats +import subprocess +import sys + +import numpy as np + +from numba import jit, types +from numba.tests.support import needs_blas +import unittest + + +def generate_standard_dot_case(): + + @jit((types.float32[::1], types.float32[::1],)) + def dot(a, b): + sum = 0 + for i in range(len(a)): + sum += a[i]*b[i] + return sum + + dot._enable_sysmon = True + return dot, dot + + +def generate_raising_dot_case(): + + @jit((types.float32[::1], types.float32[::1],)) + def raising_dot(a, b): + # this is like dot above, it does all the work, but the raises 
+ sum = 0 + for i in range(len(a)): + sum += a[i]*b[i] + raise ValueError("problem with dot") + + raising_dot._enable_sysmon = True + + def call_raising_dot(a, b): + try: + raising_dot(a, b) + except ValueError: + pass + + return raising_dot, call_raising_dot + + +def np_dot(a, b): + return np.dot(a, b) + + +class TestProfiler(unittest.TestCase): + + def check_profiler_dot(self, caller, cfunc): + """ + Make sure the jit-compiled function shows up in the profile stats + as a regular Python function. + """ + a = np.arange(16, dtype=np.float32) + b = np.arange(16, dtype=np.float32) + n_calls = 123 + p = profiler.Profile() + p.enable() + try: + for _ in range(n_calls): + caller(a, b) + finally: + p.disable() + stats = pstats.Stats(p).strip_dirs() + + def check_stats_for_key(stats, code, n_calls): + expected_key = (os.path.basename(code.co_filename), + code.co_firstlineno, + code.co_name, + ) + # check the key is in the stats + self.assertIn(expected_key, stats.stats) + # check that call for the key has been made `n_calls` times. 
+ func_stats = stats.stats[expected_key] + self.assertEqual(func_stats[:2], (n_calls, n_calls)) + + # check the JIT compiled function + check_stats_for_key(stats, cfunc.py_func.__code__, n_calls) + + # check the caller if it's not the same as the cfunc + if caller is not cfunc: + check_stats_for_key(stats, caller.__code__, n_calls) + + def test_profiler(self): + dot, _ = generate_standard_dot_case() + self.check_profiler_dot(dot, dot) + + def test_profiler_for_raising_function(self): + raising_dot, call_raising_dot = generate_raising_dot_case() + self.check_profiler_dot(call_raising_dot, raising_dot) + + @needs_blas + def test_profiler_np_dot(self): + # Issue #1786: initializing BLAS would crash when profiling + code = """if 1: + import cProfile as profiler + + import numpy as np + + from numba import jit + from numba.tests.test_profiler import np_dot + + cfunc = jit(nopython=True)(np_dot) + + a = np.arange(16, dtype=np.float32) + b = np.arange(16, dtype=np.float32) + + p = profiler.Profile() + p.enable() + cfunc(a, b) + cfunc(a, b) + p.disable() + """ + subprocess.check_call([sys.executable, "-c", code]) + + def test_issue_3229(self): + # Issue #3229: Seemingly random segfaults when profiling due to + # frame injection. + # numba.tests.npyufunc.test_dufunc.TestDUFunc.test_npm_call is the + # first test case crashing when profiling. Fingers crossed fixing + # this is sufficient proof for the general case. 
+ + code = """if 1: + import cProfile as profiler + p = profiler.Profile() + p.enable() + + from numba.tests.npyufunc.test_dufunc import TestDUFunc + t = TestDUFunc('test_npm_call') + t.test_npm_call() + + p.disable() + """ + subprocess.check_call([sys.executable, "-c", code]) + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_pycc.py b/venv/lib/python3.10/site-packages/numba/tests/test_pycc.py new file mode 100644 index 0000000000000000000000000000000000000000..4541b3c10ae5e0c7412040e30a9ca25cb0f2346c --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_pycc.py @@ -0,0 +1,361 @@ +import contextlib +import importlib +import os +import shutil +import subprocess +import sys +import tempfile +from unittest import skip +from ctypes import * + +import numpy as np + +import llvmlite.binding as ll + +from numba.core import utils +from numba.tests.support import (TestCase, tag, import_dynamic, temp_directory, + has_blas, needs_setuptools, skip_if_py313_on_windows) + +import unittest + + +_skip_reason = 'windows only' +_windows_only = unittest.skipIf(not sys.platform.startswith('win'), + _skip_reason) + + +base_path = os.path.dirname(os.path.abspath(__file__)) + + +def unset_macosx_deployment_target(): + """Unset MACOSX_DEPLOYMENT_TARGET because we are not building portable + libraries + """ + if 'MACOSX_DEPLOYMENT_TARGET' in os.environ: + del os.environ['MACOSX_DEPLOYMENT_TARGET'] + + +@needs_setuptools +class TestCompilerChecks(TestCase): + + # NOTE: THIS TEST MUST ALWAYS RUN ON WINDOWS, DO NOT SKIP + @_windows_only + def test_windows_compiler_validity(self): + # When inside conda-build VSINSTALLDIR should be set and windows should + # have a valid compiler available, `external_compiler_works()` should + # agree with this. If this is not the case then error out to alert devs. + + # This is a local import to avoid deprecation warnings being generated + # through the use of the numba.pycc module. 
+ from numba.pycc.platform import external_compiler_works + is_running_conda_build = os.environ.get('CONDA_BUILD', None) is not None + if is_running_conda_build: + if os.environ.get('VSINSTALLDIR', None) is not None: + self.assertTrue(external_compiler_works()) + + +class BasePYCCTest(TestCase): + + def setUp(self): + unset_macosx_deployment_target() + + self.tmpdir = temp_directory('test_pycc') + # Make sure temporary files and directories created by + # distutils don't clutter the top-level /tmp + tempfile.tempdir = self.tmpdir + + def tearDown(self): + tempfile.tempdir = None + # Since we're executing the module-under-test several times + # from the same process, we must clear the exports registry + # between invocations. + + # This is a local import to avoid deprecation warnings being generated + # through the use of the numba.pycc module. + from numba.pycc.decorators import clear_export_registry + clear_export_registry() + + @contextlib.contextmanager + def check_c_ext(self, extdir, name): + sys.path.append(extdir) + try: + lib = import_dynamic(name) + yield lib + finally: + sys.path.remove(extdir) + sys.modules.pop(name, None) + + +@needs_setuptools +@skip_if_py313_on_windows +class TestCC(BasePYCCTest): + + def setUp(self): + super(TestCC, self).setUp() + self.skip_if_no_external_compiler() # external compiler needed + from numba.tests import compile_with_pycc + self._test_module = compile_with_pycc + importlib.reload(self._test_module) + + @contextlib.contextmanager + def check_cc_compiled(self, cc): + #cc.verbose = True + cc.output_dir = self.tmpdir + cc.compile() + + with self.check_c_ext(self.tmpdir, cc.name) as lib: + yield lib + + def check_cc_compiled_in_subprocess(self, lib, code): + prolog = """if 1: + import sys + import types + # to disable numba package + sys.modules['numba'] = types.ModuleType('numba') + try: + from numba import njit + except ImportError: + pass + else: + raise RuntimeError('cannot disable numba package') + + sys.path.insert(0, 
%(path)r) + import %(name)s as lib + """ % {'name': lib.__name__, + 'path': os.path.dirname(lib.__file__)} + code = prolog.strip(' ') + code + subprocess.check_call([sys.executable, '-c', code]) + + def test_cc_properties(self): + cc = self._test_module.cc + self.assertEqual(cc.name, 'pycc_test_simple') + + # Inferred output directory + d = self._test_module.cc.output_dir + self.assertTrue(os.path.isdir(d), d) + + # Inferred output filename + f = self._test_module.cc.output_file + self.assertFalse(os.path.exists(f), f) + self.assertTrue(os.path.basename(f).startswith('pycc_test_simple.'), f) + if sys.platform.startswith('linux'): + self.assertTrue(f.endswith('.so'), f) + # This is a local import to avoid deprecation warnings being + # generated through the use of the numba.pycc module. + from numba.pycc.platform import find_pyext_ending + self.assertIn(find_pyext_ending(), f) + + def test_compile(self): + with self.check_cc_compiled(self._test_module.cc) as lib: + res = lib.multi(123, 321) + self.assertPreciseEqual(res, 123 * 321) + res = lib.multf(987, 321) + self.assertPreciseEqual(res, 987.0 * 321.0) + res = lib.square(5) + self.assertPreciseEqual(res, 25) + self.assertIs(lib.get_none(), None) + with self.assertRaises(ZeroDivisionError): + lib.div(1, 0) + + def check_compile_for_cpu(self, cpu_name): + cc = self._test_module.cc + cc.target_cpu = cpu_name + + with self.check_cc_compiled(cc) as lib: + res = lib.multi(123, 321) + self.assertPreciseEqual(res, 123 * 321) + self.assertEqual(lib.multi.__module__, 'pycc_test_simple') + + def test_compile_for_cpu(self): + # Compiling for the host CPU should always succeed + self.check_compile_for_cpu(ll.get_host_cpu_name()) + + def test_compile_for_cpu_host(self): + # Compiling for the host CPU should always succeed + self.check_compile_for_cpu("host") + + def test_compile_helperlib(self): + with self.check_cc_compiled(self._test_module.cc_helperlib) as lib: + res = lib.power(2, 7) + self.assertPreciseEqual(res, 128) + 
for val in (-1, -1 + 0j, np.complex128(-1)): + res = lib.sqrt(val) + self.assertPreciseEqual(res, 1j) + for val in (4, 4.0, np.float64(4)): + res = lib.np_sqrt(val) + self.assertPreciseEqual(res, 2.0) + res = lib.spacing(1.0) + self.assertPreciseEqual(res, 2**-52) + # Implicit seeding at startup should guarantee a non-pathological + # start state. + self.assertNotEqual(lib.random(-1), lib.random(-1)) + res = lib.random(42) + expected = np.random.RandomState(42).random_sample() + self.assertPreciseEqual(res, expected) + res = lib.size(np.float64([0] * 3)) + self.assertPreciseEqual(res, 3) + + code = """if 1: + from numpy.testing import assert_equal, assert_allclose + res = lib.power(2, 7) + assert res == 128 + res = lib.random(42) + assert_allclose(res, %(expected)s) + res = lib.spacing(1.0) + assert_allclose(res, 2**-52) + """ % {'expected': expected} + self.check_cc_compiled_in_subprocess(lib, code) + + def test_compile_nrt(self): + with self.check_cc_compiled(self._test_module.cc_nrt) as lib: + # Sanity check + self.assertPreciseEqual(lib.zero_scalar(1), 0.0) + res = lib.zeros(3) + self.assertEqual(list(res), [0, 0, 0]) + if has_blas: + res = lib.vector_dot(4) + self.assertPreciseEqual(res, 30.0) + # test argsort + val = np.float64([2., 5., 1., 3., 4.]) + res = lib.np_argsort(val) + expected = np.argsort(val) + self.assertPreciseEqual(res, expected) + + code = """if 1: + from numpy.testing import assert_equal + from numpy import float64, argsort + res = lib.zero_scalar(1) + assert res == 0.0 + res = lib.zeros(3) + assert list(res) == [0, 0, 0] + if %(has_blas)s: + res = lib.vector_dot(4) + assert res == 30.0 + val = float64([2., 5., 1., 3., 4.]) + res = lib.np_argsort(val) + expected = argsort(val) + assert_equal(res, expected) + """ % dict(has_blas=has_blas) + self.check_cc_compiled_in_subprocess(lib, code) + + def test_hashing(self): + with self.check_cc_compiled(self._test_module.cc_nrt) as lib: + res = lib.hash_literal_str_A() + self.assertPreciseEqual(res, 
hash("A")) + res = lib.hash_str("A") + self.assertPreciseEqual(res, hash("A")) + + code = """if 1: + from numpy.testing import assert_equal + res = lib.hash_literal_str_A() + assert_equal(res, hash("A")) + res = lib.hash_str("A") + assert_equal(res, hash("A")) + """ + self.check_cc_compiled_in_subprocess(lib, code) + + def test_c_extension_usecase(self): + # Test C-extensions + with self.check_cc_compiled(self._test_module.cc_nrt) as lib: + arr = np.arange(128, dtype=np.intp) + got = lib.dict_usecase(arr) + expect = arr * arr + self.assertPreciseEqual(got, expect) + + +@needs_setuptools +@skip_if_py313_on_windows +class TestDistutilsSupport(TestCase): + + def setUp(self): + super().setUp() + self.skip_if_no_external_compiler() # external compiler needed + + unset_macosx_deployment_target() + + # Copy the test project into a temp directory to avoid + # keeping any build leftovers in the source tree + self.tmpdir = temp_directory('test_pycc_distutils') + source_dir = os.path.join(base_path, 'pycc_distutils_usecase') + self.usecase_dir = os.path.join(self.tmpdir, 'work') + shutil.copytree(source_dir, self.usecase_dir) + + def check_setup_py(self, setup_py_file): + # Compute PYTHONPATH to ensure the child processes see this Numba + import numba + numba_path = os.path.abspath(os.path.dirname( + os.path.dirname(numba.__file__))) + env = dict(os.environ) + if env.get('PYTHONPATH', ''): + env['PYTHONPATH'] = numba_path + os.pathsep + env['PYTHONPATH'] + else: + env['PYTHONPATH'] = numba_path + + def run_python(args): + p = subprocess.Popen([sys.executable] + args, + cwd=self.usecase_dir, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + env=env) + out, _ = p.communicate() + rc = p.wait() + if rc != 0: + self.fail("python failed with the following output:\n%s" + % out.decode('utf-8', 'ignore')) + + run_python([setup_py_file, "build_ext", "--inplace"]) + code = """if 1: + import pycc_compiled_module as lib + assert lib.get_const() == 42 + res = lib.ones(3) + assert 
list(res) == [1.0, 1.0, 1.0] + """ + run_python(["-c", code]) + + def check_setup_nested_py(self, setup_py_file): + # Compute PYTHONPATH to ensure the child processes see this Numba + import numba + numba_path = os.path.abspath(os.path.dirname( + os.path.dirname(numba.__file__))) + env = dict(os.environ) + if env.get('PYTHONPATH', ''): + env['PYTHONPATH'] = numba_path + os.pathsep + env['PYTHONPATH'] + else: + env['PYTHONPATH'] = numba_path + + def run_python(args): + p = subprocess.Popen([sys.executable] + args, + cwd=self.usecase_dir, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + env=env) + out, _ = p.communicate() + rc = p.wait() + if rc != 0: + self.fail("python failed with the following output:\n%s" + % out.decode('utf-8', 'ignore')) + + run_python([setup_py_file, "build_ext", "--inplace"]) + code = """if 1: + import nested.pycc_compiled_module as lib + assert lib.get_const() == 42 + res = lib.ones(3) + assert list(res) == [1.0, 1.0, 1.0] + """ + run_python(["-c", code]) + + def test_setup_py_distutils(self): + self.check_setup_py("setup_distutils.py") + + def test_setup_py_distutils_nested(self): + self.check_setup_nested_py("setup_distutils_nested.py") + + def test_setup_py_setuptools(self): + self.check_setup_py("setup_setuptools.py") + + def test_setup_py_setuptools_nested(self): + self.check_setup_nested_py("setup_setuptools_nested.py") + + +if __name__ == "__main__": + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_python_int.py b/venv/lib/python3.10/site-packages/numba/tests/test_python_int.py new file mode 100644 index 0000000000000000000000000000000000000000..19405ca7f3ae46390e6b364187c679f3d24e6a14 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_python_int.py @@ -0,0 +1,51 @@ +import unittest +from numba import jit +from numba.core import types + + +force_pyobj_flags = {'forceobj': True} +no_pyobj_flags = {'nopython': True} + + +def return_int(a, b): + return a + b + + +class 
TestPythonInt(unittest.TestCase): + + # Issue #474: ints should be returned rather than longs under Python 2, + # as much as possible. + + def test_int_return_type(self, flags=force_pyobj_flags, + int_type=types.int64, operands=(3, 4)): + pyfunc = return_int + cfunc = jit((int_type, int_type), **flags)(pyfunc) + expected = pyfunc(*operands) + got = cfunc(*operands) + self.assertIs(type(got), type(expected)) + self.assertEqual(got, expected) + + def test_int_return_type_npm(self): + self.test_int_return_type(flags=no_pyobj_flags) + + def test_unsigned_int_return_type(self, flags=force_pyobj_flags): + self.test_int_return_type(int_type=types.uint64, flags=flags) + + def test_unsigned_int_return_type_npm(self): + self.test_unsigned_int_return_type(flags=no_pyobj_flags) + + def test_long_int_return_type(self, flags=force_pyobj_flags): + # Same but returning a 64-bit integer. The return type should be + # `int` on 64-bit builds, `long` on 32-bit ones (or Windows). + self.test_int_return_type(flags=flags, operands=(2**33, 2**40)) + + def test_long_int_return_type_npm(self): + self.test_long_int_return_type(flags=no_pyobj_flags) + + def test_longer_int_return_type(self, flags=force_pyobj_flags): + # This won't be supported in nopython mode. 
+ self.test_int_return_type(flags=flags, operands=(2**70, 2**75)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_pythonapi.py b/venv/lib/python3.10/site-packages/numba/tests/test_pythonapi.py new file mode 100644 index 0000000000000000000000000000000000000000..297b6e3394473edf896dcb427ebf2c494fc772f5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_pythonapi.py @@ -0,0 +1,123 @@ +import ctypes +import unittest +from numba.core import types +from numba.core.extending import intrinsic +from numba import jit, njit +from numba.tests.support import captured_stdout + + +@intrinsic +def _pyapi_bytes_as_string(typingctx, csrc, size): + sig = types.voidptr(csrc, size) # cstring == void* + + def codegen(context, builder, sig, args): + [csrc, size] = args + api = context.get_python_api(builder) + b = api.bytes_from_string_and_size(csrc, size) + return api.bytes_as_string(b) + return sig, codegen + + +def PyBytes_AsString(uni): + # test_PyBytes_AsString will call this function with a unicode type. 
+ # We then use the underlying buffer to create a PyBytes object and call the + # PyBytes_AsString function with PyBytes object as argument + return _pyapi_bytes_as_string(uni._data, uni._length) + + +@intrinsic +def _pyapi_bytes_as_string_and_size(typingctx, csrc, size): + # return a tuple containing the c-string and size + retty = types.Tuple.from_types((csrc, size)) + sig = retty(csrc, size) + + def codegen(context, builder, sig, args): + [csrc, size] = args + pyapi = context.get_python_api(builder) + b = pyapi.bytes_from_string_and_size(csrc, size) + p_cstr = builder.alloca(pyapi.cstring) + p_size = builder.alloca(pyapi.py_ssize_t) + pyapi.bytes_as_string_and_size(b, p_cstr, p_size) + + cstr = builder.load(p_cstr) + size = builder.load(p_size) + tup = context.make_tuple(builder, sig.return_type, (cstr, size)) + return tup + return sig, codegen + + +def PyBytes_AsStringAndSize(uni): + return _pyapi_bytes_as_string_and_size(uni._data, uni._length) + + +class TestPythonAPI(unittest.TestCase): + + def test_PyBytes_AsString(self): + cfunc = jit(nopython=True)(PyBytes_AsString) + cstr = cfunc('hello') # returns a cstring + + fn = ctypes.pythonapi.PyBytes_FromString + fn.argtypes = [ctypes.c_void_p] + fn.restype = ctypes.py_object + obj = fn(cstr) + + # Use the cstring created from bytes_as_string to create a python + # bytes object + self.assertEqual(obj, b'hello') + + def test_PyBytes_AsStringAndSize(self): + cfunc = jit(nopython=True)(PyBytes_AsStringAndSize) + tup = cfunc('hello\x00world') # returns a tuple: cstring and its size + + fn = ctypes.pythonapi.PyBytes_FromStringAndSize + fn.argtypes = [ctypes.c_void_p, ctypes.c_size_t] + fn.restype = ctypes.py_object + obj = fn(tup[0], tup[1]) + + # Use the cstring created from bytes_from_string_and_size to create + # a python bytes object + self.assertEqual(obj, b'hello\x00world') + + +class PythonAPIEmptyArgs(unittest.TestCase): + def test_empty_args(self): + def callme(**kwargs): + print("callme", kwargs) + + 
@intrinsic + def py_call(tyctx): + def codegen(context, builder, sig, args): + pyapi = context.get_python_api(builder) + gil = pyapi.gil_ensure() + + num = pyapi.long_from_longlong( + context.get_constant(types.intp, 0xCAFE) + ) + kwds = pyapi.dict_pack({"key": num}.items()) + fn_print = pyapi.unserialize(pyapi.serialize_object(callme)) + # segfault: https://github.com/numba/numba/issues/5871 + res = pyapi.call(fn_print, None, kwds) + + pyapi.decref(res) + pyapi.decref(fn_print) + pyapi.decref(kwds) + pyapi.decref(num) + + pyapi.gil_release(gil) + return res + + return types.none(), codegen + + @njit + def foo(): + py_call() + + with captured_stdout() as out: + foo() + d = {"key": 0xCAFE} + expected = f"callme {d}\n" + self.assertEqual(out.getvalue(), expected) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_random.py b/venv/lib/python3.10/site-packages/numba/tests/test_random.py new file mode 100644 index 0000000000000000000000000000000000000000..8127ef44dff60f4b00c66cbd61dd524d1c7052e0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_random.py @@ -0,0 +1,1965 @@ +import collections +import functools +import math +import multiprocessing +import os +import random +import subprocess +import sys +import threading +import itertools +from textwrap import dedent + +import numpy as np + +import unittest + +import numba +from numba import jit, _helperlib, njit +from numba.core import types +from numba.tests.support import TestCase, compile_function, tag +from numba.core.errors import TypingError + + +# State size of the Mersenne Twister +N = 624 + + +def get_py_state_ptr(): + return _helperlib.rnd_get_py_state_ptr() + +def get_np_state_ptr(): + return _helperlib.rnd_get_np_state_ptr() + + +def numpy_randint1(a): + return np.random.randint(a) + +def numpy_randint2(a, b): + return np.random.randint(a, b) + +def random_randint(a, b): + return random.randint(a, b) + +def 
random_randrange1(a): + return random.randrange(a) + +def random_randrange2(a, b): + return random.randrange(a, b) + +def random_randrange3(a, b, c): + return random.randrange(a, b, c) + +def numpy_choice1(a): + return np.random.choice(a) + +def numpy_choice2(a, size): + return np.random.choice(a, size=size) + +def numpy_choice3(a, size, replace): + return np.random.choice(a, size=size, replace=replace) + +def numpy_multinomial2(n, pvals): + return np.random.multinomial(n, pvals) + +def numpy_multinomial3(n, pvals, size): + return np.random.multinomial(n, pvals=pvals, size=size) + +def numpy_dirichlet(alpha, size): + return np.random.dirichlet(alpha, size=size) + +def numpy_dirichlet_default(alpha): + return np.random.dirichlet(alpha) + +def numpy_noncentral_chisquare(df, nonc, size): + return np.random.noncentral_chisquare(df, nonc, size=size) + +def numpy_noncentral_chisquare_default(df, nonc): + return np.random.noncentral_chisquare(df, nonc) + +def numpy_check_rand(seed, a, b): + np.random.seed(seed) + expected = np.random.random((a, b)) + np.random.seed(seed) + got = np.random.rand(a, b) + return expected, got + +def numpy_check_randn(seed, a, b): + np.random.seed(seed) + expected = np.random.standard_normal((a, b)) + np.random.seed(seed) + got = np.random.randn(a, b) + return expected, got + +def jit_with_args(name, argstring): + code = """def func(%(argstring)s): + return %(name)s(%(argstring)s) +""" % locals() + pyfunc = compile_function("func", code, globals()) + return jit(nopython=True)(pyfunc) + +def jit_with_kwargs(name, kwarg_list): + # Similar to jit_with_args, but uses keyword arguments + call_args_with_kwargs = ','.join([f'{kw}={kw}' for kw in kwarg_list]) + signature = ','.join(kwarg_list) + code = f"""def func({signature}): + return {name}({call_args_with_kwargs}) +""" + pyfunc = compile_function("func", code, globals()) + return jit(nopython=True)(pyfunc) + +def jit_nullary(name): + return jit_with_args(name, "") + +def jit_unary(name): + return 
jit_with_args(name, "a") + +def jit_binary(name): + return jit_with_args(name, "a, b") + +def jit_ternary(name): + return jit_with_args(name, "a, b, c") + + +random_gauss = jit_binary("random.gauss") +random_random = jit_nullary("random.random") +random_seed = jit_unary("random.seed") + +numpy_normal = jit_binary("np.random.normal") +numpy_random = jit_nullary("np.random.random") +numpy_seed = jit_unary("np.random.seed") + + +def _copy_py_state(r, ptr): + """ + Copy state of Python random *r* to Numba state *ptr*. + """ + mt = r.getstate()[1] + ints, index = mt[:-1], mt[-1] + _helperlib.rnd_set_state(ptr, (index, list(ints))) + return ints, index + +def _copy_np_state(r, ptr): + """ + Copy state of Numpy random *r* to Numba state *ptr*. + """ + ints, index = r.get_state()[1:3] + _helperlib.rnd_set_state(ptr, (index, [int(x) for x in ints])) + return ints, index + +def sync_to_numpy(r): + _ver, mt_st, _gauss_next = r.getstate() + mt_pos = mt_st[-1] + mt_ints = mt_st[:-1] + assert len(mt_ints) == 624 + + np_st = ('MT19937', np.array(mt_ints, dtype='uint32'), mt_pos) + if _gauss_next is None: + np_st += (0, 0.0) + else: + np_st += (1, _gauss_next) + + np.random.set_state(np_st) + +# Pure Python equivalents of some of the Numpy distributions, using +# Python's basic generators. + +def py_chisquare(r, df): + return 2.0 * r.gammavariate(df / 2.0, 1.0) + +def py_f(r, num, denom): + return ((py_chisquare(r, num) * denom) / + (py_chisquare(r, denom) * num)) + + +class BaseTest(TestCase): + + def _follow_cpython(self, ptr, seed=2): + r = random.Random(seed) + _copy_py_state(r, ptr) + return r + + def _follow_numpy(self, ptr, seed=2): + r = np.random.RandomState(seed) + _copy_np_state(r, ptr) + return r + + +class TestInternals(BaseTest): + """ + Test low-level internals of the implementation. 
+ """ + + def _check_get_set_state(self, ptr): + state = _helperlib.rnd_get_state(ptr) + i, ints = state + self.assertIsInstance(i, int) + self.assertIsInstance(ints, list) + self.assertEqual(len(ints), N) + j = (i * 100007) % N + ints = [i * 3 for i in range(N)] + # Roundtrip + _helperlib.rnd_set_state(ptr, (j, ints)) + self.assertEqual(_helperlib.rnd_get_state(ptr), (j, ints)) + + def _check_shuffle(self, ptr): + # We test shuffling against CPython + r = random.Random() + ints, index = _copy_py_state(r, ptr) + # Force shuffling in CPython generator + for i in range(index, N + 1, 2): + r.random() + _helperlib.rnd_shuffle(ptr) + # Check new integer keys + mt = r.getstate()[1] + ints, index = mt[:-1], mt[-1] + self.assertEqual(_helperlib.rnd_get_state(ptr)[1], list(ints)) + + def _check_init(self, ptr): + # We use the same integer seeding as Numpy + # (CPython is different: it treats the integer as a byte array) + r = np.random.RandomState() + for i in [0, 1, 125, 2**32 - 5]: + # Need to cast to a C-sized int (for Numpy <= 1.7) + r.seed(np.uint32(i)) + st = r.get_state() + ints = list(st[1]) + index = st[2] + assert index == N # sanity check + _helperlib.rnd_seed(ptr, i) + self.assertEqual(_helperlib.rnd_get_state(ptr), (index, ints)) + + def _check_perturb(self, ptr): + states = [] + for i in range(10): + # Initialize with known state + _helperlib.rnd_seed(ptr, 0) + # Perturb with entropy + _helperlib.rnd_seed(ptr, os.urandom(512)) + states.append(tuple(_helperlib.rnd_get_state(ptr)[1])) + # No two identical states + self.assertEqual(len(set(states)), len(states)) + + def test_get_set_state(self): + self._check_get_set_state(get_py_state_ptr()) + + def test_shuffle(self): + self._check_shuffle(get_py_state_ptr()) + + def test_init(self): + self._check_init(get_py_state_ptr()) + + def test_perturb(self): + self._check_perturb(get_py_state_ptr()) + + +class TestRandom(BaseTest): + + # NOTE: there may be cascading imprecision issues (e.g. 
between x87-using + # C code and SSE-using LLVM code), which is especially brutal for some + # iterative algorithms with sensitive exit conditions. + # Therefore we stick to hardcoded integers for seed values. + + def _check_random_seed(self, seedfunc, randomfunc): + """ + Check seed()- and random()-like functions. + """ + # Our seed() mimics NumPy's. + r = np.random.RandomState() + for i in [0, 1, 125, 2**32 - 1]: + # Need to cast to a C-sized int (for Numpy <= 1.7) + r.seed(np.uint32(i)) + seedfunc(i) + # Be sure to trigger a reshuffle + for j in range(N + 10): + self.assertPreciseEqual(randomfunc(), r.uniform(0.0, 1.0)) + + def test_random_random(self): + self._check_random_seed(random_seed, random_random) + + def test_numpy_random(self): + self._check_random_seed(numpy_seed, numpy_random) + # Test aliases + self._check_random_seed(numpy_seed, jit_nullary("np.random.random_sample")) + self._check_random_seed(numpy_seed, jit_nullary("np.random.ranf")) + self._check_random_seed(numpy_seed, jit_nullary("np.random.sample")) + self._check_random_seed(numpy_seed, jit_nullary("np.random.rand")) + + def _check_random_sized(self, seedfunc, randomfunc): + # Our seed() mimics NumPy's. + r = np.random.RandomState() + for i in [0, 1, 125, 2**32 - 1]: + # Need to cast to a C-sized int (for Numpy <= 1.7) + r.seed(np.uint32(i)) + seedfunc(i) + for n in range(10): + self.assertPreciseEqual(randomfunc(n), r.uniform(0.0, 1.0, n)) + + def test_numpy_random_sized(self): + self._check_random_sized(numpy_seed, jit_unary("np.random.random_sample")) + self._check_random_sized(numpy_seed, jit_unary("np.random.ranf")) + self._check_random_sized(numpy_seed, jit_unary("np.random.sample")) + self._check_random_sized(numpy_seed, jit_unary("np.random.rand")) + + def test_independent_generators(self): + # PRNGs for Numpy and Python are independent. 
+ N = 10 + random_seed(1) + py_numbers = [random_random() for i in range(N)] + numpy_seed(2) + np_numbers = [numpy_random() for i in range(N)] + random_seed(1) + numpy_seed(2) + pairs = [(random_random(), numpy_random()) for i in range(N)] + self.assertPreciseEqual([p[0] for p in pairs], py_numbers) + self.assertPreciseEqual([p[1] for p in pairs], np_numbers) + + def _check_getrandbits(self, func, ptr): + """ + Check a getrandbits()-like function. + """ + # Our implementation follows CPython's for bits <= 64. + r = self._follow_cpython(ptr) + for nbits in range(1, 65): + expected = r.getrandbits(nbits) + got = func(nbits) + self.assertPreciseEqual(expected, got) + self.assertRaises(OverflowError, func, 65) + self.assertRaises(OverflowError, func, 9999999) + self.assertRaises(OverflowError, func, -1) + + def test_random_getrandbits(self): + self._check_getrandbits(jit_unary("random.getrandbits"), get_py_state_ptr()) + + # Explanation for the large ulps value: on 32-bit platforms, our + # LLVM-compiled functions use SSE but they are compared against + # C functions which use x87. + # On some distributions, the errors seem to accumulate dramatically. 
+ + def _check_dist(self, func, pyfunc, argslist, niters=3, + prec='double', ulps=12, pydtype=None): + assert len(argslist) + for args in argslist: + results = [func(*args) for i in range(niters)] + pyresults = [(pyfunc(*args, dtype=pydtype) if pydtype else pyfunc(*args)) + for i in range(niters)] + self.assertPreciseEqual(results, pyresults, prec=prec, ulps=ulps, + msg="for arguments %s" % (args,)) + + def _check_dist_kwargs(self, func, pyfunc, kwargslist, niters=3, + prec='double', ulps=12, pydtype=None): + assert len(kwargslist) + for kwargs in kwargslist: + results = [func(**kwargs) for i in range(niters)] + pyresults = [(pyfunc(**kwargs, dtype=pydtype) if pydtype else pyfunc(**kwargs)) + for i in range(niters)] + self.assertPreciseEqual(results, pyresults, prec=prec, ulps=ulps, + msg="for arguments %s" % (kwargs,)) + + def _check_gauss(self, func2, func1, func0, ptr): + """ + Check a gauss()-like function. + """ + # Our implementation follows Numpy's. + r = self._follow_numpy(ptr) + if func2 is not None: + self._check_dist(func2, r.normal, + [(1.0, 1.0), (2.0, 0.5), (-2.0, 0.5)], + niters=N // 2 + 10) + if func1 is not None: + self._check_dist(func1, r.normal, [(0.5,)]) + if func0 is not None: + self._check_dist(func0, r.normal, [()]) + + def test_random_gauss(self): + self._check_gauss(jit_binary("random.gauss"), None, None, get_py_state_ptr()) + + def test_random_normalvariate(self): + # normalvariate() is really an alias to gauss() in Numba + # (not in Python, though - they use different algorithms) + self._check_gauss(jit_binary("random.normalvariate"), None, None, + get_py_state_ptr()) + + def test_numpy_normal(self): + self._check_gauss(jit_binary("np.random.normal"), + jit_unary("np.random.normal"), + jit_nullary("np.random.normal"), + get_np_state_ptr()) + + def test_numpy_standard_normal(self): + self._check_gauss(None, None, jit_nullary("np.random.standard_normal"), + get_np_state_ptr()) + + def test_numpy_randn(self): + self._check_gauss(None, None, 
jit_nullary("np.random.randn"), + get_np_state_ptr()) + + def _check_lognormvariate(self, func2, func1, func0, ptr): + """ + Check a lognormvariate()-like function. + """ + # Our implementation follows Numpy's. + r = self._follow_numpy(ptr) + if func2 is not None: + self._check_dist(func2, r.lognormal, + [(1.0, 1.0), (2.0, 0.5), (-2.0, 0.5)], + niters=N // 2 + 10) + if func1 is not None: + self._check_dist(func1, r.lognormal, [(0.5,)]) + if func0 is not None: + self._check_dist(func0, r.lognormal, [()]) + + def test_random_lognormvariate(self): + self._check_lognormvariate(jit_binary("random.lognormvariate"), + None, None, get_py_state_ptr()) + + def test_numpy_lognormal(self): + self._check_lognormvariate(jit_binary("np.random.lognormal"), + jit_unary("np.random.lognormal"), + jit_nullary("np.random.lognormal"), + get_np_state_ptr()) + + def _check_randrange(self, func1, func2, func3, ptr, max_width, is_numpy, tp=None): + """ + Check a randrange()-like function. + """ + # Sanity check + ints = [] + for i in range(10): + ints.append(func1(500000000)) + ints.append(func2(5, 500000000)) + if func3 is not None: + ints.append(func3(5, 500000000, 3)) + if is_numpy: + rr = self._follow_numpy(ptr).randint + else: + rr = self._follow_cpython(ptr).randrange + widths = [w for w in [1, 5, 8, 5000, 2**40, 2**62 + 2**61] if w < max_width] + pydtype = tp if is_numpy else None + for width in widths: + self._check_dist(func1, rr, [(width,)], niters=10, + pydtype=pydtype) + self._check_dist(func2, rr, [(-2, 2 +width)], niters=10, + pydtype=pydtype) + if func3 is not None: + self.assertPreciseEqual(func3(-2, 2 + width, 6), + rr(-2, 2 + width, 6)) + self.assertPreciseEqual(func3(2 + width, 2, -3), + rr(2 + width, 2, -3)) + # Empty ranges + self.assertRaises(ValueError, func1, 0) + self.assertRaises(ValueError, func1, -5) + self.assertRaises(ValueError, func2, 5, 5) + self.assertRaises(ValueError, func2, 5, 2) + if func3 is not None: + self.assertRaises(ValueError, func3, 5, 7, -1) + 
self.assertRaises(ValueError, func3, 7, 5, 1) + + def test_random_randrange(self): + for tp, max_width in [(types.int64, 2**63), (types.int32, 2**31)]: + cf1 = njit((tp,))(random_randrange1) + cf2 = njit((tp, tp,),)(random_randrange2) + cf3 = njit((tp, tp, tp,))(random_randrange3) + self._check_randrange(cf1, cf2, cf3, get_py_state_ptr(), + max_width, False) + + def test_numpy_randint(self): + for tp, np_tp, max_width in [(types.int64, np.int64, 2**63), + (types.int32, np.int32, 2**31)]: + cf1 = njit((tp,))(numpy_randint1) + cf2 = njit((tp, tp,))(numpy_randint2) + self._check_randrange(cf1, cf2, None, get_np_state_ptr(), max_width, + True, np_tp) + + def _check_randint(self, func, ptr, max_width): + """ + Check a randint()-like function. + """ + # Sanity check + ints = [] + for i in range(10): + ints.append(func(5, 500000000)) + self.assertEqual(len(ints), len(set(ints)), ints) + + r = self._follow_cpython(ptr) + for args in [(1, 5), (13, 5000), (20, 2**62 + 2**61)]: + if args[1] > max_width: + continue + self._check_dist(func, r.randint, [args], niters=10) + # Empty ranges + self.assertRaises(ValueError, func, 5, 4) + self.assertRaises(ValueError, func, 5, 2) + + def test_random_randint(self): + for tp, max_width in [(types.int64, 2**63), (types.int32, 2**31)]: + cf = njit((tp, tp,))(random_randint) + self._check_randint(cf, get_py_state_ptr(), max_width) + + def _check_uniform(self, func, ptr): + """ + Check a uniform()-like function. + """ + # Our implementation follows Python's. + r = self._follow_cpython(ptr) + self._check_dist(func, r.uniform, + [(1.5, 1e6), (-2.5, 1e3), (1.5, -2.5)]) + + def _check_any_distrib_kwargs(self, func, ptr, distrib, paramlist): + """ + Check any numpy distribution function. Does Numba use the same keyword + argument names as Numpy? + And given a fixed seed, do they both return the same samples? 
+ """ + # Our implementation follows Numpy's (not Python's) + r = self._follow_numpy(ptr) + distrib_method_of_numpy = getattr(r, distrib) + self._check_dist_kwargs(func, distrib_method_of_numpy, paramlist) + + + def test_random_uniform(self): + self._check_uniform(jit_binary("random.uniform"), get_py_state_ptr()) + + def test_numpy_uniform(self): + self._check_uniform(jit_binary("np.random.uniform"), get_np_state_ptr()) + + def test_numpy_uniform_kwargs(self): + self._check_any_distrib_kwargs( + jit_with_kwargs("np.random.uniform", ['low', 'high']), + get_np_state_ptr(), + 'uniform', + paramlist=[{'low': 1.5, 'high': 1e6}, + {'low': -2.5, 'high': 1e3}, + {'low': 1.5, 'high': -2.5}]) + + def test_numpy_uniform_empty_size(self): + self._check_any_distrib_kwargs( + jit_with_kwargs("np.random.uniform", ['low', 'high', 'size']), + get_np_state_ptr(), + 'uniform', + paramlist=[{'low': 1.5, 'high': 1e6, 'size': ()}, + {'low': -2.5, 'high': 1e3, 'size': ()}, + {'low': 1.5, 'high': -2.5, 'size': ()}, + {'low': 1.5, 'high': 1e6, 'size': None}, + {'low': -2.5, 'high': 1e3, 'size': None}, + {'low': 1.5, 'high': -2.5, 'size': None},]) + + def _check_triangular(self, func2, func3, ptr): + """ + Check a triangular()-like function. + """ + # Our implementation follows Python's. + r = self._follow_cpython(ptr) + if func2 is not None: + self._check_dist(func2, r.triangular, + [(1.5, 3.5), (-2.5, 1.5), (1.5, 1.5)]) + self._check_dist(func3, r.triangular, [(1.5, 3.5, 2.2)]) + + def test_random_triangular(self): + self._check_triangular(jit_binary("random.triangular"), + jit_ternary("random.triangular"), + get_py_state_ptr()) + + def test_numpy_triangular(self): + triangular = jit_ternary("np.random.triangular") + fixed_triangular = lambda l, r, m: triangular(l, m, r) + self._check_triangular(None, fixed_triangular, get_np_state_ptr()) + + def _check_gammavariate(self, func2, func1, ptr): + """ + Check a gammavariate()-like function. + """ + # Our implementation follows Python's. 
+ r = self._follow_cpython(ptr) + if func2 is not None: + self._check_dist(func2, r.gammavariate, + [(0.5, 2.5), (1.0, 1.5), (1.5, 3.5)]) + if func1 is not None: + self.assertPreciseEqual(func1(1.5), r.gammavariate(1.5, 1.0)) + # Invalid inputs + if func2 is not None: + self.assertRaises(ValueError, func2, 0.0, 1.0) + self.assertRaises(ValueError, func2, 1.0, 0.0) + self.assertRaises(ValueError, func2, -0.5, 1.0) + self.assertRaises(ValueError, func2, 1.0, -0.5) + if func1 is not None: + self.assertRaises(ValueError, func1, 0.0) + self.assertRaises(ValueError, func1, -0.5) + + def test_random_gammavariate(self): + self._check_gammavariate(jit_binary("random.gammavariate"), None, + get_py_state_ptr()) + + def test_numpy_gamma(self): + self._check_gammavariate(jit_binary("np.random.gamma"), + jit_unary("np.random.gamma"), + get_np_state_ptr()) + self._check_gammavariate(None, + jit_unary("np.random.standard_gamma"), + get_np_state_ptr()) + + def _check_betavariate(self, func, ptr): + """ + Check a betavariate()-like function. + """ + # Our implementation follows Python's. + r = self._follow_cpython(ptr) + self._check_dist(func, r.betavariate, [(0.5, 2.5)]) + # Invalid inputs + self.assertRaises(ValueError, func, 0.0, 1.0) + self.assertRaises(ValueError, func, 1.0, 0.0) + self.assertRaises(ValueError, func, -0.5, 1.0) + self.assertRaises(ValueError, func, 1.0, -0.5) + + def test_random_betavariate(self): + self._check_betavariate(jit_binary("random.betavariate"), get_py_state_ptr()) + + def test_numpy_beta(self): + self._check_betavariate(jit_binary("np.random.beta"), get_np_state_ptr()) + + def _check_vonmisesvariate(self, func, ptr): + """ + Check a vonmisesvariate()-like function. 
+ """ + r = self._follow_cpython(ptr) + self._check_dist(func, r.vonmisesvariate, [(0.5, 2.5)]) + + def test_random_vonmisesvariate(self): + self._check_vonmisesvariate(jit_binary("random.vonmisesvariate"), + get_py_state_ptr()) + + def test_numpy_vonmises(self): + self._check_vonmisesvariate(jit_binary("np.random.vonmises"), + get_np_state_ptr()) + + def _check_expovariate(self, func, ptr): + """ + Check a expovariate()-like function. Note the second argument + is inversed compared to np.random.exponential(). + """ + r = self._follow_numpy(ptr) + for lambd in (0.2, 0.5, 1.5): + for i in range(3): + self.assertPreciseEqual(func(lambd), r.exponential(1 / lambd), + prec='double') + + def test_random_expovariate(self): + self._check_expovariate(jit_unary("random.expovariate"), get_py_state_ptr()) + + def _check_exponential(self, func1, func0, ptr): + """ + Check a exponential()-like function. + """ + r = self._follow_numpy(ptr) + if func1 is not None: + self._check_dist(func1, r.exponential, [(0.5,), (1.0,), (1.5,)]) + if func0 is not None: + self._check_dist(func0, r.exponential, [()]) + + def test_numpy_exponential(self): + self._check_exponential(jit_unary("np.random.exponential"), + jit_nullary("np.random.exponential"), + get_np_state_ptr()) + + def test_numpy_standard_exponential(self): + self._check_exponential(None, + jit_nullary("np.random.standard_exponential"), + get_np_state_ptr()) + + def _check_paretovariate(self, func, ptr): + """ + Check a paretovariate()-like function. + """ + # Our implementation follows Python's. 
+ r = self._follow_cpython(ptr) + self._check_dist(func, r.paretovariate, [(0.5,), (3.5,)]) + + def test_random_paretovariate(self): + self._check_paretovariate(jit_unary("random.paretovariate"), get_py_state_ptr()) + + def test_numpy_pareto(self): + pareto = jit_unary("np.random.pareto") + fixed_pareto = lambda a: pareto(a) + 1.0 + self._check_paretovariate(fixed_pareto, get_np_state_ptr()) + + def _check_weibullvariate(self, func2, func1, ptr): + """ + Check a weibullvariate()-like function. + """ + # Our implementation follows Python's. + r = self._follow_cpython(ptr) + if func2 is not None: + self._check_dist(func2, r.weibullvariate, [(0.5, 2.5)]) + if func1 is not None: + for i in range(3): + self.assertPreciseEqual(func1(2.5), + r.weibullvariate(1.0, 2.5)) + + def test_random_weibullvariate(self): + self._check_weibullvariate(jit_binary("random.weibullvariate"), + None, get_py_state_ptr()) + + def test_numpy_weibull(self): + self._check_weibullvariate(None, jit_unary("np.random.weibull"), + get_np_state_ptr()) + + def test_numpy_binomial(self): + # We follow Numpy's algorithm up to n*p == 30 + binomial = jit_binary("np.random.binomial") + r = self._follow_numpy(get_np_state_ptr(), 0) + self._check_dist(binomial, r.binomial, [(18, 0.25)]) + # Sanity check many values + for n in (100, 1000, 10000): + self.assertEqual(binomial(n, 0.0), 0) + self.assertEqual(binomial(n, 1.0), n) + for p in (0.0001, 0.1, 0.4, 0.49999, 0.5, 0.50001, 0.8, 0.9, 0.9999): + r = binomial(n, p) + if p > 0.5: + r = n - r + p = 1 - p + self.assertGreaterEqual(r, 0) + self.assertLessEqual(r, n) + expected = p * n + tol = 3 * n / math.sqrt(n) + self.assertGreaterEqual(r, expected - tol, (p, n, r)) + self.assertLessEqual(r, expected + tol, (p, n, r)) + # Invalid values + self.assertRaises(ValueError, binomial, -1, 0.5) + self.assertRaises(ValueError, binomial, 10, -0.1) + self.assertRaises(ValueError, binomial, 10, 1.1) + + def test_numpy_chisquare(self): + chisquare = 
jit_unary("np.random.chisquare") + r = self._follow_cpython(get_np_state_ptr()) + self._check_dist(chisquare, + functools.partial(py_chisquare, r), + [(1.5,), (2.5,)]) + + def test_numpy_f(self): + f = jit_binary("np.random.f") + r = self._follow_cpython(get_np_state_ptr()) + self._check_dist(f, functools.partial(py_f, r), + [(0.5, 1.5), (1.5, 0.8)]) + + def test_numpy_geometric(self): + geom = jit_unary("np.random.geometric") + # p out of domain + self.assertRaises(ValueError, geom, -1.0) + self.assertRaises(ValueError, geom, 0.0) + self.assertRaises(ValueError, geom, 1.001) + # Some basic checks + N = 200 + r = [geom(1.0) for i in range(N)] + self.assertPreciseEqual(r, [1] * N) + r = [geom(0.9) for i in range(N)] + n = r.count(1) + self.assertGreaterEqual(n, N // 2) + self.assertLess(n, N) + self.assertFalse([i for i in r if i > 1000]) # unlikely + r = [geom(0.4) for i in range(N)] + self.assertTrue([i for i in r if i > 4]) # likely + r = [geom(0.01) for i in range(N)] + self.assertTrue([i for i in r if i > 50]) # likely + r = [geom(1e-15) for i in range(N)] + self.assertTrue([i for i in r if i > 2**32]) # likely + + def test_numpy_gumbel(self): + gumbel = jit_binary("np.random.gumbel") + r = self._follow_numpy(get_np_state_ptr()) + self._check_dist(gumbel, r.gumbel, [(0.0, 1.0), (-1.5, 3.5)]) + + def test_numpy_gumbel_kwargs(self): + self._check_any_distrib_kwargs( + jit_with_kwargs("np.random.gumbel", ['loc', 'scale']), + get_np_state_ptr(), + distrib="gumbel", + paramlist=[{'loc': 0.0, 'scale': 1.0}, + {'loc': -1.5, 'scale': 3.5}]) + + + def test_numpy_hypergeometric(self): + # Our implementation follows Numpy's up to nsamples = 10. 
+ hg = jit_ternary("np.random.hypergeometric") + r = self._follow_numpy(get_np_state_ptr()) + self._check_dist(hg, r.hypergeometric, + [(1000, 5000, 10), (5000, 1000, 10)], + niters=30) + # Sanity checks + r = [hg(1000, 1000, 100) for i in range(100)] + self.assertTrue(all(x >= 0 and x <= 100 for x in r), r) + self.assertGreaterEqual(np.mean(r), 40.0) + self.assertLessEqual(np.mean(r), 60.0) + r = [hg(1000, 100000, 100) for i in range(100)] + self.assertTrue(all(x >= 0 and x <= 100 for x in r), r) + self.assertLessEqual(np.mean(r), 10.0) + r = [hg(100000, 1000, 100) for i in range(100)] + self.assertTrue(all(x >= 0 and x <= 100 for x in r), r) + self.assertGreaterEqual(np.mean(r), 90.0) + + def test_numpy_laplace(self): + r = self._follow_numpy(get_np_state_ptr()) + self._check_dist(jit_binary("np.random.laplace"), r.laplace, + [(0.0, 1.0), (-1.5, 3.5)]) + self._check_dist(jit_unary("np.random.laplace"), r.laplace, + [(0.0,), (-1.5,)]) + self._check_dist(jit_nullary("np.random.laplace"), r.laplace, [()]) + + def test_numpy_logistic(self): + r = self._follow_numpy(get_np_state_ptr()) + self._check_dist(jit_binary("np.random.logistic"), r.logistic, + [(0.0, 1.0), (-1.5, 3.5)]) + self._check_dist(jit_unary("np.random.logistic"), r.logistic, + [(0.0,), (-1.5,)]) + self._check_dist(jit_nullary("np.random.logistic"), r.logistic, [()]) + + def test_numpy_logseries(self): + r = self._follow_numpy(get_np_state_ptr()) + logseries = jit_unary("np.random.logseries") + self._check_dist(logseries, r.logseries, + [(0.1,), (0.99,), (0.9999,)], + niters=50) + # Numpy's logseries overflows on 32-bit builds, so instead + # hardcode Numpy's (correct) output on 64-bit builds. 
+ r = self._follow_numpy(get_np_state_ptr(), seed=1) + self.assertEqual([logseries(0.9999999999999) for i in range(10)], + [2022733531, 77296, 30, 52204, 9341294, 703057324, + 413147702918, 1870715907, 16009330, 738]) + self.assertRaises(ValueError, logseries, 0.0) + self.assertRaises(ValueError, logseries, -0.1) + self.assertRaises(ValueError, logseries, 1.1) + + def test_numpy_poisson(self): + r = self._follow_numpy(get_np_state_ptr()) + poisson = jit_unary("np.random.poisson") + # Our implementation follows Numpy's. + self._check_dist(poisson, r.poisson, + [(0.0,), (0.5,), (2.0,), (10.0,), (900.5,)], + niters=50) + self.assertRaises(ValueError, poisson, -0.1) + + def test_numpy_negative_binomial(self): + self._follow_numpy(get_np_state_ptr(), 0) + negbin = jit_binary("np.random.negative_binomial") + self.assertEqual([negbin(10, 0.9) for i in range(10)], + [2, 3, 1, 5, 2, 1, 0, 1, 0, 0]) + self.assertEqual([negbin(10, 0.1) for i in range(10)], + [55, 71, 56, 57, 56, 56, 34, 55, 101, 67]) + self.assertEqual([negbin(1000, 0.1) for i in range(10)], + [9203, 8640, 9081, 9292, 8938, + 9165, 9149, 8774, 8886, 9117]) + m = np.mean([negbin(1000000000, 0.1) + for i in range(50)]) + self.assertGreater(m, 9e9 * 0.99) + self.assertLess(m, 9e9 * 1.01) + self.assertRaises(ValueError, negbin, 0, 0.5) + self.assertRaises(ValueError, negbin, -1, 0.5) + self.assertRaises(ValueError, negbin, 10, -0.1) + self.assertRaises(ValueError, negbin, 10, 1.1) + + def test_numpy_power(self): + r = self._follow_numpy(get_np_state_ptr()) + power = jit_unary("np.random.power") + self._check_dist(power, r.power, + [(0.1,), (0.5,), (0.9,), (6.0,)]) + self.assertRaises(ValueError, power, 0.0) + self.assertRaises(ValueError, power, -0.1) + + def test_numpy_rayleigh(self): + r = self._follow_numpy(get_np_state_ptr()) + rayleigh1 = jit_unary("np.random.rayleigh") + rayleigh0 = jit_nullary("np.random.rayleigh") + self._check_dist(rayleigh1, r.rayleigh, + [(0.1,), (0.8,), (25.,), (1e3,)]) + 
self._check_dist(rayleigh0, r.rayleigh, [()]) + self.assertRaises(ValueError, rayleigh1, 0.0) + self.assertRaises(ValueError, rayleigh1, -0.1) + + def test_numpy_standard_cauchy(self): + r = self._follow_numpy(get_np_state_ptr()) + cauchy = jit_nullary("np.random.standard_cauchy") + self._check_dist(cauchy, r.standard_cauchy, [()]) + + def test_numpy_standard_t(self): + # We use CPython's algorithm for the gamma dist and numpy's + # for the normal dist. Standard T calls both so we can't check + # against either generator's output. + r = self._follow_cpython(get_np_state_ptr()) + standard_t = jit_unary("np.random.standard_t") + avg = np.mean([standard_t(5) for i in range(5000)]) + # Sanity check + self.assertLess(abs(avg), 0.5) + + def test_numpy_wald(self): + r = self._follow_numpy(get_np_state_ptr()) + wald = jit_binary("np.random.wald") + self._check_dist(wald, r.wald, [(1.0, 1.0), (2.0, 5.0)]) + self.assertRaises(ValueError, wald, 0.0, 1.0) + self.assertRaises(ValueError, wald, -0.1, 1.0) + self.assertRaises(ValueError, wald, 1.0, 0.0) + self.assertRaises(ValueError, wald, 1.0, -0.1) + + def test_numpy_wald_kwargs(self): + numba_version = jit_with_kwargs("np.random.wald", ['mean', 'scale']) + self._check_any_distrib_kwargs(numba_version, + get_np_state_ptr(), + distrib="wald", + paramlist=[{'mean': 1.0, 'scale': 1.0}, + {'mean': 2.0, 'scale': 5.0}]) + self.assertRaises(ValueError, numba_version, 0.0, 1.0) + self.assertRaises(ValueError, numba_version, -0.1, 1.0) + self.assertRaises(ValueError, numba_version, 1.0, 0.0) + self.assertRaises(ValueError, numba_version, 1.0, -0.1) + + def test_numpy_zipf(self): + r = self._follow_numpy(get_np_state_ptr()) + zipf = jit_unary("np.random.zipf") + self._check_dist(zipf, r.zipf, [(1.5,), (2.5,)], niters=100) + for val in (1.0, 0.5, 0.0, -0.1): + self.assertRaises(ValueError, zipf, val) + + def _check_shuffle(self, func, ptr, is_numpy): + """ + Check a shuffle()-like function for arrays. 
+ """ + arrs = [np.arange(20), np.arange(32).reshape((8, 4))] + if is_numpy: + r = self._follow_numpy(ptr) + else: + r = self._follow_cpython(ptr) + for a in arrs: + for i in range(3): + got = a.copy() + expected = a.copy() + func(got) + if is_numpy or len(a.shape) == 1: + r.shuffle(expected) + self.assertPreciseEqual(got, expected) + # Test with an arbitrary buffer-providing object + a = arrs[0] + b = a.copy() + func(memoryview(b)) + self.assertNotEqual(list(a), list(b)) + self.assertEqual(sorted(a), sorted(b)) + # Read-only object + with self.assertTypingError(): + func(memoryview(b"xyz")) + + def test_random_shuffle(self): + self._check_shuffle(jit_unary("random.shuffle"), get_py_state_ptr(), False) + + def test_numpy_shuffle(self): + self._check_shuffle(jit_unary("np.random.shuffle"), get_np_state_ptr(), True) + + def _check_startup_randomness(self, func_name, func_args): + """ + Check that the state is properly randomized at startup. + """ + code = """if 1: + from numba.tests import test_random + func = getattr(test_random, %(func_name)r) + print(func(*%(func_args)r)) + """ % (locals()) + numbers = set() + for i in range(3): + popen = subprocess.Popen([sys.executable, "-c", code], + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = popen.communicate() + if popen.returncode != 0: + raise AssertionError("process failed with code %s: stderr follows\n%s\n" + % (popen.returncode, err.decode())) + numbers.add(float(out.strip())) + self.assertEqual(len(numbers), 3, numbers) + + def test_random_random_startup(self): + self._check_startup_randomness("random_random", ()) + + def test_random_gauss_startup(self): + self._check_startup_randomness("random_gauss", (1.0, 1.0)) + + def test_numpy_random_startup(self): + self._check_startup_randomness("numpy_random", ()) + + def test_numpy_gauss_startup(self): + self._check_startup_randomness("numpy_normal", (1.0, 1.0)) + + def test_numpy_random_permutation(self): + func = jit_unary("np.random.permutation") + r = 
self._follow_numpy(get_np_state_ptr()) + for s in [5, 10, 15, 20]: + a = np.arange(s) + b = a.copy() + # Test array version + self.assertPreciseEqual(func(a), r.permutation(a)) + # Test int version + self.assertPreciseEqual(func(s), r.permutation(s)) + # Permutation should not modify its argument + self.assertPreciseEqual(a, b) + # Check multi-dimensional arrays + arrs = [np.arange(10).reshape(2, 5), + np.arange(27).reshape(3, 3, 3), + np.arange(36).reshape(2, 3, 3, 2)] + for a in arrs: + b = a.copy() + self.assertPreciseEqual(func(a), r.permutation(a)) + self.assertPreciseEqual(a, b) + + +class TestRandomArrays(BaseTest): + """ + Test array-producing variants of np.random.* functions. + """ + + def _compile_array_dist(self, funcname, nargs): + qualname = "np.random.%s" % (funcname,) + argstring = ', '.join('abcd'[:nargs]) + return jit_with_args(qualname, argstring) + + def _check_array_dist(self, funcname, scalar_args): + """ + Check returning an array according to a given distribution. + """ + cfunc = self._compile_array_dist(funcname, len(scalar_args) + 1) + r = self._follow_numpy(get_np_state_ptr()) + pyfunc = getattr(r, funcname) + for size in (8, (2, 3)): + args = scalar_args + (size,) + expected = pyfunc(*args) + got = cfunc(*args) + # Numpy may return int32s where we return int64s, adjust + if (expected.dtype == np.dtype('int32') + and got.dtype == np.dtype('int64')): + expected = expected.astype(got.dtype) + self.assertPreciseEqual(expected, got, prec='double', ulps=5) + args = scalar_args + (None,) + expected = pyfunc(*args) + got = cfunc(*args) + self.assertPreciseEqual(expected, got, prec='double', ulps=5) + + def _check_array_dist_gamma(self, funcname, scalar_args, extra_pyfunc_args): + """ + Check returning an array according to a given gamma distribution, + where we use CPython's implementation rather than NumPy's. 
+ """ + cfunc = self._compile_array_dist(funcname, len(scalar_args) + 1) + r = self._follow_cpython(get_np_state_ptr()) + pyfunc = getattr(r, "gammavariate") + pyfunc_args = scalar_args + extra_pyfunc_args + pyrandom = lambda *_args: pyfunc(*pyfunc_args) + + args = scalar_args + (None,) + expected = pyrandom() + got = cfunc(*args) + self.assertPreciseEqual(expected, got, prec='double', ulps=5) + for size in (8, (2, 3)): + args = scalar_args + (size,) + expected = np.empty(size) + expected_flat = expected.flat + for idx in range(expected.size): + expected_flat[idx] = pyrandom() + got = cfunc(*args) + self.assertPreciseEqual(expected, got, prec='double', ulps=5) + + def _check_array_dist_self(self, funcname, scalar_args): + """ + Check function returning an array against its scalar implementation. + Because we use the CPython gamma distribution rather than the NumPy one, + distributions which use the gamma distribution vary in ways that are + difficult to compare. Instead, we compile both the array and scalar + versions and check that the array is filled with the same values as + we would expect from the scalar version. + """ + @numba.njit + def reset(): + np.random.seed(1234) + + array_func = self._compile_array_dist(funcname, len(scalar_args) + 1) + + qualname = "np.random.%s" % (funcname,) + argstring = ', '.join('abcd'[:len(scalar_args)]) + scalar_func = jit_with_args(qualname, argstring) + + for size in (8, (2, 3)): + args = scalar_args + (size,) + reset() + got = array_func(*args) + reset() + # We're just going to go with whatever type the array version + # gives us and hope it's not Boolean or something useless. 
+ expected = np.empty(size, dtype=got.dtype) + flat = expected.flat + for idx in range(expected.size): + flat[idx] = scalar_func(*scalar_args) + self.assertPreciseEqual(expected, got, prec='double', ulps=5) + + reset() + args = scalar_args + (None,) + expected = scalar_func(*scalar_args) + reset() + got = array_func(*args) + self.assertPreciseEqual(expected, got, prec='double', ulps=5) + + def test_numpy_randint(self): + cfunc = self._compile_array_dist("randint", 3) + low, high = 1000, 10000 + size = (30, 30) + res = cfunc(low, high, size) + self.assertIsInstance(res, np.ndarray) + self.assertEqual(res.shape, size) + self.assertIn(res.dtype, (np.dtype('int32'), np.dtype('int64'))) + self.assertTrue(np.all(res >= low)) + self.assertTrue(np.all(res < high)) + # Crude statistical tests + mean = (low + high) / 2 + tol = (high - low) / 20 + self.assertGreaterEqual(res.mean(), mean - tol) + self.assertLessEqual(res.mean(), mean + tol) + + def test_numpy_random_random(self): + cfunc = self._compile_array_dist("random", 1) + size = (30, 30) + res = cfunc(size) + self.assertIsInstance(res, np.ndarray) + self.assertEqual(res.shape, size) + self.assertEqual(res.dtype, np.dtype('float64')) + # Results are within expected bounds + self.assertTrue(np.all(res >= 0.0)) + self.assertTrue(np.all(res < 1.0)) + # Crude statistical tests + self.assertTrue(np.any(res <= 0.1)) + self.assertTrue(np.any(res >= 0.9)) + mean = res.mean() + self.assertGreaterEqual(mean, 0.45) + self.assertLessEqual(mean, 0.55) + + # Sanity-check various distributions. For convenience, we only check + # those distributions that produce the exact same values as Numpy's. 
+ + def test_numpy_beta(self): + self._check_array_dist_self("beta", (0.5, 2.5)) + + def test_numpy_binomial(self): + self._check_array_dist("binomial", (20, 0.5)) + + def test_numpy_chisquare(self): + self._check_array_dist_self("chisquare", (1.5,)) + + def test_numpy_exponential(self): + self._check_array_dist("exponential", (1.5,)) + + def test_numpy_f(self): + self._check_array_dist_self("f", (0.5, 1.5)) + + def test_numpy_gamma(self): + self._check_array_dist_gamma("gamma", (2.0, 1.0), ()) + + def test_numpy_geometric(self): + self._check_array_dist("geometric", (1.0,)) + + def test_numpy_gumbel(self): + self._check_array_dist("gumbel", (1.5, 0.5)) + + def test_numpy_hypergeometric(self): + self._check_array_dist("hypergeometric", (1000, 5000, 10)) + + def test_numpy_laplace(self): + self._check_array_dist("laplace", (1.5, 0.5)) + + def test_numpy_logistic(self): + self._check_array_dist("logistic", (1.5, 0.5)) + + def test_numpy_lognormal(self): + self._check_array_dist("lognormal", (1.5, 2.0)) + + def test_numpy_logseries(self): + self._check_array_dist("logseries", (0.8,)) + + def test_numpy_normal(self): + self._check_array_dist("normal", (0.5, 2.0)) + + def test_numpy_pareto(self): + self._check_array_dist("pareto", (0.5,)) + + def test_numpy_poisson(self): + self._check_array_dist("poisson", (0.8,)) + + def test_numpy_power(self): + self._check_array_dist("power", (0.8,)) + + def test_numpy_rand(self): + cfunc = jit(nopython=True)(numpy_check_rand) + expected, got = cfunc(42, 2, 3) + self.assertEqual(got.shape, (2, 3)) + self.assertPreciseEqual(expected, got) + + def test_numpy_randn(self): + cfunc = jit(nopython=True)(numpy_check_randn) + expected, got = cfunc(42, 2, 3) + self.assertEqual(got.shape, (2, 3)) + self.assertPreciseEqual(expected, got) + + def test_numpy_rayleigh(self): + self._check_array_dist("rayleigh", (0.8,)) + + def test_numpy_standard_cauchy(self): + self._check_array_dist("standard_cauchy", ()) + + def 
test_numpy_standard_exponential(self): + self._check_array_dist("standard_exponential", ()) + + def test_numpy_standard_gamma(self): + self._check_array_dist_gamma("standard_gamma", (2.0,), (1.0,)) + + def test_numpy_standard_normal(self): + self._check_array_dist("standard_normal", ()) + + def test_numpy_triangular(self): + self._check_array_dist("triangular", (1.5, 2.2, 3.5)) + + def test_numpy_uniform(self): + self._check_array_dist("uniform", (0.1, 0.4)) + + def test_numpy_wald(self): + self._check_array_dist("wald", (0.1, 0.4)) + + def test_numpy_vonmises(self): + self._check_array_dist_self("vonmises", (0.5, 2.5)) + + def test_numpy_zipf(self): + self._check_array_dist("zipf", (2.5,)) + + +class TestRandomChoice(BaseTest): + """ + Test np.random.choice. + """ + + def _check_results(self, pop, res, replace=True): + """ + Check basic expectations about a batch of samples. + """ + spop = set(pop) + sres = set(res) + # All results are in the population + self.assertLessEqual(sres, spop) + # Sorted results are unlikely + self.assertNotEqual(sorted(res), list(res)) + if replace: + # Duplicates are likely + self.assertLess(len(sres), len(res), res) + else: + # No duplicates + self.assertEqual(len(sres), len(res), res) + + def _check_dist(self, pop, samples): + """ + Check distribution of some samples. + """ + # Sanity check that we have enough samples + self.assertGreaterEqual(len(samples), len(pop) * 100) + # Check equidistribution of samples + expected_frequency = len(samples) / len(pop) + c = collections.Counter(samples) + for value in pop: + n = c[value] + self.assertGreaterEqual(n, expected_frequency * 0.5) + self.assertLessEqual(n, expected_frequency * 2.0) + + def _accumulate_array_results(self, func, nresults): + """ + Accumulate array results produced by *func* until they reach + *nresults* elements. 
+ """ + res = [] + while len(res) < nresults: + res += list(func().flat) + return res[:nresults] + + def _check_choice_1(self, a, pop): + """ + Check choice(a) against pop. + """ + cfunc = jit(nopython=True)(numpy_choice1) + n = len(pop) + res = [cfunc(a) for i in range(n)] + self._check_results(pop, res) + dist = [cfunc(a) for i in range(n * 100)] + self._check_dist(pop, dist) + + def test_choice_scalar_1(self): + """ + Test choice(int) + """ + n = 50 + pop = list(range(n)) + self._check_choice_1(n, pop) + + def test_choice_array_1(self): + """ + Test choice(array) + """ + pop = np.arange(50) * 2 + 100 + self._check_choice_1(pop, pop) + + def _check_array_results(self, func, pop, replace=True): + """ + Check array results produced by *func* and their distribution. + """ + n = len(pop) + res = list(func().flat) + self._check_results(pop, res, replace) + dist = self._accumulate_array_results(func, n * 100) + self._check_dist(pop, dist) + + def _check_choice_2(self, a, pop): + """ + Check choice(a, size) against pop. + """ + cfunc = jit(nopython=True)(numpy_choice2) + n = len(pop) + # Final sizes should be large enough, so as to stress + # replacement + sizes = [n - 10, (3, (n - 1) // 3), n * 10] + + for size in sizes: + # Check result shape + res = cfunc(a, size) + expected_shape = size if isinstance(size, tuple) else (size,) + self.assertEqual(res.shape, expected_shape) + # Check results and their distribution + self._check_array_results(lambda: cfunc(a, size), pop) + + def test_choice_scalar_2(self): + """ + Test choice(int, size) + """ + n = 50 + pop = np.arange(n) + self._check_choice_2(n, pop) + + def test_choice_array_2(self): + """ + Test choice(array, size) + """ + pop = np.arange(50) * 2 + 100 + self._check_choice_2(pop, pop) + + def _check_choice_3(self, a, pop): + """ + Check choice(a, size, replace) against pop. 
+ """ + cfunc = jit(nopython=True)(numpy_choice3) + n = len(pop) + # Final sizes should be close but slightly <= n, so as to stress + # replacement (or not) + sizes = [n - 10, (3, (n - 1) // 3)] + replaces = [True, False] + + # Check result shapes + for size in sizes: + for replace in [True, False]: + res = cfunc(a, size, replace) + expected_shape = size if isinstance(size, tuple) else (size,) + self.assertEqual(res.shape, expected_shape) + + # Check results for replace=True + for size in sizes: + self._check_array_results(lambda: cfunc(a, size, True), pop) + # Check results for replace=False + for size in sizes: + self._check_array_results(lambda: cfunc(a, size, False), pop, False) + + # Can't ask for more samples than population size with replace=False + for size in [n + 1, (3, n // 3 + 1)]: + with self.assertRaises(ValueError): + cfunc(a, size, False) + + def test_choice_scalar_3(self): + """ + Test choice(int, size, replace) + """ + n = 50 + pop = np.arange(n) + self._check_choice_3(n, pop) + + def test_choice_array_3(self): + """ + Test choice(array, size, replace) + """ + pop = np.arange(50) * 2 + 100 + self._check_choice_3(pop, pop) + + def test_choice_follows_seed(self): + # See issue #3888, np.random.choice must acknowledge the seed + + @jit(nopython=True) + def numba_rands(n_to_return, choice_array): + np.random.seed(1337) + out = np.empty((n_to_return, 2), np.int32) + for i in range(n_to_return): + out[i] = np.random.choice(choice_array, 2, False) + return out + + choice_array = np.random.randint(300, size=1000).astype(np.int32) + tmp_np = choice_array.copy() + expected = numba_rands.py_func(5, tmp_np) + tmp_nb = choice_array.copy() + got = numba_rands(5, tmp_nb) + np.testing.assert_allclose(expected, got) + # check no mutation + np.testing.assert_allclose(choice_array, tmp_np) + np.testing.assert_allclose(choice_array, tmp_nb) + + +class TestRandomMultinomial(BaseTest): + """ + Test np.random.multinomial. 
+ """ + # A biased dice + pvals = np.array([1, 1, 1, 2, 3, 1], dtype=np.float64) + pvals /= pvals.sum() + + def _check_sample(self, n, pvals, sample): + """ + Check distribution of some samples. + """ + self.assertIsInstance(sample, np.ndarray) + self.assertEqual(sample.shape, (len(pvals),)) + self.assertIn(sample.dtype, (np.dtype('int32'), np.dtype('int64'))) + # Statistical properties + self.assertEqual(sample.sum(), n) + for p, nexp in zip(pvals, sample): + self.assertGreaterEqual(nexp, 0) + self.assertLessEqual(nexp, n) + pexp = float(nexp) / n + self.assertGreaterEqual(pexp, p * 0.5) + self.assertLessEqual(pexp, p * 2.0) + + def test_multinomial_2(self): + """ + Test multinomial(n, pvals) + """ + cfunc = jit(nopython=True)(numpy_multinomial2) + n, pvals = 1000, self.pvals + res = cfunc(n, pvals) + self._check_sample(n, pvals, res) + # pvals as list + pvals = list(pvals) + res = cfunc(n, pvals) + self._check_sample(n, pvals, res) + # A case with extreme probabilities + n = 1000000 + pvals = np.array([1, 0, n // 100, 1], dtype=np.float64) + pvals /= pvals.sum() + res = cfunc(n, pvals) + self._check_sample(n, pvals, res) + + def test_multinomial_3_int(self): + """ + Test multinomial(n, pvals, size: int) + """ + cfunc = jit(nopython=True)(numpy_multinomial3) + n, pvals = 1000, self.pvals + k = 10 + res = cfunc(n, pvals, k) + self.assertEqual(res.shape[0], k) + for sample in res: + self._check_sample(n, pvals, sample) + + def test_multinomial_3_tuple(self): + """ + Test multinomial(n, pvals, size: tuple) + """ + cfunc = jit(nopython=True)(numpy_multinomial3) + n, pvals = 1000, self.pvals + k = (3, 4) + res = cfunc(n, pvals, k) + self.assertEqual(res.shape[:-1], k) + for sample in res.reshape((-1, res.shape[-1])): + self._check_sample(n, pvals, sample) + + +class TestRandomDirichlet(BaseTest): + alpha = np.array([1, 1, 1, 2], dtype=np.float64) + + def _check_sample(self, alpha, size, sample): + + """Check output structure""" + self.assertIsInstance(sample, 
np.ndarray) + self.assertEqual(sample.dtype, np.float64) + if size is None: + self.assertEqual(sample.size, len(alpha)) + elif type(size) is int: + self.assertEqual(sample.shape, (size, len(alpha))) + else: + self.assertEqual(sample.shape, size + (len(alpha),)) + + """Check statistical properties""" + for val in np.nditer(sample): + self.assertGreaterEqual(val, 0) + self.assertLessEqual(val, 1) + if size is None: + self.assertAlmostEqual(sample.sum(), 1, places=5) + else: + for totals in np.nditer(sample.sum(axis=-1)): + self.assertAlmostEqual(totals, 1, places=5) + + def test_dirichlet_default(self): + """ + Test dirichlet(alpha, size=None) + """ + cfunc = jit(nopython=True)(numpy_dirichlet_default) + alphas = ( + self.alpha, + tuple(self.alpha), + np.array([1, 1, 10000, 1], dtype=np.float64), + np.array([1, 1, 1.5, 1], dtype=np.float64), + ) + for alpha in alphas: + res = cfunc(alpha) + self._check_sample(alpha, None, res) + + def test_dirichlet(self): + """ + Test dirichlet(alpha, size=None) + """ + cfunc = jit(nopython=True)(numpy_dirichlet) + sizes = (None, (10,), (10, 10)) + alphas = ( + self.alpha, + tuple(self.alpha), + np.array([1, 1, 10000, 1], dtype=np.float64), + np.array([1, 1, 1.5, 1], dtype=np.float64), + ) + + for alpha, size in itertools.product(alphas, sizes): + res = cfunc(alpha, size) + self._check_sample(alpha, size, res) + + def test_dirichlet_exceptions(self): + cfunc = jit(nopython=True)(numpy_dirichlet) + alpha = tuple((0, 1, 1)) + with self.assertRaises(ValueError) as raises: + cfunc(alpha, 1) + self.assertIn("dirichlet: alpha must be > 0.0", str(raises.exception)) + + alpha = self.alpha + sizes = (True, 3j, 1.5, (1.5, 1), (3j, 1), (3j, 3j), (np.int8(3), np.int64(7))) + for size in sizes: + with self.assertRaises(TypingError) as raises: + cfunc(alpha, size) + self.assertIn( + "np.random.dirichlet(): size should be int or " + "tuple of ints or None, got", + str(raises.exception), + ) + +class TestRandomNoncentralChiSquare(BaseTest): + + def 
_check_sample(self, size, sample): + + # Check output structure + if size is not None: + self.assertIsInstance(sample, np.ndarray) + self.assertEqual(sample.dtype, np.float64) + + if isinstance(size, int): + self.assertEqual(sample.shape, (size,)) + else: + self.assertEqual(sample.shape, size) + else: + self.assertIsInstance(sample, float) + + # Check statistical properties + for val in np.nditer(sample): + self.assertGreaterEqual(val, 0) + + def test_noncentral_chisquare_default(self): + """ + Test noncentral_chisquare(df, nonc, size=None) + """ + cfunc = jit(nopython=True)(numpy_noncentral_chisquare_default) + inputs = ( + (0.5, 1), # test branch when df < 1 + (1, 5), + (5, 1), + (100000, 1), + (1, 10000), + ) + for df, nonc in inputs: + res = cfunc(df, nonc) + self._check_sample(None, res) + res = cfunc(df, np.nan) # test branch when nonc is nan + self.assertTrue(np.isnan(res)) + + + def test_noncentral_chisquare(self): + """ + Test noncentral_chisquare(df, nonc, size) + """ + cfunc = jit(nopython=True)(numpy_noncentral_chisquare) + sizes = (None, 10, (10,), (10, 10)) + inputs = ( + (0.5, 1), + (1, 5), + (5, 1), + (100000, 1), + (1, 10000), + ) + + for (df, nonc), size in itertools.product(inputs, sizes): + res = cfunc(df, nonc, size) + self._check_sample(size, res) + res = cfunc(df, np.nan, size) # test branch when nonc is nan + self.assertTrue(np.isnan(res).all()) + + def test_noncentral_chisquare_exceptions(self): + cfunc = jit(nopython=True)(numpy_noncentral_chisquare) + df, nonc = 0, 1 + with self.assertRaises(ValueError) as raises: + cfunc(df, nonc, 1) + self.assertIn("df <= 0", str(raises.exception)) + + df, nonc = 1, -1 + with self.assertRaises(ValueError) as raises: + cfunc(df, nonc, 1) + self.assertIn("nonc < 0", str(raises.exception)) + + df, nonc = 1, 1 + sizes = (True, 3j, 1.5, (1.5, 1), (3j, 1), (3j, 3j), (np.int8(3), np.int64(7))) + for size in sizes: + with self.assertRaises(TypingError) as raises: + cfunc(df, nonc, size) + self.assertIn( + 
"np.random.noncentral_chisquare(): size should be int or " + "tuple of ints or None, got", + str(raises.exception), + ) + +@jit(nopython=True, nogil=True) +def py_extract_randomness(seed, out): + if seed != 0: + random.seed(seed) + for i in range(out.size): + out[i] = random.getrandbits(32) + +_randint_limit = 1 << 32 + +@jit(nopython=True, nogil=True) +def np_extract_randomness(seed, out): + if seed != 0: + np.random.seed(seed) + s = 0 + for i in range(out.size): + out[i] = np.random.randint(_randint_limit) + + + +class ConcurrencyBaseTest(TestCase): + + # Enough iterations for: + # 1. Mersenne-Twister state shuffles to occur (once every 624) + # 2. Race conditions to be plausible + # 3. Nice statistical properties to emerge + _extract_iterations = 100000 + + def setUp(self): + # Warm up, to avoid compiling in the threads + args = (42, self._get_output(1)) + py_extract_randomness(*args) + np_extract_randomness(*args) + + def _get_output(self, size): + return np.zeros(size, dtype=np.uint32) + + def check_output(self, out): + """ + Check statistical properties of output. 
+ """ + # Output should follow a uniform distribution in [0, 1<<32) + expected_avg = 1 << 31 + expected_std = (1 << 32) / np.sqrt(12) + rtol = 0.05 # given enough iterations + np.testing.assert_allclose(out.mean(), expected_avg, rtol=rtol) + np.testing.assert_allclose(out.std(), expected_std, rtol=rtol) + + def check_several_outputs(self, results, same_expected): + # Outputs should have the expected statistical properties + # (an uninitialized PRNG or a PRNG whose internal state was + # corrupted by a race condition could produce bogus randomness) + for out in results: + self.check_output(out) + + # Check all threads gave either the same sequence or + # distinct sequences + if same_expected: + expected_distinct = 1 + else: + expected_distinct = len(results) + + heads = {tuple(out[:5]) for out in results} + tails = {tuple(out[-5:]) for out in results} + sums = {out.sum() for out in results} + self.assertEqual(len(heads), expected_distinct, heads) + self.assertEqual(len(tails), expected_distinct, tails) + self.assertEqual(len(sums), expected_distinct, sums) + + +class TestThreads(ConcurrencyBaseTest): + """ + Check the PRNG behaves well with threads. + """ + + def extract_in_threads(self, nthreads, extract_randomness, seed): + """ + Run *nthreads* threads extracting randomness with the given *seed* + (no seeding if 0). + """ + results = [self._get_output(self._extract_iterations) + for i in range(nthreads + 1)] + + def target(i): + # The PRNG will be seeded in thread + extract_randomness(seed=seed, out=results[i]) + + threads = [threading.Thread(target=target, args=(i,)) + for i in range(nthreads)] + + for th in threads: + th.start() + # Exercise main thread as well + target(nthreads) + for th in threads: + th.join() + + return results + + def check_thread_safety(self, extract_randomness): + """ + When initializing the PRNG the same way, each thread + should produce the same sequence of random numbers, + using independent states, regardless of parallel + execution. 
+ """ + # Note the seed value doesn't matter, as long as it's + # the same for all threads + results = self.extract_in_threads(15, extract_randomness, seed=42) + + # All threads gave the same sequence + self.check_several_outputs(results, same_expected=True) + + def check_implicit_initialization(self, extract_randomness): + """ + The PRNG in new threads should be implicitly initialized with + system entropy, if seed() wasn't called. + """ + results = self.extract_in_threads(4, extract_randomness, seed=0) + + # All threads gave a different, valid random sequence + self.check_several_outputs(results, same_expected=False) + + def test_py_thread_safety(self): + self.check_thread_safety(py_extract_randomness) + + def test_np_thread_safety(self): + self.check_thread_safety(np_extract_randomness) + + def test_py_implicit_initialization(self): + self.check_implicit_initialization(py_extract_randomness) + + def test_np_implicit_initialization(self): + self.check_implicit_initialization(np_extract_randomness) + + +@unittest.skipIf(os.name == 'nt', "Windows is not affected by fork() issues") +class TestProcesses(ConcurrencyBaseTest): + """ + Check the PRNG behaves well in child processes. + """ + + # Avoid nested multiprocessing AssertionError + # ("daemonic processes are not allowed to have children") + _numba_parallel_test_ = False + + + def extract_in_processes(self, nprocs, extract_randomness): + """ + Run *nprocs* processes extracting randomness + without explicit seeding. + """ + q = multiprocessing.Queue() + results = [] + + def target_inner(): + out = self._get_output(self._extract_iterations) + extract_randomness(seed=0, out=out) + return out + + def target(): + try: + out = target_inner() + q.put(out) + except Exception as e: + # Ensure an exception in a child gets reported + # in the parent. + q.put(e) + raise + + if hasattr(multiprocessing, 'get_context'): + # The test works only in fork context. 
+ mpc = multiprocessing.get_context('fork') + else: + mpc = multiprocessing + procs = [mpc.Process(target=target) + for i in range(nprocs)] + for p in procs: + p.start() + # Need to dequeue before joining, otherwise the large size of the + # enqueued objects will lead to deadlock. + for i in range(nprocs): + results.append(q.get(timeout=5)) + for p in procs: + p.join() + + # Exercise parent process as well; this will detect if the + # same state was reused for one of the children. + results.append(target_inner()) + for res in results: + if isinstance(res, Exception): + self.fail("Exception in child: %s" % (res,)) + + return results + + def check_implicit_initialization(self, extract_randomness): + """ + The PRNG in new processes should be implicitly initialized + with system entropy, to avoid reproducing the same sequences. + """ + results = self.extract_in_processes(2, extract_randomness) + + # All processes gave a different, valid random sequence + self.check_several_outputs(results, same_expected=False) + + def test_py_implicit_initialization(self): + self.check_implicit_initialization(py_extract_randomness) + + def test_np_implicit_initialization(self): + self.check_implicit_initialization(np_extract_randomness) + + +class TestNumPyRandomAPI(TestCase): + API_CALL_TESTS = {"np.random.beta": {'a': 1., 'b': 2., 'size': 3}, + "np.random.binomial": {'n': 1, 'p': 0.3, 'size': 3}, + "np.random.chisquare": {'df': 2., 'size': 3}, + "np.random.choice": {'a': 2, 'size': 3}, + "np.random.dirichlet": {'alpha': (2,), 'size': 3}, + "np.random.exponential": {'scale': 1., 'size': 3}, + "np.random.f": {'dfnum': 1., 'dfden': 2., 'size': 3}, + "np.random.gamma": {'shape': 2, 'scale': 2.0, 'size': 3}, + "np.random.geometric": {'p': 1., 'size': 3}, + "np.random.gumbel": {'loc': 0., 'scale': 1., 'size': 3}, + "np.random.hypergeometric": {'ngood': 1, 'nbad': 1, + 'nsample': 1, 'size': 3}, + "np.random.laplace": {'loc': 0., 'scale': 1., 'size': 3}, + "np.random.logistic": {'loc': 0., 
'scale': 1., 'size': 3}, + "np.random.lognormal": {'mean': 0., 'sigma': 1., 'size': 3}, + "np.random.logseries": {'p': 0.5, 'size': 3}, + "np.random.multinomial": {'n': 1, 'pvals': (1,), 'size': 3}, + "np.random.negative_binomial": {'n': 1, 'p': 0.5}, + "np.random.noncentral_chisquare": {'df': 1., 'nonc': 1., + 'size': 3}, + "np.random.normal": {'loc': 0., 'scale': 1., 'size': 3}, + "np.random.pareto": {'a': 2., 'size': 3}, + # NOTE: The NumPy impl of permutation "takes no keyword + # arguments". + # "np.random.permutation": {'x': (1, 2, 3)}, + "np.random.poisson": {'lam': 1., 'size': 3}, + "np.random.power": {'a': 2., 'size': 3}, + # NOTE: The NumPy impl of rand essentially takes *args so kwargs + # are unsupported. + # "np.random.rand": {'d0': 1, 'd1': 2, ...}} + "np.random.randint": {'low': 1, 'high': 2, 'size': 3}, + # NOTE: The NumPy impl of randn essentially takes *args so + # kwargs are unsupported. + # "np.random.randn": {'d0': 1, 'd1': 2, ...}} + "np.random.random": {'size': 3}, + "np.random.random_sample": {'size': 3}, + "np.random.ranf": {'size': 3}, + "np.random.rayleigh": {'scale': 1., 'size': 3}, + "np.random.sample": {'size': 3}, + "np.random.seed": {'seed': 4}, + # NOTE: The NumPy impl of shuffle "takes no keyword arguments". 
+ # "np.random.shuffle" + "np.random.standard_cauchy": {'size': 3}, + "np.random.standard_exponential": {'size': 3}, + "np.random.standard_gamma": {'shape': 2., 'size': 3}, + "np.random.standard_normal": {'size': 3}, + "np.random.standard_t": {'df': 2., 'size': 3}, + "np.random.triangular": {'left': 1., 'mode': 2., 'right': 3., + 'size': 3}, + "np.random.uniform": {'low': 1., 'high': 2., 'size': 3}, + "np.random.vonmises": {'mu': 1., 'kappa': 2., 'size': 3}, + "np.random.wald": {'mean': 1., 'scale': 2., 'size': 3}, + "np.random.weibull": {'a': 1., 'size': 3}, + "np.random.zipf": {'a': 2., 'size': 3},} + + def test_call_by_name(self): + # Checks that the NumPy impls in Numba can be used via call-by-name + # args, see issue numba#9053. + # + # Checking call-by-name has to be done somewhat manually as the NumPy + # numpy.random.* functions do not have signatures, see numpy#8734. + + # Herein, it doesn't matter what the values are, the names and types + # just have to make sense. + + for fn, args in self.API_CALL_TESTS.items(): + argstr = ', '.join([f'{k}={v}' for k, v in args.items()]) + template = dedent(f""" + def foo(): + return {fn}({argstr}) + """) + l = {} + exec(template, {'np': np}, l) + # The answer doesn't matter, these are tested in the tests above, + # the purpose of this test is to ensure that the code compiles with + # the args presented via name, i.e. the overloads are defined + # correctly with respect to the public API of the function. + func = l['foo'] + func() + njit(func).compile(()) + + def test_call_distributions_with_empty_size(self): + # Check that the distributions can be called with size=() as per the + # NumPy API, see issue numba#8975. 
+ for fn, args in self.API_CALL_TESTS.items(): + if 'size' not in args: + continue + + args['size'] = () + argstr = ', '.join([f'{k}={v}' for k, v in args.items()]) + template = dedent(f""" + def foo(): + return {fn}({argstr}) + """) + l = {} + exec(template, {'np': np}, l) + + # Store result obtained by calling the np and nb functions. + # We are not interested by the values, they were tested above + # but we want to ensure that the function compiles, and that + # the shapes and types are the same between numpy and numba. + func = l['foo'] + np_val = func() + + nb_func = njit(func).compile(()) + nb_val = nb_func() + + if isinstance(nb_val, np.ndarray): + self.assertEqual(nb_val.shape, np_val.shape) + else: + self.assertIsInstance(nb_val, np_val.__class__) + + + + +if __name__ == "__main__": + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_range.py b/venv/lib/python3.10/site-packages/numba/tests/test_range.py new file mode 100644 index 0000000000000000000000000000000000000000..b97e36fd4edc2afaceed5f13736cd3b340ac2993 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_range.py @@ -0,0 +1,187 @@ +import unittest + +import sys + +import numpy + +from numba import jit, njit +from numba.core import types, utils +from numba.tests.support import tag + +from numba.core.inline_closurecall import length_of_iterator + +def loop1(n): + s = 0 + for i in range(n): + s += i + return s + + +def loop2(a, b): + s = 0 + for i in range(a, b): + s += i + return s + + +def loop3(a, b, c): + s = 0 + for i in range(a, b, c): + s += i + return s + + +def range_len1(n): + return len(range(n)) + +def range_len2(a, b): + return len(range(a, b)) + +def range_len3(a, b, c): + return len(range(a, b, c)) +def range_iter_len1(a): + return length_of_iterator(iter(range(a))) + +def range_iter_len2(a): + return length_of_iterator(iter(a)) + +def range_attrs(start, stop, step): + r1 = range(start) + r2 = range(start, stop) + r3 = range(start, stop, 
step) + tmp = [] + for r in (r1, r2, r3): + tmp.append((r.start, r.stop, r.step)) + return tmp + +def range_contains(val, start, stop, step): + r1 = range(start) + r2 = range(start, stop) + r3 = range(start, stop, step) + return [val in r for r in (r1, r2, r3)] + + +class TestRange(unittest.TestCase): + + def test_loop1_int16(self): + pyfunc = loop1 + cfunc = njit((types.int16,))(pyfunc) + self.assertTrue(cfunc(5), pyfunc(5)) + + def test_loop2_int16(self): + pyfunc = loop2 + cfunc = njit((types.int16, types.int16))(pyfunc) + self.assertTrue(cfunc(1, 6), pyfunc(1, 6)) + + def test_loop3_int32(self): + pyfunc = loop3 + cfunc = njit((types.int32, types.int32, types.int32))(pyfunc) + arglist = [ + (1, 2, 1), + (2, 8, 3), + (-10, -11, -10), + (-10, -10, -2), + ] + for args in arglist: + self.assertEqual(cfunc(*args), pyfunc(*args)) + + def test_range_len1(self): + pyfunc = range_len1 + typelist = [types.int16, types.int32, types.int64] + arglist = [5, 0, -5] + for typ in typelist: + cfunc = njit((typ,))(pyfunc) + for arg in arglist: + self.assertEqual(cfunc(typ(arg)), pyfunc(typ(arg))) + + def test_range_len2(self): + pyfunc = range_len2 + typelist = [types.int16, types.int32, types.int64] + arglist = [(1,6), (6,1), (-5, -1)] + for typ in typelist: + cfunc = njit((typ, typ))(pyfunc) + for args in arglist: + args_ = tuple(typ(x) for x in args) + self.assertEqual(cfunc(*args_), pyfunc(*args_)) + + def test_range_len3(self): + pyfunc = range_len3 + typelist = [types.int16, types.int32, types.int64] + arglist = [ + (1, 2, 1), + (2, 8, 3), + (-10, -11, -10), + (-10, -10, -2), + ] + for typ in typelist: + cfunc = njit((typ, typ, typ))(pyfunc) + for args in arglist: + args_ = tuple(typ(x) for x in args) + self.assertEqual(cfunc(*args_), pyfunc(*args_)) + + def test_range_iter_len1(self): + range_func = range_len1 + range_iter_func = range_iter_len1 + typelist = [types.int16, types.int32, types.int64] + arglist = [5, 0, -5] + for typ in typelist: + cfunc = 
njit((typ,))(range_iter_func) + for arg in arglist: + self.assertEqual(cfunc(typ(arg)), range_func(typ(arg))) + + def test_range_iter_list(self): + range_iter_func = range_iter_len2 + cfunc = njit((types.List(types.intp, reflected=True),))(range_iter_func) + arglist = [1, 2, 3, 4, 5] + self.assertEqual(cfunc(arglist), len(arglist)) + + def test_range_attrs(self): + pyfunc = range_attrs + arglist = [(0, 0, 1), + (0, -1, 1), + (-1, 1, 1), + (-1, 4, 1), + (-1, 4, 10), + (5, -5, -2),] + + cfunc = njit((types.int64, types.int64, types.int64),)(pyfunc) + for arg in arglist: + self.assertEqual(cfunc(*arg), pyfunc(*arg)) + + def test_range_contains(self): + pyfunc = range_contains + arglist = [(0, 0, 1), + (-1, 0, 1), + (1, 0, -1), + (0, -1, 1), + (0, 1, -1), + (-1, 1, 1), + (-1, 4, 1), + (-1, 4, 10), + (5, -5, -2),] + + bool_vals = [True, False] + int_vals = [-10, -6, -5, -4, -2, -1, 0, + 1, 2, 4, 5, 6, 10] + float_vals = [-1.1, -1.0, 0.0, 1.0, 1.1] + complex_vals = [1 + 0j, 1 + 1j, 1.1 + 0j, 1.0 + 1.1j] + + vallist = (bool_vals + int_vals + float_vals + + complex_vals) + + cfunc = njit(pyfunc) + for arg in arglist: + for val in vallist: + self.assertEqual(cfunc(val, *arg), pyfunc(val, *arg)) + + non_numeric_vals = [{'a': 1}, [1, ], 'abc', (1,)] + + cfunc_obj = jit(pyfunc, forceobj=True) + for arg in arglist: + for val in non_numeric_vals: + self.assertEqual(cfunc_obj(val, *arg), pyfunc(val, *arg)) + + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_recarray_usecases.py b/venv/lib/python3.10/site-packages/numba/tests/test_recarray_usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..06464b470f3ee56dd6435b9218cc4ae63c596fa4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_recarray_usecases.py @@ -0,0 +1,146 @@ +import sys +import unittest + +import numpy as np + +from numba import njit +from numba.core import types +from numba.tests.support import 
captured_stdout, TestCase +from numba.np import numpy_support + + +def usecase1(arr1, arr2): + """Base on https://github.com/numba/numba/issues/370 + + Modified to add test-able side effect. + """ + n1 = arr1.size + n2 = arr2.size + + for i1 in range(n1): + st1 = arr1[i1] + for i2 in range(n2): + st2 = arr2[i2] + st2.row += st1.p * st2.p + st1.row - st1.col + + st1.p += st2.p + st1.col -= st2.col + + +def usecase2(x, N): + """ + Base on test1 of https://github.com/numba/numba/issues/381 + """ + for k in range(N): + y = x[k] + print(y.f1, y.s1, y.f2) + + +def usecase3(x, N): + """ + Base on test2 of https://github.com/numba/numba/issues/381 + """ + for k in range(N): + print(x.f1[k], x.s1[k], x.f2[k]) + + +def usecase4(x, N): + """ + Base on test3 of https://github.com/numba/numba/issues/381 + """ + for k in range(N): + y = x[k] + print(y.f1, x.s1[k], y.f2) + + +def usecase5(x, N): + """ + Base on test4 of https://github.com/numba/numba/issues/381 + """ + for k in range(N): + print(x[k].f1, x.s1[k], x[k].f2) + + +class TestRecordUsecase(TestCase): + + def setUp(self): + fields = [('f1', '0 (True). + + Note: The exact statistic varies across platform. + + NOTE: Tests using this `check` method need to run in subprocesses as + `njit` sets up the module pass manager etc once and the overrides have + no effect else. + """ + + with override_config('LLVM_REFPRUNE_PASS', '1'): + cres = njit((*argtys,))(func).overloads[(*argtys,)] + + pstats = cres.metadata.get('prune_stats', None) + self.assertIsNotNone(pstats) + + for k, v in prune_types.items(): + stat = getattr(pstats, k, None) + self.assertIsNotNone(stat) + msg = f'failed checking {k}' + if v: + self.assertGreater(stat, 0, msg=msg) + else: + self.assertEqual(stat, 0, msg=msg) + + @TestCase.run_test_in_subprocess + def test_basic_block_1(self): + # some nominally involved control flow and ops, there's only basic_block + # opportunities present here. 
+ def func(n): + a = np.zeros(n) + acc = 0 + if n > 4: + b = a[1:] + acc += b[1] + else: + c = a[:-1] + acc += c[0] + return acc + + self.check(func, (types.intp), basicblock=True) + + @TestCase.run_test_in_subprocess + def test_diamond_1(self): + # most basic?! diamond + def func(n): + a = np.ones(n) + x = 0 + if n > 2: + x = a.sum() + return x + 1 + + # disable fanout pruning + with set_refprune_flags('per_bb,diamond'): + self.check(func, (types.intp), basicblock=True, diamond=True, + fanout=False, fanout_raise=False) + + @TestCase.run_test_in_subprocess + def test_diamond_2(self): + # more complex diamonds + def func(n): + con = [] + for i in range(n): + con.append(np.arange(i)) + c = 0.0 + for arr in con: + c += arr.sum() / (1 + arr.size) + return c + + # disable fanout pruning + with set_refprune_flags('per_bb,diamond'): + self.check(func, (types.intp), basicblock=True, diamond=True, + fanout=False, fanout_raise=False) + + @TestCase.run_test_in_subprocess + def test_fanout_1(self): + # most basic?! fan-out + def func(n): + a = np.zeros(n) + b = np.zeros(n) + x = (a, b) + acc = 0. 
+ for i in x: + acc += i[0] + return acc + + self.check(func, (types.intp), basicblock=True, fanout=True) + + @TestCase.run_test_in_subprocess + def test_fanout_2(self): + # fanout with raise + def func(n): + a = np.zeros(n) + b = np.zeros(n) + x = (a, b) + for i in x: + if n: + raise ValueError + return x + + with set_refprune_flags('per_bb,fanout'): + self.check(func, (types.intp), basicblock=True, diamond=False, + fanout=True, fanout_raise=False) + + @TestCase.run_test_in_subprocess + def test_fanout_3(self): + # fanout with raise + def func(n): + ary = np.arange(n) + # basically an impl of array.sum + c = 0 + # The raise is from StopIteration of next(iterator) implicit in + # the for loop + for v in np.nditer(ary): + c += v.item() + return 1 + + with set_refprune_flags('per_bb,fanout_raise'): + self.check(func, (types.intp), basicblock=True, diamond=False, + fanout=False, fanout_raise=True) + + +class TestRefPruneFlags(TestCase): + def setUp(self): + warnings.simplefilter('error', NumbaInvalidConfigWarning) + + def tearDown(self): + warnings.resetwarnings() + + def test_warn_invalid_flags(self): + with set_refprune_flags('abc,per_bb,cde'): + with self.assertWarns(NumbaInvalidConfigWarning) as cm: + optval = _parse_refprune_flags() + self.assertEqual(len(cm.warnings), 2) + self.assertIn('abc', str(cm.warnings[0].message)) + self.assertIn('cde', str(cm.warnings[1].message)) + self.assertEqual(optval, llvm.RefPruneSubpasses.PER_BB) + + def test_valid_flag(self): + with set_refprune_flags('per_bb, diamond, fanout,fanout_raise'): + optval = _parse_refprune_flags() + self.assertEqual(optval, llvm.RefPruneSubpasses.ALL) + + def test_the_all_flag(self): + with set_refprune_flags('all'): + optval = _parse_refprune_flags() + self.assertEqual(optval, llvm.RefPruneSubpasses.ALL) + + def test_some_flags(self): + with set_refprune_flags('per_bb, fanout'): + optval = _parse_refprune_flags() + enumcls = llvm.RefPruneSubpasses + self.assertEqual(optval, enumcls.PER_BB | 
enumcls.FANOUT) + + +if __name__ == "__main__": + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_remove_dead.py b/venv/lib/python3.10/site-packages/numba/tests/test_remove_dead.py new file mode 100644 index 0000000000000000000000000000000000000000..b1ab55ce2d5e38d36bfffcf05fceb07be1567efc --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_remove_dead.py @@ -0,0 +1,436 @@ +# +# Copyright (c) 2017 Intel Corporation +# SPDX-License-Identifier: BSD-2-Clause +# + +import numba +import numba.parfors.parfor +from numba import njit, jit +from numba.core import ir_utils +from numba.core import types, ir, compiler +from numba.core.registry import cpu_target +from numba.core.ir_utils import (copy_propagate, apply_copy_propagate, + get_name_var_table, remove_dels, remove_dead, + remove_call_handlers, alias_func_extensions) +from numba.core.typed_passes import type_inference_stage +from numba.core.compiler_machinery import FunctionPass, register_pass, PassManager +from numba.core.untyped_passes import (ExtractByteCode, TranslateByteCode, FixupArgs, + IRProcessing, DeadBranchPrune, + RewriteSemanticConstants, GenericRewrites, + WithLifting, PreserveIR, InlineClosureLikes) + +from numba.core.typed_passes import (NopythonTypeInference, AnnotateTypes, + NopythonRewrites, PreParforPass, ParforPass, + DumpParforDiagnostics, NativeLowering, + IRLegalization, NoPythonBackend, NativeLowering) +import numpy as np +from numba.tests.support import skip_parfors_unsupported, needs_blas, TestCase +import unittest + + +def test_will_propagate(b, z, w): + x1 = 3 + x = x1 + if b > 0: + y = z + w + else: + y = 0 + a = 2 * x + return a < b + +def null_func(a,b,c,d): + False + +@numba.njit +def dummy_aliased_func(A): + return A + +def alias_ext_dummy_func(lhs_name, args, alias_map, arg_aliases): + ir_utils._add_alias(lhs_name, args[0].name, alias_map, arg_aliases) + +def findLhsAssign(func_ir, var): + for label, block in func_ir.blocks.items(): + 
for i, inst in enumerate(block.body): + if isinstance(inst, ir.Assign) and inst.target.name==var: + return True + + return False + +class TestRemoveDead(TestCase): + + _numba_parallel_test_ = False + + def compile_parallel(self, func, arg_types): + return njit(arg_types, parallel=True, fastmath=True)(func) + + def test1(self): + typingctx = cpu_target.typing_context + targetctx = cpu_target.target_context + test_ir = compiler.run_frontend(test_will_propagate) + + typingctx.refresh() + targetctx.refresh() + args = (types.int64, types.int64, types.int64) + typemap, _, calltypes, _ = type_inference_stage(typingctx, targetctx, test_ir, args, None) + remove_dels(test_ir.blocks) + in_cps, out_cps = copy_propagate(test_ir.blocks, typemap) + apply_copy_propagate(test_ir.blocks, in_cps, get_name_var_table(test_ir.blocks), typemap, calltypes) + + remove_dead(test_ir.blocks, test_ir.arg_names, test_ir) + self.assertFalse(findLhsAssign(test_ir, "x")) + + def test2(self): + def call_np_random_seed(): + np.random.seed(2) + + def seed_call_exists(func_ir): + for inst in func_ir.blocks[0].body: + if (isinstance(inst, ir.Assign) and + isinstance(inst.value, ir.Expr) and + inst.value.op == 'call' and + func_ir.get_definition(inst.value.func).attr == 'seed'): + return True + return False + + test_ir = compiler.run_frontend(call_np_random_seed) + remove_dead(test_ir.blocks, test_ir.arg_names, test_ir) + self.assertTrue(seed_call_exists(test_ir)) + + def run_array_index_test(self, func): + A1 = np.arange(6).reshape(2,3) + A2 = A1.copy() + i = 0 + pfunc = self.compile_parallel(func, (numba.typeof(A1), numba.typeof(i))) + + func(A1, i) + pfunc(A2, i) + np.testing.assert_array_equal(A1, A2) + + def test_alias_ravel(self): + def func(A, i): + B = A.ravel() + B[i] = 3 + + self.run_array_index_test(func) + + def test_alias_flat(self): + def func(A, i): + B = A.flat + B[i] = 3 + + self.run_array_index_test(func) + + def test_alias_transpose1(self): + def func(A, i): + B = A.T + B[i,0] = 3 + + 
self.run_array_index_test(func) + + def test_alias_transpose2(self): + def func(A, i): + B = A.transpose() + B[i,0] = 3 + + self.run_array_index_test(func) + + def test_alias_transpose3(self): + def func(A, i): + B = np.transpose(A) + B[i,0] = 3 + + self.run_array_index_test(func) + + @skip_parfors_unsupported + @needs_blas + def test_alias_ctypes(self): + # use xxnrm2 to test call a C function with ctypes + from numba.np.linalg import _BLAS + xxnrm2 = _BLAS().numba_xxnrm2(types.float64) + + def remove_dead_xxnrm2(rhs, lives, call_list): + if call_list == [xxnrm2]: + return rhs.args[4].name not in lives + return False + + # adding this handler has no-op effect since this function won't match + # anything else but it's a bit cleaner to save the state and recover + old_remove_handlers = remove_call_handlers[:] + remove_call_handlers.append(remove_dead_xxnrm2) + + def func(ret): + a = np.ones(4) + xxnrm2(100, 4, a.ctypes, 1, ret.ctypes) + + A1 = np.zeros(1) + A2 = A1.copy() + + try: + pfunc = self.compile_parallel(func, (numba.typeof(A1),)) + numba.njit(func)(A1) + pfunc(A2) + finally: + # recover global state + remove_call_handlers[:] = old_remove_handlers + + self.assertEqual(A1[0], A2[0]) + + def test_alias_reshape1(self): + def func(A, i): + B = np.reshape(A, (3,2)) + B[i,0] = 3 + + self.run_array_index_test(func) + + def test_alias_reshape2(self): + def func(A, i): + B = A.reshape(3,2) + B[i,0] = 3 + + self.run_array_index_test(func) + + def test_alias_func_ext(self): + def func(A, i): + B = dummy_aliased_func(A) + B[i, 0] = 3 + + # save global state + old_ext_handlers = alias_func_extensions.copy() + try: + alias_func_extensions[('dummy_aliased_func', + 'numba.tests.test_remove_dead')] = alias_ext_dummy_func + self.run_array_index_test(func) + finally: + # recover global state + ir_utils.alias_func_extensions = old_ext_handlers + + def test_rm_dead_rhs_vars(self): + """make sure lhs variable of assignment is considered live if used in + rhs (test for #6715). 
+ """ + def func(): + for i in range(3): + a = (lambda j: j)(i) + a = np.array(a) + return a + + self.assertEqual(func(), numba.njit(func)()) + + @skip_parfors_unsupported + def test_alias_parfor_extension(self): + """Make sure aliases are considered in remove dead extension for + parfors. + """ + def func(): + n = 11 + numba.parfors.parfor.init_prange() + A = np.empty(n) + B = A # create alias to A + for i in numba.prange(n): + A[i] = i + + return B + + @register_pass(analysis_only=False, mutates_CFG=True) + class LimitedParfor(FunctionPass): + _name = "limited_parfor" + + def __init__(self): + FunctionPass.__init__(self) + + def run_pass(self, state): + parfor_pass = numba.parfors.parfor.ParforPass( + state.func_ir, + state.typemap, + state.calltypes, + state.return_type, + state.typingctx, + state.flags.auto_parallel, + state.flags, + state.metadata, + state.parfor_diagnostics + ) + remove_dels(state.func_ir.blocks) + parfor_pass.array_analysis.run(state.func_ir.blocks) + parfor_pass._convert_loop(state.func_ir.blocks) + remove_dead(state.func_ir.blocks, + state.func_ir.arg_names, + state.func_ir, + state.typemap) + numba.parfors.parfor.get_parfor_params(state.func_ir.blocks, + parfor_pass.options.fusion, + parfor_pass.nested_fusion_info) + return True + + class TestPipeline(compiler.Compiler): + """Test pipeline that just converts prange() to parfor and calls + remove_dead(). Copy propagation can replace B in the example code + which this pipeline avoids. 
+ """ + def define_pipelines(self): + name = 'test parfor aliasing' + pm = PassManager(name) + pm.add_pass(TranslateByteCode, "analyzing bytecode") + pm.add_pass(FixupArgs, "fix up args") + pm.add_pass(IRProcessing, "processing IR") + pm.add_pass(WithLifting, "Handle with contexts") + # pre typing + if not self.state.flags.no_rewrites: + pm.add_pass(GenericRewrites, "nopython rewrites") + pm.add_pass(RewriteSemanticConstants, "rewrite semantic constants") + pm.add_pass(DeadBranchPrune, "dead branch pruning") + pm.add_pass(InlineClosureLikes, + "inline calls to locally defined closures") + # typing + pm.add_pass(NopythonTypeInference, "nopython frontend") + + # lower + pm.add_pass(NativeLowering, "native lowering") + pm.add_pass(NoPythonBackend, "nopython mode backend") + pm.finalize() + return [pm] + + test_res = numba.jit(pipeline_class=TestPipeline)(func)() + py_res = func() + np.testing.assert_array_equal(test_res, py_res) + + +class TestSSADeadBranchPrune(TestCase): + """ + Test issues that required dead-branch-prune on SSA IR + """ + def test_issue_9706(self): + @njit + def foo(x, y=None): + if y is not None: + return x + y + else: + y = x + return x + y + + @njit + def foo_manual_ssa(x, y=None): + if y is not None: + return x + y + else: + # avoid changing type of `y` + y_ = x + return x + y_ + + self.assertEqual(foo(3, None), foo_manual_ssa(3, None)) + self.assertEqual(foo(3, 10), foo_manual_ssa(3, 10)) + + def test_issue_6541(self): + @njit + def f(xs, out=None): + N, = xs.shape + if out is None: + out = np.arange(N) + else: + assert np.all((0 <= out) & (out < N)) + out[:] = N + return out + + expected = f(np.array([3, 1, 2])) + out = np.arange(3, dtype='i8') + got = f(np.array([3, 1, 2]), out=out) + self.assertIs(got, out) + self.assertPreciseEqual(got, expected) + out = None + got = f(np.array([3, 1, 2]), out=out) + self.assertPreciseEqual(got, expected) + + def test_issue_7482(self): + @njit + def compute(smth, weights, default=0.0): + if weights is 
None: + return None + + if len(weights) == 0: + return default + + idx = smth > weights + weights = weights[idx] + + return default * weights + + self.assertIsNone(compute(smth=1, weights=None)) + kwargs = dict(smth=1, weights=np.arange(5), default=np.zeros(1)) + self.assertEqual(compute(**kwargs), + compute.py_func(**kwargs)) + + def test_issue_5661(self): + @njit + def foo(a, b=None): + if b is None: + b = 1 + elif b < a: + b += 1 + + return a + b + + args_list = [ + (1, 2), + (2, 1), + (1,), + ] + for args in args_list: + self.assertEqual(foo(*args), foo.py_func(*args)) + + # Variation + # https://github.com/numba/numba/issues/5661#issuecomment-697902475 + def make(decor=njit): + @decor + def inner(state): + if state is None: + state = 0 + else: + state += 1 + return state + + @decor + def fn(): + state = None + for i in range(10): + state = inner(state) + return state + + return fn() + + self.assertEqual(make(), make(lambda x: x)) + + def test_issue_9742(self): + CONST = 32 + + @jit + def foo(): + # This is a prune by value case, conditional is a compile time + # evaluatable constant. + conditional = CONST // 2 + collect = [] + while conditional: + collect.append(conditional) + conditional //= 2 + + return collect + + self.assertEqual(foo(), foo.py_func()) + + def test_issue_9742_variant(self): + CONST = 32 + + @jit + def foo(): + collect = [] + # This is a prune by value case, conditional is a compile time + # evaluatable constant. 
+ x = CONST + 1 + if x: + collect.append(x) + return collect + + self.assertEqual(foo(), foo.py_func()) + + +if __name__ == "__main__": + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_repr.py b/venv/lib/python3.10/site-packages/numba/tests/test_repr.py new file mode 100644 index 0000000000000000000000000000000000000000..29e4b9104dc07582c4579c95ef125861e99e05b8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_repr.py @@ -0,0 +1,63 @@ +import unittest +import numpy as np + +from numba.tests.support import TestCase +from numba import typeof +from numba.core import types +from numba.typed import List, Dict + +NB_TYPES = [ + types.Array, + types.NestedArray, + types.bool_, + types.unicode_type, + types.Record, + types.UnicodeCharSeq, + types.UniTuple, + types.List, + types.Tuple, + types.DictType, + types.ListType, + types.Set, +] + list(types.number_domain) + + +class TestRepr(TestCase): + def setUp(self) -> None: + tys_ns = {ty.__name__: ty for ty in NB_TYPES if hasattr(ty, "__name__")} + tys_ns.update({ty.name: ty for ty in NB_TYPES if hasattr(ty, "name")}) + self.tys_ns = tys_ns + + def check_repr(self, val): + ty = typeof(val) + ty2 = eval(repr(ty), self.tys_ns) + self.assertEqual(ty, ty2) + + def test_types(self): + # define some values for the test cases + rec_dtype = [("a", "f8"), ("b", "U8"), ("c", "i8", (2, 3))] + nb_dict = Dict() + nb_dict['a'] = 1 + # tests cases: list of different types + list comp of number types + val_types_cases = [ + True, + "a", + (1, 2), + (1, "a"), + [1, "a"], + ([1, "a"], [2, "b"]), + ((1, 2), (3, "b")), + ((1, 2), (3, [1, 2])), + np.ones(3), + np.array([(1, "a", np.ones((2, 3)))], dtype=rec_dtype), + nb_dict, + List([1, 2]), + {1, 2}, + ] + [number(1.1) for number in types.number_domain] + + for val in val_types_cases: + self.check_repr(val) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_return_values.py 
b/venv/lib/python3.10/site-packages/numba/tests/test_return_values.py new file mode 100644 index 0000000000000000000000000000000000000000..b3496d28311319bd5327c65d5dc4067392830e6a --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_return_values.py @@ -0,0 +1,74 @@ +""" +Test return values +""" + + +import math + +import unittest +from numba import jit +from numba.core import types +from numba.core.errors import TypingError, NumbaTypeError + + +enable_pyobj_flags = {'forceobj': True} +no_pyobj_flags = {'nopython': True} + + +def get_nopython_func(): + return abs + +def get_pyobj_func(): + return open + +def get_module_func(): + return math.floor + + +class TestReturnValues(unittest.TestCase): + + def test_nopython_func(self, flags=enable_pyobj_flags): + # Test returning func that is supported in nopython mode + pyfunc = get_nopython_func + cfunc = jit((), **flags)(pyfunc) + if flags == enable_pyobj_flags: + result = cfunc() + self.assertEqual(result, abs) + else: + self.fail("Unexpected successful compilation.") + + def test_nopython_func_npm(self): + with self.assertRaises(NumbaTypeError): + self.test_nopython_func(flags=no_pyobj_flags) + + def test_pyobj_func(self, flags=enable_pyobj_flags): + # Test returning func that is only supported in object mode + pyfunc = get_pyobj_func + cfunc = jit((), **flags)(pyfunc) + if flags == enable_pyobj_flags: + result = cfunc() + self.assertEqual(result, open) + else: + self.fail("Unexpected successful compilation.") + + def test_pyobj_func_npm(self): + with self.assertRaises(TypingError): + self.test_pyobj_func(flags=no_pyobj_flags) + + def test_module_func(self, flags=enable_pyobj_flags): + # Test returning imported func that is only supported in object mode + pyfunc = get_module_func + cfunc = jit((), **flags)(pyfunc) + if flags == enable_pyobj_flags: + result = cfunc() + self.assertEqual(result, math.floor) + else: + self.fail("Unexpected successful compilation.") + + def test_module_func_npm(self): + 
with self.assertRaises(NumbaTypeError): + self.test_module_func(flags=no_pyobj_flags) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_runtests.py b/venv/lib/python3.10/site-packages/numba/tests/test_runtests.py new file mode 100644 index 0000000000000000000000000000000000000000..9695cc51ca47687e502419d974a21e05d82116e0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_runtests.py @@ -0,0 +1,284 @@ +import os +import sys +import subprocess + +from numba import cuda +import unittest +import itertools + +try: + import git # noqa: F401 from gitpython package +except ImportError: + has_gitpython = False +else: + has_gitpython = True + +try: + import yaml # from pyyaml package +except ImportError: + has_pyyaml = False +else: + has_pyyaml = True + + +class TestCase(unittest.TestCase): + """These test cases are meant to test the Numba test infrastructure itself. + Therefore, the logic used here shouldn't use numba.testing, but only the + upstream unittest, and run the numba test suite only in a subprocess.""" + + def get_testsuite_listing(self, args, *, subp_kwargs=None): + """ + Use `subp_kwargs` to pass extra argument to `subprocess.check_output`. + """ + subp_kwargs = subp_kwargs or {} + cmd = [sys.executable, '-m', 'numba.runtests', '-l'] + list(args) + out_bytes = subprocess.check_output(cmd, **subp_kwargs) + lines = out_bytes.decode('UTF-8').splitlines() + lines = [line for line in lines if line.strip()] + return lines + + def check_listing_prefix(self, prefix): + listing = self.get_testsuite_listing([prefix]) + for ln in listing[:-1]: + errmsg = '{!r} not startswith {!r}'.format(ln, prefix) + self.assertTrue(ln.startswith(prefix), msg=errmsg) + + def check_testsuite_size(self, args, minsize): + """ + Check that the reported numbers of tests are at least *minsize*. 
+ """ + lines = self.get_testsuite_listing(args) + last_line = lines[-1] + self.assertTrue('tests found' in last_line) + number = int(last_line.split(' ')[0]) + # There may be some "skipped" messages at the beginning, + # so do an approximate check. + self.assertIn(len(lines), range(number + 1, number + 20)) + self.assertGreaterEqual(number, minsize) + return lines + + def check_all(self, ids): + lines = self.check_testsuite_size(ids, 5000) + # CUDA should be included by default + self.assertTrue(any('numba.cuda.tests.' in line for line in lines)) + # As well as subpackage + self.assertTrue(any('numba.tests.npyufunc.test_' in line + for line in lines),) + + def _get_numba_tests_from_listing(self, listing): + """returns a filter on strings starting with 'numba.', useful for + selecting the 'numba' test names from a test listing.""" + return filter(lambda x: x.startswith('numba.'), listing) + + def test_default(self): + self.check_all([]) + + def test_all(self): + self.check_all(['numba.tests']) + + def test_cuda(self): + # Even without CUDA enabled, there is at least one test + # (in numba.cuda.tests.nocuda) + minsize = 100 if cuda.is_available() else 1 + self.check_testsuite_size(['numba.cuda.tests'], minsize) + + @unittest.skipIf(not cuda.is_available(), "NO CUDA") + def test_cuda_submodules(self): + self.check_listing_prefix('numba.cuda.tests.cudadrv') + self.check_listing_prefix('numba.cuda.tests.cudapy') + self.check_listing_prefix('numba.cuda.tests.nocuda') + self.check_listing_prefix('numba.cuda.tests.cudasim') + + def test_module(self): + self.check_testsuite_size(['numba.tests.test_storeslice'], 2) + self.check_testsuite_size(['numba.tests.test_nested_calls'], 10) + # Several modules + self.check_testsuite_size(['numba.tests.test_nested_calls', + 'numba.tests.test_storeslice'], 12) + + def test_subpackage(self): + self.check_testsuite_size(['numba.tests.npyufunc'], 50) + + def test_random(self): + self.check_testsuite_size( + ['--random', '0.1', 
'numba.tests.npyufunc'], 5) + + def test_include_exclude_tags(self): + def get_count(arg_list): + lines = self.get_testsuite_listing(arg_list) + self.assertIn('tests found', lines[-1]) + count = int(lines[-1].split()[0]) + self.assertTrue(count > 0) + return count + + tags = ['long_running', 'long_running, important'] + + total = get_count(['numba.tests']) + + for tag in tags: + included = get_count(['--tags', tag, 'numba.tests']) + excluded = get_count(['--exclude-tags', tag, 'numba.tests']) + self.assertEqual(total, included + excluded) + + # check syntax with `=` sign in + included = get_count(['--tags=%s' % tag, 'numba.tests']) + excluded = get_count(['--exclude-tags=%s' % tag, 'numba.tests']) + self.assertEqual(total, included + excluded) + + def test_check_shard(self): + tmpAll = self.get_testsuite_listing([]) + tmp1 = self.get_testsuite_listing(['-j', '0:2']) + tmp2 = self.get_testsuite_listing(['-j', '1:2']) + + lAll = set(self._get_numba_tests_from_listing(tmpAll)) + l1 = set(self._get_numba_tests_from_listing(tmp1)) + l2 = set(self._get_numba_tests_from_listing(tmp2)) + + # The difference between two adjacent shards should be less than 5% of + # the total + self.assertLess(abs(len(l2) - len(l1)), len(lAll) / 20) + self.assertLess(len(l1), len(lAll)) + self.assertLess(len(l2), len(lAll)) + + def test_check_sharding_equivalent(self): + # get some shards + sharded = list() + for i in range(3): + subset = self.get_testsuite_listing(['-j', '{}:3'.format(i)]) + slist = [*self._get_numba_tests_from_listing(subset)] + sharded.append(slist) + + # get the always running tests + tmp = self.get_testsuite_listing(['--tag', 'always_test']) + always_running = set(self._get_numba_tests_from_listing(tmp)) + + # make sure there is at least one test that always runs + self.assertGreaterEqual(len(always_running), 1) + + # check that each shard contains no repeats + sharded_sets = [set(x) for x in sharded] + for i in range(len(sharded)): + 
self.assertEqual(len(sharded_sets[i]), len(sharded[i])) + + # check that the always running tests are in every shard, and then + # remove them from the shards + for shard in sharded_sets: + for test in always_running: + self.assertIn(test, shard) + shard.remove(test) + self.assertNotIn(test, shard) + + # check that there is no overlap between the shards + for a, b in itertools.combinations(sharded_sets, 2): + self.assertFalse(a & b) + + # check that the sum of the shards and the always running tests is the + # same as the full listing + + sum_of_parts = set() + for x in sharded_sets: + sum_of_parts.update(x) + sum_of_parts.update(always_running) + + full_listing = set(self._get_numba_tests_from_listing( + self.get_testsuite_listing([]))) + + self.assertEqual(sum_of_parts, full_listing) + + @unittest.skipUnless(has_gitpython, "Requires gitpython") + def test_gitdiff(self): + # Check for git + try: + subprocess.call("git", + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL) + except FileNotFoundError: + self.skipTest("no git available") + + # default + outs = self.get_testsuite_listing(['-g']) + self.assertNotIn("Git diff by common ancestor", outs) + # using ancestor + outs = self.get_testsuite_listing(['-g=ancestor']) + self.assertIn("Git diff by common ancestor", outs) + # misspelled ancestor + subp_kwargs = dict(stderr=subprocess.DEVNULL) + with self.assertRaises(subprocess.CalledProcessError): + self.get_testsuite_listing(['-g=ancest'], subp_kwargs=subp_kwargs) + + @unittest.skipUnless(has_pyyaml, "Requires pyyaml") + def test_azure_config(self): + from yaml import Loader + base_path = os.path.dirname(os.path.abspath(__file__)) + azure_pipe = os.path.join(base_path, '..', '..', 'azure-pipelines.yml') + if not os.path.isfile(azure_pipe): + self.skipTest("'azure-pipelines.yml' is not available") + with open(os.path.abspath(azure_pipe), 'rt') as f: + data = f.read() + pipe_yml = yaml.load(data, Loader=Loader) + + templates = pipe_yml['jobs'] + # first look at 
the items in the first two templates, this is osx/linux + start_indexes = [] + for tmplt in templates[:2]: + matrix = tmplt['parameters']['matrix'] + for setup in matrix.values(): + start_indexes.append(setup['TEST_START_INDEX']) + + # next look at the items in the windows only template + winpath = ['..', '..', 'buildscripts', 'azure', 'azure-windows.yml'] + azure_windows = os.path.join(base_path, *winpath) + if not os.path.isfile(azure_windows): + self.skipTest("'azure-windows.yml' is not available") + with open(os.path.abspath(azure_windows), 'rt') as f: + data = f.read() + windows_yml = yaml.load(data, Loader=Loader) + + # There's only one template in windows and its keyed differently to the + # above, get its matrix. + matrix = windows_yml['jobs'][0]['strategy']['matrix'] + for setup in matrix.values(): + start_indexes.append(setup['TEST_START_INDEX']) + + # sanity checks + # 1. That the TEST_START_INDEX is unique + self.assertEqual(len(start_indexes), len(set(start_indexes))) + # 2. That the TEST_START_INDEX is a complete range + lim_start_index = max(start_indexes) + 1 + expected = [*range(lim_start_index)] + self.assertEqual(sorted(start_indexes), expected) + # 3. That the number of indexes matches the declared test count + self.assertEqual(lim_start_index, pipe_yml['variables']['TEST_COUNT']) + + def test_no_compilation_on_list(self): + # Checks that the test suite doesn't do any CPU-side compilation on + # listing of tests. 
+ code = """if 1: + from unittest import mock + from llvmlite import binding as llvm + error = RuntimeError("Detected compilation during test listing") + with mock.patch.object(llvm.ExecutionEngine, 'finalize_object', + side_effect=error): + import numba + {0} + """ + + # Run with a jit function in the test to demonstrate failure + with self.assertRaises(subprocess.CalledProcessError) as raises: + cmd = [sys.executable, "-c", code.format("numba.njit(lambda:0)()")] + subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + timeout=60) + self.assertIn("Detected compilation during test listing", + raises.exception.stdout.decode('UTF-8')) + + # Run to validate the test suite does not trigger compilation during + # listing. + cmd = [sys.executable, "-c", code.format("numba.test('-l')")] + subprocess.check_call(cmd, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL,) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_serialize.py b/venv/lib/python3.10/site-packages/numba/tests/test_serialize.py new file mode 100644 index 0000000000000000000000000000000000000000..5aa99e439fcc84d31dd62ca719a0a669d4ef6225 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_serialize.py @@ -0,0 +1,331 @@ +import contextlib +import gc +import pickle +import runpy +import subprocess +import sys +import unittest +from multiprocessing import get_context + +import numba +from numba.core.errors import TypingError +from numba.tests.support import TestCase +from numba.core.target_extension import resolve_dispatcher_from_str +from numba.cloudpickle import dumps, loads + + +class TestDispatcherPickling(TestCase): + + def run_with_protocols(self, meth, *args, **kwargs): + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + meth(proto, *args, **kwargs) + + @contextlib.contextmanager + def simulate_fresh_target(self): + hwstr = 'cpu' + dispatcher_cls = resolve_dispatcher_from_str(hwstr) + old_descr = 
dispatcher_cls.targetdescr + # Simulate fresh targetdescr + dispatcher_cls.targetdescr = type(dispatcher_cls.targetdescr)(hwstr) + try: + yield + finally: + # Be sure to reinstantiate old descriptor, otherwise other + # objects may be out of sync. + dispatcher_cls.targetdescr = old_descr + + def check_call(self, proto, func, expected_result, args): + def check_result(func): + if (isinstance(expected_result, type) + and issubclass(expected_result, Exception)): + self.assertRaises(expected_result, func, *args) + else: + self.assertPreciseEqual(func(*args), expected_result) + + # Control + check_result(func) + pickled = pickle.dumps(func, proto) + with self.simulate_fresh_target(): + new_func = pickle.loads(pickled) + check_result(new_func) + + def test_call_with_sig(self): + from .serialize_usecases import add_with_sig + self.run_with_protocols(self.check_call, add_with_sig, 5, (1, 4)) + # Compilation has been disabled => float inputs will be coerced to int + self.run_with_protocols(self.check_call, add_with_sig, 5, (1.2, 4.2)) + + def test_call_without_sig(self): + from .serialize_usecases import add_without_sig + self.run_with_protocols(self.check_call, add_without_sig, 5, (1, 4)) + self.run_with_protocols(self.check_call, add_without_sig, 5.5, (1.2, 4.3)) + # Object mode is enabled + self.run_with_protocols(self.check_call, add_without_sig, "abc", ("a", "bc")) + + def test_call_nopython(self): + from .serialize_usecases import add_nopython + self.run_with_protocols(self.check_call, add_nopython, 5.5, (1.2, 4.3)) + # Object mode is disabled + self.run_with_protocols(self.check_call, add_nopython, TypingError, (object(), object())) + + def test_call_nopython_fail(self): + from .serialize_usecases import add_nopython_fail + # Compilation fails + self.run_with_protocols(self.check_call, add_nopython_fail, TypingError, (1, 2)) + + def test_call_objmode_with_global(self): + from .serialize_usecases import get_global_objmode + self.run_with_protocols(self.check_call, 
get_global_objmode, 7.5, (2.5,)) + + def test_call_closure(self): + from .serialize_usecases import closure + inner = closure(1) + self.run_with_protocols(self.check_call, inner, 6, (2, 3)) + + def check_call_closure_with_globals(self, **jit_args): + from .serialize_usecases import closure_with_globals + inner = closure_with_globals(3.0, **jit_args) + self.run_with_protocols(self.check_call, inner, 7.0, (4.0,)) + + def test_call_closure_with_globals_nopython(self): + self.check_call_closure_with_globals(nopython=True) + + def test_call_closure_with_globals_objmode(self): + self.check_call_closure_with_globals(forceobj=True) + + def test_call_closure_calling_other_function(self): + from .serialize_usecases import closure_calling_other_function + inner = closure_calling_other_function(3.0) + self.run_with_protocols(self.check_call, inner, 11.0, (4.0, 6.0)) + + def test_call_closure_calling_other_closure(self): + from .serialize_usecases import closure_calling_other_closure + inner = closure_calling_other_closure(3.0) + self.run_with_protocols(self.check_call, inner, 8.0, (4.0,)) + + def test_call_dyn_func(self): + from .serialize_usecases import dyn_func + # Check serializing a dynamically-created function + self.run_with_protocols(self.check_call, dyn_func, 36, (6,)) + + def test_call_dyn_func_objmode(self): + from .serialize_usecases import dyn_func_objmode + # Same with an object mode function + self.run_with_protocols(self.check_call, dyn_func_objmode, 36, (6,)) + + def test_renamed_module(self): + from .serialize_usecases import get_renamed_module + # Issue #1559: using a renamed module (e.g. `import numpy as np`) + # should not fail serializing + expected = get_renamed_module(0.0) + self.run_with_protocols(self.check_call, get_renamed_module, + expected, (0.0,)) + + def test_other_process(self): + """ + Check that reconstructing doesn't depend on resources already + instantiated in the original process. 
+ """ + from .serialize_usecases import closure_calling_other_closure + func = closure_calling_other_closure(3.0) + pickled = pickle.dumps(func) + code = """if 1: + import pickle + + data = {pickled!r} + func = pickle.loads(data) + res = func(4.0) + assert res == 8.0, res + """.format(**locals()) + subprocess.check_call([sys.executable, "-c", code]) + + def test_reuse(self): + """ + Check that deserializing the same function multiple times re-uses + the same dispatcher object. + + Note that "same function" is intentionally under-specified. + """ + from .serialize_usecases import closure + func = closure(5) + pickled = pickle.dumps(func) + func2 = closure(6) + pickled2 = pickle.dumps(func2) + + f = pickle.loads(pickled) + g = pickle.loads(pickled) + h = pickle.loads(pickled2) + self.assertIs(f, g) + self.assertEqual(f(2, 3), 10) + g.disable_compile() + self.assertEqual(g(2, 4), 11) + + self.assertIsNot(f, h) + self.assertEqual(h(2, 3), 11) + + # Now make sure the original object doesn't exist when deserializing + func = closure(7) + func(42, 43) + pickled = pickle.dumps(func) + del func + gc.collect() + + f = pickle.loads(pickled) + g = pickle.loads(pickled) + self.assertIs(f, g) + self.assertEqual(f(2, 3), 12) + g.disable_compile() + self.assertEqual(g(2, 4), 13) + + def test_imp_deprecation(self): + """ + The imp module was deprecated in v3.4 in favour of importlib + """ + code = """if 1: + import pickle + import warnings + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always', DeprecationWarning) + from numba import njit + @njit + def foo(x): + return x + 1 + foo(1) + serialized_foo = pickle.dumps(foo) + for x in w: + if 'serialize.py' in x.filename: + assert "the imp module is deprecated" not in x.msg + """ + subprocess.check_call([sys.executable, "-c", code]) + + +class TestSerializationMisc(TestCase): + def test_numba_unpickle(self): + # Test that _numba_unpickle is memorizing its output + from numba.core.serialize import 
_numba_unpickle + + random_obj = object() + bytebuf = pickle.dumps(random_obj) + hashed = hash(random_obj) + + got1 = _numba_unpickle(id(random_obj), bytebuf, hashed) + # not the original object + self.assertIsNot(got1, random_obj) + got2 = _numba_unpickle(id(random_obj), bytebuf, hashed) + # unpickled results are the same objects + self.assertIs(got1, got2) + + +class TestCloudPickleIssues(TestCase): + """This test case includes issues specific to the cloudpickle implementation. + """ + _numba_parallel_test_ = False + + def test_dynamic_class_reset_on_unpickle(self): + # a dynamic class + class Klass: + classvar = None + + def mutator(): + Klass.classvar = 100 + + def check(): + self.assertEqual(Klass.classvar, 100) + + saved = dumps(Klass) + mutator() + check() + loads(saved) + # Without the patch, each `loads(saved)` will reset `Klass.classvar` + check() + loads(saved) + check() + + @unittest.skipIf(__name__ == "__main__", + "Test cannot run as when module is __main__") + def test_main_class_reset_on_unpickle(self): + mp = get_context('spawn') + proc = mp.Process(target=check_main_class_reset_on_unpickle) + proc.start() + proc.join(timeout=60) + self.assertEqual(proc.exitcode, 0) + + def test_dynamic_class_reset_on_unpickle_new_proc(self): + # a dynamic class + class Klass: + classvar = None + + # serialize Klass in this process + saved = dumps(Klass) + + # Check the reset problem in a new process + mp = get_context('spawn') + proc = mp.Process(target=check_unpickle_dyn_class_new_proc, args=(saved,)) + proc.start() + proc.join(timeout=60) + self.assertEqual(proc.exitcode, 0) + + def test_dynamic_class_issue_7356(self): + cfunc = numba.njit(issue_7356) + self.assertEqual(cfunc(), (100, 100)) + + +class DynClass(object): + # For testing issue #7356 + a = None + + +def issue_7356(): + with numba.objmode(before="intp"): + DynClass.a = 100 + before = DynClass.a + with numba.objmode(after="intp"): + after = DynClass.a + return before, after + + +def 
check_main_class_reset_on_unpickle(): + # Load module and get its global dictionary + glbs = runpy.run_module( + "numba.tests.cloudpickle_main_class", + run_name="__main__", + ) + # Get the Klass and check it is from __main__ + Klass = glbs['Klass'] + assert Klass.__module__ == "__main__" + assert Klass.classvar != 100 + saved = dumps(Klass) + # mutate + Klass.classvar = 100 + # check + _check_dyn_class(Klass, saved) + + +def check_unpickle_dyn_class_new_proc(saved): + Klass = loads(saved) + assert Klass.classvar != 100 + # mutate + Klass.classvar = 100 + # check + _check_dyn_class(Klass, saved) + + +def _check_dyn_class(Klass, saved): + def check(): + if Klass.classvar != 100: + raise AssertionError("Check failed. Klass reset.") + + check() + loaded = loads(saved) + if loaded is not Klass: + raise AssertionError("Expected reuse") + # Without the patch, each `loads(saved)` will reset `Klass.classvar` + check() + loaded = loads(saved) + if loaded is not Klass: + raise AssertionError("Expected reuse") + check() + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_sets.py b/venv/lib/python3.10/site-packages/numba/tests/test_sets.py new file mode 100644 index 0000000000000000000000000000000000000000..2d5af8204c4f02254c2fd7dcd1d4e4c5aac9a413 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_sets.py @@ -0,0 +1,868 @@ +import unittest + +from collections import namedtuple +import contextlib +import itertools +import random +from numba.core.errors import TypingError + +import numpy as np + +from numba import jit, njit +from numba.tests.support import (TestCase, enable_pyobj_flags, MemoryLeakMixin, + compile_function) + + +Point = namedtuple('Point', ('a', 'b')) + + +def _build_set_literal_usecase(code, args): + code = code % {'initializer': ', '.join(repr(arg) for arg in args)} + return compile_function('build_set', code, globals()) + +def set_literal_return_usecase(args): + code = """if 1: + 
def build_set(): + return {%(initializer)s} + """ + return _build_set_literal_usecase(code, args) + +def set_literal_convert_usecase(args): + code = """if 1: + def build_set(): + my_set = {%(initializer)s} + return list(my_set) + """ + return _build_set_literal_usecase(code, args) + + +def empty_constructor_usecase(): + s = set() + s.add(1) + return len(s) + +def constructor_usecase(arg): + s = set(arg) + return len(s) + +def iterator_usecase(arg): + s = set(arg) + l = [] + for v in s: + l.append(v) + return l + +def update_usecase(a, b, c): + s = set() + s.update(a) + s.update(b) + s.update(c) + return list(s) + +def bool_usecase(arg): + # Remove one element to allow for empty sets. + s = set(arg[1:]) + return bool(s) + +def remove_usecase(a, b): + s = set(a) + for v in b: + s.remove(v) + return list(s) + +def discard_usecase(a, b): + s = set(a) + for v in b: + s.discard(v) + return list(s) + +def add_discard_usecase(a, u, v): + s = set(a) + for i in range(1000): + s.add(u) + s.discard(v) + return list(s) + +def pop_usecase(a): + s = set(a) + l = [] + while len(s) > 0: + l.append(s.pop()) + return l + +def contains_usecase(a, b): + s = set(a) + l = [] + for v in b: + l.append(v in s) + return l + +def difference_update_usecase(a, b): + s = set(a) + s.difference_update(set(b)) + return list(s) + +def intersection_update_usecase(a, b): + s = set(a) + s.intersection_update(set(b)) + return list(s) + +def symmetric_difference_update_usecase(a, b): + s = set(a) + s.symmetric_difference_update(set(b)) + return list(s) + +def isdisjoint_usecase(a, b): + return set(a).isdisjoint(set(b)) + +def issubset_usecase(a, b): + return set(a).issubset(set(b)) + +def issuperset_usecase(a, b): + return set(a).issuperset(set(b)) + +def clear_usecase(a): + s = set(a) + s.clear() + return len(s), list(s) + +def copy_usecase(a): + s = set(a) + ss = s.copy() + s.pop() + return len(ss), list(ss) + +def copy_usecase_empty(a): + s = set(a) + s.clear() + ss = s.copy() + s.add(a[0]) + return 
len(ss), list(ss) + +def copy_usecase_deleted(a, b): + s = set(a) + s.remove(b) + ss = s.copy() + s.pop() + return len(ss), list(ss) + +def difference_usecase(a, b): + sa = set(a) + s = sa.difference(set(b)) + return list(s) + +def intersection_usecase(a, b): + sa = set(a) + s = sa.intersection(set(b)) + return list(s) + +def symmetric_difference_usecase(a, b): + sa = set(a) + s = sa.symmetric_difference(set(b)) + return list(s) + +def union_usecase(a, b): + sa = set(a) + s = sa.union(set(b)) + return list(s) + +def set_return_usecase(a): + s = set(a) + return s + + +def noop(x): + pass + +def unbox_usecase(x): + """ + Expect a set of numbers + """ + res = 0 + for v in x: + res += v + return res + +def unbox_usecase2(x): + """ + Expect a set of tuples + """ + res = 0 + for v in x: + res += len(v) + return res + +def unbox_usecase3(x): + """ + Expect a (number, set of numbers) tuple. + """ + a, b = x + res = a + for v in b: + res += v + return res + +def unbox_usecase4(x): + """ + Expect a (number, set of tuples) tuple. + """ + a, b = x + res = a + for v in b: + res += len(v) + return res + + +def reflect_simple(sa, sb): + sa.add(42) + sa.update(sb) + return sa, len(sa), len(sb) + +def reflect_conditional(sa, sb): + # `sa` may or may not actually reflect a Python set + if len(sb) > 1: + sa = set((11., 22., 33., 44.)) + sa.add(42.) 
+ sa.update(sb) + # Combine with a non-reflected set (to check method typing) + sc = set((55., 66.)) + sa.symmetric_difference_update(sc) + return sa, len(sa), len(sb) + +def reflect_exception(s): + s.add(42) + raise ZeroDivisionError + +def reflect_dual(sa, sb): + sa.add(sb.pop()) + return sa is sb + + +def unique_usecase(src): + seen = set() + res = [] + for v in src: + if v not in seen: + seen.add(v) + res.append(v) + return res + + +class BaseTest(MemoryLeakMixin, TestCase): + + def setUp(self): + super(BaseTest, self).setUp() + self.rnd = random.Random(42) + + def _range(self, stop): + return np.arange(int(stop)).tolist() + + def _random_choice(self, seq, n): + """ + Choose *n* possibly duplicate items from sequence. + """ + l = [self.rnd.choice(list(seq)) for i in range(n)] + if isinstance(seq, np.ndarray): + return np.array(l, dtype=seq.dtype) + else: + return l + + def duplicates_array(self, n): + """ + Get a 1d array with many duplicate values. + """ + a = self._range(np.sqrt(n)) + return self._random_choice(a, n) + + def sparse_array(self, n): + """ + Get a 1d array with values spread around. 
+ """ + # Note two calls to sparse_array() should generate reasonable overlap + a = self._range(n ** 1.3) + return self._random_choice(a, n) + + def _assert_equal_unordered(self, a, b): + if isinstance(a, tuple): + self.assertIsInstance(b, tuple) + for u, v in zip(a, b): + self._assert_equal_unordered(u, v) + elif isinstance(a, list): + self.assertIsInstance(b, list) + self.assertPreciseEqual(sorted(a), sorted(b)) + else: + self.assertPreciseEqual(a, b) + + def unordered_checker(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + def check(*args): + expected = pyfunc(*args) + got = cfunc(*args) + self._assert_equal_unordered(expected, got) + return check + + +class TestSetLiterals(BaseTest): + + def check(self, pyfunc): + cfunc = njit(pyfunc) + expected = pyfunc() + got = cfunc() + self.assertPreciseEqual(expected, got) + return got, expected + + def test_build_set(self): + pyfunc = set_literal_return_usecase((1, 2, 3, 2)) + self.check(pyfunc) + + def test_build_heterogeneous_set(self, flags=enable_pyobj_flags): + pyfunc = set_literal_return_usecase((1, 2.0, 3j, 2)) + self.check(pyfunc) + pyfunc = set_literal_return_usecase((2.0, 2)) + got, expected = self.check(pyfunc) + self.assertIs(type(got.pop()), type(expected.pop())) + + def test_build_set_nopython(self): + arg = list(self.sparse_array(50)) + pyfunc = set_literal_convert_usecase(arg) + cfunc = jit(nopython=True)(pyfunc) + + expected = pyfunc() + got = cfunc() + self.assertPreciseEqual(sorted(expected), sorted(got)) + + +class TestSets(BaseTest): + + def test_constructor(self): + pyfunc = empty_constructor_usecase + cfunc = jit(nopython=True)(pyfunc) + self.assertPreciseEqual(cfunc(), pyfunc()) + + pyfunc = constructor_usecase + cfunc = jit(nopython=True)(pyfunc) + def check(arg): + self.assertPreciseEqual(pyfunc(arg), cfunc(arg)) + + check(self.duplicates_array(200)) + check(self.sparse_array(200)) + + def test_set_return(self): + pyfunc = set_return_usecase + cfunc = jit(nopython=True)(pyfunc) + + arg = 
self.duplicates_array(200) + self.assertEqual(cfunc(arg), set(arg)) + + def test_iterator(self): + pyfunc = iterator_usecase + check = self.unordered_checker(pyfunc) + + check(self.duplicates_array(200)) + check(self.sparse_array(200)) + + def test_update(self): + pyfunc = update_usecase + check = self.unordered_checker(pyfunc) + + a = self.sparse_array(50) + b = self.duplicates_array(50) + c = self.sparse_array(50) + check(a, b, c) + + def test_remove(self): + pyfunc = remove_usecase + check = self.unordered_checker(pyfunc) + + a = self.sparse_array(50) + b = a[::10] + check(a, b) + + def test_remove_error(self): + # References are leaked on exception + self.disable_leak_check() + + pyfunc = remove_usecase + cfunc = jit(nopython=True)(pyfunc) + + # ensure that there will be a key error + items = tuple(set(self.sparse_array(3))) + a = items[1:] + b = (items[0],) + with self.assertRaises(KeyError): + cfunc(a, b) + + def test_discard(self): + pyfunc = discard_usecase + check = self.unordered_checker(pyfunc) + + a = self.sparse_array(50) + b = self.sparse_array(50) + check(a, b) + + def test_add_discard(self): + """ + Check that the insertion logic does not create an infinite lookup + chain with deleted entries (insertion should happen at the first + deleted entry, not at the free entry at the end of the chain). + See issue #1913. 
+ """ + pyfunc = add_discard_usecase + check = self.unordered_checker(pyfunc) + + # ensure a and b are different + a = b = None + while a == b: + a, b = self.sparse_array(2) + check((a,), b, b) + + def test_pop(self): + pyfunc = pop_usecase + check = self.unordered_checker(pyfunc) + + check(self.sparse_array(50)) + + def test_contains(self): + pyfunc = contains_usecase + cfunc = jit(nopython=True)(pyfunc) + def check(a, b): + self.assertPreciseEqual(pyfunc(a, b), cfunc(a, b)) + + a = self.sparse_array(50) + b = self.sparse_array(50) + check(a, b) + + def _test_xxx_update(self, pyfunc): + check = self.unordered_checker(pyfunc) + + sizes = (1, 50, 500) + for na, nb in itertools.product(sizes, sizes): + a = self.sparse_array(na) + b = self.sparse_array(nb) + check(a, b) + + def test_difference_update(self): + self._test_xxx_update(difference_update_usecase) + + def test_intersection_update(self): + self._test_xxx_update(intersection_update_usecase) + + def test_symmetric_difference_update(self): + self._test_xxx_update(symmetric_difference_update_usecase) + + def _test_comparator(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + def check(a, b): + self.assertPreciseEqual(pyfunc(a, b), cfunc(a, b)) + + a, b = map(set, [self.sparse_array(10), self.sparse_array(15)]) + args = [a & b, a - b, a | b, a ^ b] + args = [tuple(x) for x in args] + for a, b in itertools.product(args, args): + check(a, b) + + def test_isdisjoint(self): + self._test_comparator(isdisjoint_usecase) + + def test_issubset(self): + self._test_comparator(issubset_usecase) + + def test_issuperset(self): + self._test_comparator(issuperset_usecase) + + def test_clear(self): + pyfunc = clear_usecase + check = self.unordered_checker(pyfunc) + + check(self.sparse_array(50)) + + def test_copy(self): + # Source set doesn't have any deleted entries + pyfunc = copy_usecase + check = self.unordered_checker(pyfunc) + check(self.sparse_array(50)) + + pyfunc = copy_usecase_empty + check = 
self.unordered_checker(pyfunc) + a = self.sparse_array(1) + check(a) + + # Source set has deleted entries + pyfunc = copy_usecase_deleted + check = self.unordered_checker(pyfunc) + check((1, 2, 4, 11), 2) + a = self.sparse_array(50) + check(a, a[len(a) // 2]) + + def test_bool(self): + pyfunc = bool_usecase + check = self.unordered_checker(pyfunc) + + check(self.sparse_array(1)) + check(self.sparse_array(2)) + + def _test_set_operator(self, pyfunc): + check = self.unordered_checker(pyfunc) + + a, b = (1, 2, 4, 11), (2, 3, 5, 11, 42) + check(a, b) + + sizes = (1, 50, 500) + for na, nb in itertools.product(sizes, sizes): + a = self.sparse_array(na) + b = self.sparse_array(nb) + check(a, b) + + def make_operator_usecase(self, op): + code = """if 1: + def operator_usecase(a, b): + s = set(a) %(op)s set(b) + return list(s) + """ % dict(op=op) + return compile_function('operator_usecase', code, globals()) + + def make_inplace_operator_usecase(self, op): + code = """if 1: + def inplace_operator_usecase(a, b): + sa = set(a) + sb = set(b) + sc = sa + sc %(op)s sb + return list(sc), list(sa) + """ % dict(op=op) + return compile_function('inplace_operator_usecase', code, globals()) + + def make_comparison_usecase(self, op): + code = """if 1: + def comparison_usecase(a, b): + return set(a) %(op)s set(b) + """ % dict(op=op) + return compile_function('comparison_usecase', code, globals()) + + def test_difference(self): + self._test_set_operator(difference_usecase) + + def test_intersection(self): + self._test_set_operator(intersection_usecase) + + def test_symmetric_difference(self): + self._test_set_operator(symmetric_difference_usecase) + + def test_union(self): + self._test_set_operator(union_usecase) + + def test_and(self): + self._test_set_operator(self.make_operator_usecase('&')) + + def test_or(self): + self._test_set_operator(self.make_operator_usecase('|')) + + def test_sub(self): + self._test_set_operator(self.make_operator_usecase('-')) + + def test_xor(self): + 
self._test_set_operator(self.make_operator_usecase('^')) + + def test_eq(self): + self._test_set_operator(self.make_comparison_usecase('==')) + + def test_ne(self): + self._test_set_operator(self.make_comparison_usecase('!=')) + + def test_le(self): + self._test_set_operator(self.make_comparison_usecase('<=')) + + def test_lt(self): + self._test_set_operator(self.make_comparison_usecase('<')) + + def test_ge(self): + self._test_set_operator(self.make_comparison_usecase('>=')) + + def test_gt(self): + self._test_set_operator(self.make_comparison_usecase('>')) + + def test_iand(self): + self._test_set_operator(self.make_inplace_operator_usecase('&=')) + + def test_ior(self): + self._test_set_operator(self.make_inplace_operator_usecase('|=')) + + def test_isub(self): + self._test_set_operator(self.make_inplace_operator_usecase('-=')) + + def test_ixor(self): + self._test_set_operator(self.make_inplace_operator_usecase('^=')) + + +class TestFloatSets(TestSets): + """ + Test sets with floating-point keys. + """ + # Only a few basic tests here, as the sanity of most operations doesn't + # depend on the key type. + + def _range(self, stop): + return np.arange(stop, dtype=np.float32) * np.float32(0.1) + + +class TestTupleSets(TestSets): + """ + Test sets with tuple keys. + """ + def _range(self, stop): + a = np.arange(stop, dtype=np.int64) + b = a & 0x5555555555555555 + c = (a & 0xaaaaaaaa).astype(np.int32) + d = ((a >> 32) & 1).astype(np.bool_) + return list(zip(b, c, d)) + + +class TestUnicodeSets(TestSets): + """ + Test sets with unicode keys. For the purpose of testing refcounted sets. 
+ """ + def _range(self, stop): + return ['A{}'.format(i) for i in range(int(stop))] + + +class TestSetsInvalidDtype(TestSets): + + def _test_set_operator(self, pyfunc): + # it is invalid to apply some set operations on + # sets with different dtype + cfunc = jit(nopython=True)(pyfunc) + + a = set([1, 2, 4, 11]) + b = set(['a', 'b', 'c']) + msg = 'All Sets must be of the same type' + with self.assertRaisesRegex(TypingError, msg): + cfunc(a, b) + + +class TestSetsInvalid(TestSets): + + def symmetric_difference_usecase(a, b): + s = a.symmetric_difference(b) + return list(s) + + def difference_usecase(a, b): + s = a.difference(b) + return list(s) + + def intersection_usecase(a, b): + s = a.intersection(b) + return list(s) + + def union_usecase(a, b): + s = a.union(b) + return list(s) + + def _test_set_operator(self, pyfunc): + # it is invalid to apply some set operations on + # sets with different dtype + cfunc = jit(nopython=True)(pyfunc) + + a = set([1, 2, 4, 11]) + b = (1, 2, 3) + msg = 'All arguments must be Sets' + with self.assertRaisesRegex(TypingError, msg): + cfunc(a, b) + + def test_difference(self): + self._test_set_operator(TestSetsInvalid.difference_usecase) + + def test_intersection(self): + self._test_set_operator(TestSetsInvalid.intersection_usecase) + + def test_symmetric_difference(self): + self._test_set_operator(TestSetsInvalid.symmetric_difference_usecase) + + def test_union(self): + self._test_set_operator(TestSetsInvalid.union_usecase) + + def make_operator_usecase(self, op): + code = """if 1: + def operator_usecase(a, b): + s = a %(op)s b + return list(s) + """ % dict(op=op) + return compile_function('operator_usecase', code, globals()) + + def make_inplace_operator_usecase(self, op): + code = """if 1: + def inplace_operator_usecase(a, b): + sa = a + sb = b + sc = sa + sc %(op)s sb + return list(sc), list(sa) + """ % dict(op=op) + return compile_function('inplace_operator_usecase', code, globals()) + + def make_comparison_usecase(self, op): + 
code = """if 1: + def comparison_usecase(a, b): + return set(a) %(op)s b + """ % dict(op=op) + return compile_function('comparison_usecase', code, globals()) + + +class TestUnboxing(BaseTest): + """ + Test unboxing of Python sets into native Numba sets. + """ + + @contextlib.contextmanager + def assert_type_error(self, msg): + with self.assertRaises(TypeError) as raises: + yield + if msg is not None: + self.assertRegex(str(raises.exception), msg) + + def check_unary(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + def check(arg): + expected = pyfunc(arg) + got = cfunc(arg) + self.assertPreciseEqual(got, expected) + return check + + def test_numbers(self): + check = self.check_unary(unbox_usecase) + check(set([1, 2])) + check(set([1j, 2.5j])) + # Check allocation and sizing + check(set(range(100))) + + def test_tuples(self): + check = self.check_unary(unbox_usecase2) + check(set([(1, 2), (3, 4)])) + check(set([(1, 2j), (3, 4j)])) + + def test_set_inside_tuple(self): + check = self.check_unary(unbox_usecase3) + check((1, set([2, 3, 4]))) + + def test_set_of_tuples_inside_tuple(self): + check = self.check_unary(unbox_usecase4) + check((1, set([(2,), (3,)]))) + + def test_errors(self): + # Error checking should ensure the set is homogeneous + msg = "can't unbox heterogeneous set" + pyfunc = noop + cfunc = jit(nopython=True)(pyfunc) + val = set([1, 2.5]) + with self.assert_type_error(msg): + cfunc(val) + # The set hasn't been changed (bogus reflecting) + self.assertEqual(val, set([1, 2.5])) + with self.assert_type_error(msg): + cfunc(set([1, 2j])) + # Same when the set is nested in a tuple or namedtuple + with self.assert_type_error(msg): + cfunc((1, set([1, 2j]))) + with self.assert_type_error(msg): + cfunc(Point(1, set([1, 2j]))) + # Tuples of different size. + # Note the check is really on the tuple side. + lst = set([(1,), (2, 3)]) + # Depending on which tuple is examined first, we could get + # a IndexError or a ValueError. 
+ with self.assertRaises((IndexError, ValueError)) as raises: + cfunc(lst) + + +class TestSetReflection(BaseTest): + """ + Test reflection of native Numba sets on Python set objects. + """ + + def check_reflection(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + samples = [(set([1., 2., 3., 4.]), set([0.])), + (set([1., 2., 3., 4.]), set([5., 6., 7., 8., 9.])), + ] + for dest, src in samples: + expected = set(dest) + got = set(dest) + pyres = pyfunc(expected, src) + with self.assertRefCount(got, src): + cres = cfunc(got, src) + self.assertPreciseEqual(cres, pyres) + self.assertPreciseEqual(expected, got) + self.assertEqual(pyres[0] is expected, cres[0] is got) + del pyres, cres + + def test_reflect_simple(self): + self.check_reflection(reflect_simple) + + def test_reflect_conditional(self): + self.check_reflection(reflect_conditional) + + def test_reflect_exception(self): + """ + When the function exits with an exception, sets should still be + reflected. + """ + pyfunc = reflect_exception + cfunc = jit(nopython=True)(pyfunc) + s = set([1, 2, 3]) + with self.assertRefCount(s): + with self.assertRaises(ZeroDivisionError): + cfunc(s) + self.assertPreciseEqual(s, set([1, 2, 3, 42])) + + def test_reflect_same_set(self): + """ + When the same set object is reflected twice, behaviour should + be consistent. + """ + pyfunc = reflect_dual + cfunc = jit(nopython=True)(pyfunc) + pyset = set([1, 2, 3]) + cset = pyset.copy() + expected = pyfunc(pyset, pyset) + got = cfunc(cset, cset) + self.assertPreciseEqual(expected, got) + self.assertPreciseEqual(pyset, cset) + self.assertRefCountEqual(pyset, cset) + + def test_reflect_clean(self): + """ + When the set wasn't mutated, no reflection should take place. + """ + cfunc = jit(nopython=True)(noop) + # Use a complex, as Python integers can be cached + s = set([12.5j]) + ids = [id(x) for x in s] + cfunc(s) + self.assertEqual([id(x) for x in s], ids) + + +class TestExamples(BaseTest): + """ + Examples of using sets. 
+ """ + + def test_unique(self): + pyfunc = unique_usecase + check = self.unordered_checker(pyfunc) + + check(self.duplicates_array(200)) + check(self.sparse_array(200)) + + def test_type_coercion_from_update(self): + # see issue #6621 + def impl(): + i = np.uint64(1) + R = set() + R.update({1, 2, 3}) + R.add(i) + return R + check = self.unordered_checker(impl) + check() + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_slices.py b/venv/lib/python3.10/site-packages/numba/tests/test_slices.py new file mode 100644 index 0000000000000000000000000000000000000000..a6badd1ac2adf38f989674a4d15c57b7c83731c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_slices.py @@ -0,0 +1,259 @@ +from functools import partial +import itertools +from itertools import chain, product, starmap +import sys + +import numpy as np + +from numba import jit, literally, njit, typeof, TypingError +from numba.core import utils, types +from numba.tests.support import TestCase, MemoryLeakMixin +from numba.core.types.functions import _header_lead +import unittest + + +def slice_passing(sl): + return sl.start, sl.stop, sl.step + +def slice_constructor(*args): + sl = slice(*args) + return sl.start, sl.stop, sl.step + +def slice_construct_and_use(args, l): + sl = slice(*args) + return l[sl] + +def slice_indices(s, *indargs): + return s.indices(*indargs) + +class TestSlices(MemoryLeakMixin, TestCase): + + def test_slice_passing(self): + """ + Check passing a slice object to a Numba function. 
+ """ + # NOTE this also checks slice attributes + def check(a, b, c, d, e, f): + sl = slice(a, b, c) + got = cfunc(sl) + self.assertPreciseEqual(got, (d, e, f)) + + maxposint = sys.maxsize + maxnegint = -maxposint - 1 + cfunc = jit(nopython=True)(slice_passing) + + # Positive steps + start_cases = [(None, 0), (42, 42), (-1, -1)] + stop_cases = [(None, maxposint), (9, 9), (-11, -11)] + step_cases = [(None, 1), (12, 12)] + for (a, d), (b, e), (c, f) in itertools.product(start_cases, + stop_cases, + step_cases): + check(a, b, c, d, e, f) + + # Negative steps + start_cases = [(None, maxposint), (42, 42), (-1, -1)] + stop_cases = [(None, maxnegint), (9, 9), (-11, -11)] + step_cases = [(-1, -1), (-12, -12)] + for (a, d), (b, e), (c, f) in itertools.product(start_cases, + stop_cases, + step_cases): + check(a, b, c, d, e, f) + + # Some member is neither integer nor None + with self.assertRaises(TypeError): + cfunc(slice(1.5, 1, 1)) + + def test_slice_constructor(self): + """ + Test the 'happy path' for slice() constructor in nopython mode. 
+ """ + maxposint = sys.maxsize + maxnegint = -maxposint - 1 + a = np.arange(10) + cfunc = jit(nopython=True)(slice_constructor) + cfunc_use = jit(nopython=True)(slice_construct_and_use) + for args, expected in [ + ((None,), (0, maxposint, 1)), + ((5,), (0, 5, 1)), + ((None, None), (0, maxposint, 1)), + ((1, None), (1, maxposint, 1)), + ((None, 2), (0, 2, 1)), + ((1, 2), (1, 2, 1)), + ((None, None, 3), (0, maxposint, 3)), + ((None, 2, 3), (0, 2, 3)), + ((1, None, 3), (1, maxposint, 3)), + ((1, 2, 3), (1, 2, 3)), + ((None, None, -1), (maxposint, maxnegint, -1)), + ((10, None, -1), (10, maxnegint, -1)), + ((None, 5, -1), (maxposint, 5, -1)), + ((10, 5, -1), (10, 5, -1)), + ]: + got = cfunc(*args) + self.assertPreciseEqual(got, expected) + usage = slice_construct_and_use(args, a) + cusage = cfunc_use(args, a) + self.assertPreciseEqual(usage, cusage) + + def test_slice_constructor_cases(self): + """ + Test that slice constructor behaves same in python and compiled code. + """ + options = (None, -1, 0, 1) + arg_cases = chain.from_iterable( + product(options, repeat=n) for n in range(5) + ) + array = np.arange(10) + + cfunc = jit(nopython=True)(slice_construct_and_use) + + self.disable_leak_check() + for args in arg_cases: + try: + expected = slice_construct_and_use(args, array) + except TypeError as py_type_e: + # Catch cases of 0, or more than 3 arguments. 
+ # This becomes a typing error in numba + n_args = len(args) + self.assertRegex( + str(py_type_e), + r"slice expected at (most|least) (3|1) arguments?, got {}" + .format(n_args) + ) + with self.assertRaises(TypingError) as numba_e: + cfunc(args, array) + self.assertIn( + _header_lead, + str(numba_e.exception) + ) + self.assertIn( + ", ".join(str(typeof(arg)) for arg in args), + str(numba_e.exception) + ) + except Exception as py_e: + with self.assertRaises(type(py_e)) as numba_e: + cfunc(args, array) + self.assertIn( + str(py_e), + str(numba_e.exception) + ) + else: + self.assertPreciseEqual(expected, cfunc(args, array)) + + def test_slice_indices(self): + """Test that a numba slice returns same result for .indices as a python one.""" + slices = starmap( + slice, + product( + chain(range(-5, 5), (None,)), + chain(range(-5, 5), (None,)), + chain(range(-5, 5), (None,)) + ) + ) + lengths = range(-2, 3) + + cfunc = jit(nopython=True)(slice_indices) + + for s, l in product(slices, lengths): + try: + expected = slice_indices(s, l) + except Exception as py_e: + with self.assertRaises(type(py_e)) as numba_e: + cfunc(s, l) + self.assertIn( + str(py_e), + str(numba_e.exception) + ) + else: + self.assertPreciseEqual(expected, cfunc(s, l)) + + def test_slice_indices_examples(self): + """Tests for specific error cases.""" + cslice_indices = jit(nopython=True)(slice_indices) + + with self.assertRaises(TypingError) as e: + cslice_indices(slice(None), 1, 2, 3) + self.assertIn( + "indices() takes exactly one argument (3 given)", + str(e.exception) + ) + + with self.assertRaises(TypingError) as e: + cslice_indices(slice(None, None, 0), 1.2) + self.assertIn( + "'%s' object cannot be interpreted as an integer" % typeof(1.2), + str(e.exception) + ) + + def test_slice_from_constant(self): + test_tuple = (1, 2, 3, 4) + + for ts in itertools.product( + [None, 1, 2, 3], [None, 1, 2, 3], [None, 1, 2, -1, -2] + ): + ts = slice(*ts) + + @jit(nopython=True) + def test_fn(): + return 
test_tuple[ts] + + self.assertEqual(test_fn(), test_fn.py_func()) + + def test_literal_slice_distinct(self): + sl1 = types.misc.SliceLiteral(slice(1, None, None)) + sl2 = types.misc.SliceLiteral(slice(None, None, None)) + sl3 = types.misc.SliceLiteral(slice(1, None, None)) + + self.assertNotEqual(sl1, sl2) + self.assertEqual(sl1, sl3) + + def test_literal_slice_boxing(self): + # Tests that a literal slice can be + # returned from a JIT function. + @njit + def f(x): + return literally(x) + + slices = ( + slice(1, 4, 2), + slice(1, 2), + slice(1), + slice(None, 1, 1), + slice(1, None, 1), + slice(None, None, 1), + slice(None), + slice(None, None, None) + ) + for sl in slices: + self.assertEqual(sl, f(sl)) + + + def test_literal_slice_freevar(self): + # Tests passing a literal slice as a freevar + # in a closure. + z = slice(1, 2, 3) + @njit + def foo(): + return z + + self.assertEqual(z, foo()) + + def test_literal_slice_maxint(self): + # Tests that passing a slice with an integer + # that exceeds the maxint size throws a reasonable + # error message. 
+ @njit() + def foo(z): + return literally(z) + + maxval = int(2**63) + with self.assertRaises(ValueError) as e: + foo(slice(None, None, -maxval-1)) + self.assertIn( + "Int value is too large", + str(e.exception) + ) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_sort.py b/venv/lib/python3.10/site-packages/numba/tests/test_sort.py new file mode 100644 index 0000000000000000000000000000000000000000..5a63c775984af1890f97b3ef5985d6bcb865c24b --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_sort.py @@ -0,0 +1,1256 @@ +import copy +import itertools +import math +import random +import sys +import unittest + +import numpy as np + +from numba import jit, njit +from numba.core import utils, errors +from numba.tests.support import TestCase, MemoryLeakMixin + +from numba.misc.quicksort import make_py_quicksort, make_jit_quicksort +from numba.misc.mergesort import make_jit_mergesort +from numba.misc.timsort import make_py_timsort, make_jit_timsort, MergeRun + + +def make_temp_list(keys, n): + return [keys[0]] * n + +def make_temp_array(keys, n): + return np.empty(n, keys.dtype) + + +py_list_timsort = make_py_timsort(make_temp_list) + +py_array_timsort = make_py_timsort(make_temp_array) + +jit_list_timsort = make_jit_timsort(make_temp_list) + +jit_array_timsort = make_jit_timsort(make_temp_array) + +py_quicksort = make_py_quicksort() + +jit_quicksort = make_jit_quicksort() + + +def sort_usecase(val): + val.sort() + +def argsort_usecase(val): + return val.argsort() + +def argsort_kind_usecase(val, is_stable=False): + if is_stable: + return val.argsort(kind='mergesort') + else: + return val.argsort(kind='quicksort') + +def sorted_usecase(val): + return sorted(val) + +def sorted_reverse_usecase(val, b): + return sorted(val, reverse=b) + +def np_sort_usecase(val): + return np.sort(val) + +def np_argsort_usecase(val): + return np.argsort(val) + +def np_argsort_kind_usecase(val, is_stable=False): 
+ if is_stable: + return np.argsort(val, kind='mergesort') + else: + return np.argsort(val, kind='quicksort') + +def list_sort_usecase(n): + np.random.seed(42) + l = [] + for i in range(n): + l.append(np.random.random()) + ll = l[:] + ll.sort() + return l, ll + +def list_sort_reverse_usecase(n, b): + np.random.seed(42) + l = [] + for i in range(n): + l.append(np.random.random()) + ll = l[:] + ll.sort(reverse=b) + return l, ll + + +class BaseSortingTest(object): + + def random_list(self, n, offset=10): + random.seed(42) + l = list(range(offset, offset + n)) + random.shuffle(l) + return l + + def sorted_list(self, n, offset=10): + return list(range(offset, offset + n)) + + def revsorted_list(self, n, offset=10): + return list(range(offset, offset + n))[::-1] + + def initially_sorted_list(self, n, m=None, offset=10): + if m is None: + m = n // 2 + l = self.sorted_list(m, offset) + l += self.random_list(n - m, offset=l[-1] + offset) + return l + + def duprandom_list(self, n, factor=None, offset=10): + random.seed(42) + if factor is None: + factor = int(math.sqrt(n)) + l = (list(range(offset, offset + (n // factor) + 1)) * (factor + 1))[:n] + assert len(l) == n + random.shuffle(l) + return l + + def dupsorted_list(self, n, factor=None, offset=10): + if factor is None: + factor = int(math.sqrt(n)) + l = (list(range(offset, offset + (n // factor) + 1)) * (factor + 1))[:n] + assert len(l) == n, (len(l), n) + l.sort() + return l + + def assertSorted(self, orig, result): + self.assertEqual(len(result), len(orig)) + # sorted() returns a list, so make sure we compare to another list + self.assertEqual(list(result), sorted(orig)) + + def assertSortedValues(self, orig, orig_values, result, result_values): + self.assertEqual(len(result), len(orig)) + self.assertEqual(list(result), sorted(orig)) + zip_sorted = sorted(zip(orig, orig_values), key=lambda x: x[0]) + zip_result = list(zip(result, result_values)) + self.assertEqual(zip_sorted, zip_result) + # Check stability + for i in 
range(len(zip_result) - 1): + (k1, v1), (k2, v2) = zip_result[i], zip_result[i + 1] + if k1 == k2: + # Assuming values are unique, which is enforced by the tests + self.assertLess(orig_values.index(v1), orig_values.index(v2)) + + def fibo(self): + a = 1 + b = 1 + while True: + yield a + a, b = b, a + b + + def make_sample_sorted_lists(self, n): + lists = [] + for offset in (20, 120): + lists.append(self.sorted_list(n, offset)) + lists.append(self.dupsorted_list(n, offset)) + return lists + + def make_sample_lists(self, n): + lists = [] + for offset in (20, 120): + lists.append(self.sorted_list(n, offset)) + lists.append(self.dupsorted_list(n, offset)) + lists.append(self.revsorted_list(n, offset)) + lists.append(self.duprandom_list(n, offset)) + return lists + + +class BaseTimsortTest(BaseSortingTest): + + def merge_init(self, keys): + f = self.timsort.merge_init + return f(keys) + + def test_binarysort(self): + n = 20 + def check(l, n, start=0): + res = self.array_factory(l) + f(res, res, 0, n, start) + self.assertSorted(l, res) + + f = self.timsort.binarysort + l = self.sorted_list(n) + check(l, n) + check(l, n, n//2) + l = self.revsorted_list(n) + check(l, n) + l = self.initially_sorted_list(n, n//2) + check(l, n) + check(l, n, n//2) + l = self.revsorted_list(n) + check(l, n) + l = self.random_list(n) + check(l, n) + l = self.duprandom_list(n) + check(l, n) + + def test_binarysort_with_values(self): + n = 20 + v = list(range(100, 100+n)) + + def check(l, n, start=0): + res = self.array_factory(l) + res_v = self.array_factory(v) + f(res, res_v, 0, n, start) + self.assertSortedValues(l, v, res, res_v) + + f = self.timsort.binarysort + l = self.sorted_list(n) + check(l, n) + check(l, n, n//2) + l = self.revsorted_list(n) + check(l, n) + l = self.initially_sorted_list(n, n//2) + check(l, n) + check(l, n, n//2) + l = self.revsorted_list(n) + check(l, n) + l = self.random_list(n) + check(l, n) + l = self.duprandom_list(n) + check(l, n) + + def test_count_run(self): + 
n = 16 + f = self.timsort.count_run + + def check(l, lo, hi): + n, desc = f(self.array_factory(l), lo, hi) + # Fully check invariants + if desc: + for k in range(lo, lo + n - 1): + a, b = l[k], l[k + 1] + self.assertGreater(a, b) + if lo + n < hi: + self.assertLessEqual(l[lo + n - 1], l[lo + n]) + else: + for k in range(lo, lo + n - 1): + a, b = l[k], l[k + 1] + self.assertLessEqual(a, b) + if lo + n < hi: + self.assertGreater(l[lo + n - 1], l[lo + n], l) + + + l = self.sorted_list(n, offset=100) + check(l, 0, n) + check(l, 1, n - 1) + check(l, 1, 2) + l = self.revsorted_list(n, offset=100) + check(l, 0, n) + check(l, 1, n - 1) + check(l, 1, 2) + l = self.random_list(n, offset=100) + for i in range(len(l) - 1): + check(l, i, n) + l = self.duprandom_list(n, offset=100) + for i in range(len(l) - 1): + check(l, i, n) + + def test_gallop_left(self): + n = 20 + f = self.timsort.gallop_left + + def check(l, key, start, stop, hint): + k = f(key, l, start, stop, hint) + # Fully check invariants + self.assertGreaterEqual(k, start) + self.assertLessEqual(k, stop) + if k > start: + self.assertLess(l[k - 1], key) + if k < stop: + self.assertGreaterEqual(l[k], key) + + def check_all_hints(l, key, start, stop): + for hint in range(start, stop): + check(l, key, start, stop, hint) + + def check_sorted_list(l): + l = self.array_factory(l) + for key in (l[5], l[15], l[0], -1000, l[-1], 1000): + check_all_hints(l, key, 0, n) + check_all_hints(l, key, 1, n - 1) + check_all_hints(l, key, 8, n - 8) + + l = self.sorted_list(n, offset=100) + check_sorted_list(l) + l = self.dupsorted_list(n, offset=100) + check_sorted_list(l) + + def test_gallop_right(self): + n = 20 + f = self.timsort.gallop_right + + def check(l, key, start, stop, hint): + k = f(key, l, start, stop, hint) + # Fully check invariants + self.assertGreaterEqual(k, start) + self.assertLessEqual(k, stop) + if k > start: + self.assertLessEqual(l[k - 1], key) + if k < stop: + self.assertGreater(l[k], key) + + def 
check_all_hints(l, key, start, stop): + for hint in range(start, stop): + check(l, key, start, stop, hint) + + def check_sorted_list(l): + l = self.array_factory(l) + for key in (l[5], l[15], l[0], -1000, l[-1], 1000): + check_all_hints(l, key, 0, n) + check_all_hints(l, key, 1, n - 1) + check_all_hints(l, key, 8, n - 8) + + l = self.sorted_list(n, offset=100) + check_sorted_list(l) + l = self.dupsorted_list(n, offset=100) + check_sorted_list(l) + + def test_merge_compute_minrun(self): + f = self.timsort.merge_compute_minrun + + for i in range(0, 64): + self.assertEqual(f(i), i) + for i in range(6, 63): + if 2**i > sys.maxsize: + break + self.assertEqual(f(2**i), 32) + for i in self.fibo(): + if i < 64: + continue + if i >= sys.maxsize: + break + k = f(i) + self.assertGreaterEqual(k, 32) + self.assertLessEqual(k, 64) + if i > 500: + # i/k is close to, but strictly less than, an exact power of 2 + quot = i // k + p = 2 ** utils.bit_length(quot) + self.assertLess(quot, p) + self.assertGreaterEqual(quot, 0.9 * p) + + def check_merge_lo_hi(self, func, a, b): + na = len(a) + nb = len(b) + + # Add sentinels at start and end, to check they weren't moved + orig_keys = [42] + a + b + [-42] + keys = self.array_factory(orig_keys) + ms = self.merge_init(keys) + ssa = 1 + ssb = ssa + na + + #new_ms = func(ms, keys, [], ssa, na, ssb, nb) + new_ms = func(ms, keys, keys, ssa, na, ssb, nb) + self.assertEqual(keys[0], orig_keys[0]) + self.assertEqual(keys[-1], orig_keys[-1]) + self.assertSorted(orig_keys[1:-1], keys[1:-1]) + # Check the MergeState result + self.assertGreaterEqual(len(new_ms.keys), len(ms.keys)) + self.assertGreaterEqual(len(new_ms.values), len(ms.values)) + self.assertIs(new_ms.pending, ms.pending) + self.assertGreaterEqual(new_ms.min_gallop, 1) + + def test_merge_lo_hi(self): + f_lo = self.timsort.merge_lo + f_hi = self.timsort.merge_hi + + # The larger sizes exercise galloping + for (na, nb) in [(12, 16), (40, 40), (100, 110), (1000, 1100)]: + for a, b in 
itertools.product(self.make_sample_sorted_lists(na), + self.make_sample_sorted_lists(nb)): + self.check_merge_lo_hi(f_lo, a, b) + self.check_merge_lo_hi(f_hi, b, a) + + def check_merge_at(self, a, b): + f = self.timsort.merge_at + # Prepare the array to be sorted + na = len(a) + nb = len(b) + # Add sentinels at start and end, to check they weren't moved + orig_keys = [42] + a + b + [-42] + ssa = 1 + ssb = ssa + na + + stack_sentinel = MergeRun(-42, -42) + + def run_merge_at(ms, keys, i): + new_ms = f(ms, keys, keys, i) + self.assertEqual(keys[0], orig_keys[0]) + self.assertEqual(keys[-1], orig_keys[-1]) + self.assertSorted(orig_keys[1:-1], keys[1:-1]) + # Check stack state + self.assertIs(new_ms.pending, ms.pending) + self.assertEqual(ms.pending[i], (ssa, na + nb)) + self.assertEqual(ms.pending[0], stack_sentinel) + return new_ms + + # First check with i == len(stack) - 2 + keys = self.array_factory(orig_keys) + ms = self.merge_init(keys) + # Push sentinel on stack, to check it wasn't touched + ms = self.timsort.merge_append(ms, stack_sentinel) + i = ms.n + ms = self.timsort.merge_append(ms, MergeRun(ssa, na)) + ms = self.timsort.merge_append(ms, MergeRun(ssb, nb)) + ms = run_merge_at(ms, keys, i) + self.assertEqual(ms.n, i + 1) + + # Now check with i == len(stack) - 3 + keys = self.array_factory(orig_keys) + ms = self.merge_init(keys) + # Push sentinel on stack, to check it wasn't touched + ms = self.timsort.merge_append(ms, stack_sentinel) + i = ms.n + ms = self.timsort.merge_append(ms, MergeRun(ssa, na)) + ms = self.timsort.merge_append(ms, MergeRun(ssb, nb)) + # A last run (trivial here) + last_run = MergeRun(ssb + nb, 1) + ms = self.timsort.merge_append(ms, last_run) + ms = run_merge_at(ms, keys, i) + self.assertEqual(ms.n, i + 2) + self.assertEqual(ms.pending[ms.n - 1], last_run) + + def test_merge_at(self): + # The larger sizes exercise galloping + for (na, nb) in [(12, 16), (40, 40), (100, 110), (500, 510)]: + for a, b in 
itertools.product(self.make_sample_sorted_lists(na), + self.make_sample_sorted_lists(nb)): + self.check_merge_at(a, b) + self.check_merge_at(b, a) + + def test_merge_force_collapse(self): + f = self.timsort.merge_force_collapse + + # Test with runs of ascending sizes, then descending sizes + sizes_list = [(8, 10, 15, 20)] + sizes_list.append(sizes_list[0][::-1]) + + for sizes in sizes_list: + for chunks in itertools.product(*(self.make_sample_sorted_lists(n) + for n in sizes)): + # Create runs of the given sizes + orig_keys = sum(chunks, []) + keys = self.array_factory(orig_keys) + ms = self.merge_init(keys) + pos = 0 + for c in chunks: + ms = self.timsort.merge_append(ms, MergeRun(pos, len(c))) + pos += len(c) + # Sanity check + self.assertEqual(sum(ms.pending[ms.n - 1]), len(keys)) + # Now merge the runs + ms = f(ms, keys, keys) + # Remaining run is the whole list + self.assertEqual(ms.n, 1) + self.assertEqual(ms.pending[0], MergeRun(0, len(keys))) + # The list is now sorted + self.assertSorted(orig_keys, keys) + + def test_run_timsort(self): + f = self.timsort.run_timsort + + for size_factor in (1, 10): + # Make lists to be sorted from three chunks of different kinds. 
+ sizes = (15, 30, 20) + + all_lists = [self.make_sample_lists(n * size_factor) for n in sizes] + for chunks in itertools.product(*all_lists): + orig_keys = sum(chunks, []) + keys = self.array_factory(orig_keys) + f(keys) + # The list is now sorted + self.assertSorted(orig_keys, keys) + + def test_run_timsort_with_values(self): + # Run timsort, but also with a values array + f = self.timsort.run_timsort_with_values + + for size_factor in (1, 5): + chunk_size = 80 * size_factor + a = self.dupsorted_list(chunk_size) + b = self.duprandom_list(chunk_size) + c = self.revsorted_list(chunk_size) + orig_keys = a + b + c + orig_values = list(range(1000, 1000 + len(orig_keys))) + + keys = self.array_factory(orig_keys) + values = self.array_factory(orig_values) + f(keys, values) + # This checks sort stability + self.assertSortedValues(orig_keys, orig_values, keys, values) + + +class TestTimsortPurePython(BaseTimsortTest, TestCase): + + timsort = py_list_timsort + + # Much faster than a Numpy array in pure Python + array_factory = list + + +class TestTimsortArraysPurePython(BaseTimsortTest, TestCase): + + timsort = py_array_timsort + + def array_factory(self, lst): + return np.array(lst, dtype=np.int32) + + +class JITTimsortMixin(object): + + timsort = jit_array_timsort + + test_merge_at = None + test_merge_force_collapse = None + + def wrap_with_mergestate(self, timsort, func, _cache={}): + """ + Wrap *func* into another compiled function inserting a runtime-created + mergestate as the first function argument. 
+ """ + key = timsort, func + if key in _cache: + return _cache[key] + + merge_init = timsort.merge_init + + @timsort.compile + def wrapper(keys, values, *args): + ms = merge_init(keys) + res = func(ms, keys, values, *args) + return res + + _cache[key] = wrapper + return wrapper + + +class TestTimsortArrays(JITTimsortMixin, BaseTimsortTest, TestCase): + + def array_factory(self, lst): + return np.array(lst, dtype=np.int32) + + def check_merge_lo_hi(self, func, a, b): + na = len(a) + nb = len(b) + + func = self.wrap_with_mergestate(self.timsort, func) + + # Add sentinels at start and end, to check they weren't moved + orig_keys = [42] + a + b + [-42] + keys = self.array_factory(orig_keys) + ssa = 1 + ssb = ssa + na + + new_ms = func(keys, keys, ssa, na, ssb, nb) + self.assertEqual(keys[0], orig_keys[0]) + self.assertEqual(keys[-1], orig_keys[-1]) + self.assertSorted(orig_keys[1:-1], keys[1:-1]) + + + +class BaseQuicksortTest(BaseSortingTest): + + # NOTE these tests assume a non-argsort quicksort. 
+ + def test_insertion_sort(self): + n = 20 + def check(l, n): + res = self.array_factory([9999] + l + [-9999]) + f(res, res, 1, n) + self.assertEqual(res[0], 9999) + self.assertEqual(res[-1], -9999) + self.assertSorted(l, res[1:-1]) + + f = self.quicksort.insertion_sort + l = self.sorted_list(n) + check(l, n) + l = self.revsorted_list(n) + check(l, n) + l = self.initially_sorted_list(n, n//2) + check(l, n) + l = self.revsorted_list(n) + check(l, n) + l = self.random_list(n) + check(l, n) + l = self.duprandom_list(n) + check(l, n) + + def test_partition(self): + n = 20 + def check(l, n): + res = self.array_factory([9999] + l + [-9999]) + index = f(res, res, 1, n) + self.assertEqual(res[0], 9999) + self.assertEqual(res[-1], -9999) + pivot = res[index] + for i in range(1, index): + self.assertLessEqual(res[i], pivot) + for i in range(index + 1, n): + self.assertGreaterEqual(res[i], pivot) + + f = self.quicksort.partition + l = self.sorted_list(n) + check(l, n) + l = self.revsorted_list(n) + check(l, n) + l = self.initially_sorted_list(n, n//2) + check(l, n) + l = self.revsorted_list(n) + check(l, n) + l = self.random_list(n) + check(l, n) + l = self.duprandom_list(n) + check(l, n) + + def test_partition3(self): + # Test the unused partition3() function + n = 20 + def check(l, n): + res = self.array_factory([9999] + l + [-9999]) + lt, gt = f(res, 1, n) + self.assertEqual(res[0], 9999) + self.assertEqual(res[-1], -9999) + pivot = res[lt] + for i in range(1, lt): + self.assertLessEqual(res[i], pivot) + for i in range(lt, gt + 1): + self.assertEqual(res[i], pivot) + for i in range(gt + 1, n): + self.assertGreater(res[i], pivot) + + f = self.quicksort.partition3 + l = self.sorted_list(n) + check(l, n) + l = self.revsorted_list(n) + check(l, n) + l = self.initially_sorted_list(n, n//2) + check(l, n) + l = self.revsorted_list(n) + check(l, n) + l = self.random_list(n) + check(l, n) + l = self.duprandom_list(n) + check(l, n) + + def test_run_quicksort(self): + f = 
self.quicksort.run_quicksort + + for size_factor in (1, 5): + # Make lists to be sorted from two chunks of different kinds. + sizes = (15, 20) + + all_lists = [self.make_sample_lists(n * size_factor) for n in sizes] + for chunks in itertools.product(*all_lists): + orig_keys = sum(chunks, []) + keys = self.array_factory(orig_keys) + f(keys) + # The list is now sorted + self.assertSorted(orig_keys, keys) + + def test_run_quicksort_lt(self): + def lt(a, b): + return a > b + + f = self.make_quicksort(lt=lt).run_quicksort + + for size_factor in (1, 5): + # Make lists to be sorted from two chunks of different kinds. + sizes = (15, 20) + + all_lists = [self.make_sample_lists(n * size_factor) for n in sizes] + for chunks in itertools.product(*all_lists): + orig_keys = sum(chunks, []) + keys = self.array_factory(orig_keys) + f(keys) + # The list is now rev-sorted + self.assertSorted(orig_keys, keys[::-1]) + + # An imperfect comparison function, as LT(a, b) does not imply not LT(b, a). + # The sort should handle it gracefully. 
+ def lt_floats(a, b): + return math.isnan(b) or a < b + + f = self.make_quicksort(lt=lt_floats).run_quicksort + + np.random.seed(42) + for size in (5, 20, 50, 500): + orig = np.random.random(size=size) * 100 + orig[np.random.random(size=size) < 0.1] = float('nan') + orig_keys = list(orig) + keys = self.array_factory(orig_keys) + f(keys) + non_nans = orig[~np.isnan(orig)] + # Non-NaNs are sorted at the front + self.assertSorted(non_nans, keys[:len(non_nans)]) + + +class TestQuicksortPurePython(BaseQuicksortTest, TestCase): + + quicksort = py_quicksort + make_quicksort = staticmethod(make_py_quicksort) + + # Much faster than a Numpy array in pure Python + array_factory = list + + +class TestQuicksortArrays(BaseQuicksortTest, TestCase): + + quicksort = jit_quicksort + make_quicksort = staticmethod(make_jit_quicksort) + + def array_factory(self, lst): + return np.array(lst, dtype=np.float64) + +class TestQuicksortMultidimensionalArrays(BaseSortingTest, TestCase): + + quicksort = make_jit_quicksort(is_np_array=True) + make_quicksort = staticmethod(make_jit_quicksort) + + def assertSorted(self, orig, result): + self.assertEqual(orig.shape, result.shape) + self.assertPreciseEqual(orig, result) + + def array_factory(self, lst, shape=None): + array = np.array(lst, dtype=np.float64) + if shape is None: + return array.reshape(-1, array.shape[0]) + else: + return array.reshape(shape) + + def get_shapes(self, n): + shapes = [] + if n == 1: + return shapes + + for i in range(2, int(math.sqrt(n)) + 1): + if n % i == 0: + shapes.append((n // i, i)) + shapes.append((i, n // i)) + _shapes = self.get_shapes(n // i) + for _shape in _shapes: + shapes.append((i,) + _shape) + shapes.append(_shape + (i,)) + + return shapes + + def test_run_quicksort(self): + f = self.quicksort.run_quicksort + + for size_factor in (1, 5): + # Make lists to be sorted from two chunks of different kinds. 
+ sizes = (15, 20) + + all_lists = [self.make_sample_lists(n * size_factor) for n in sizes] + for chunks in itertools.product(*all_lists): + orig_keys = sum(chunks, []) + shape_list = self.get_shapes(len(orig_keys)) + shape_list.append(None) + for shape in shape_list: + keys = self.array_factory(orig_keys, shape=shape) + keys_copy = self.array_factory(orig_keys, shape=shape) + f(keys) + keys_copy.sort() + # The list is now sorted + self.assertSorted(keys_copy, keys) + + def test_run_quicksort_lt(self): + def lt(a, b): + return a > b + + f = self.make_quicksort(lt=lt, is_np_array=True).run_quicksort + + for size_factor in (1, 5): + # Make lists to be sorted from two chunks of different kinds. + sizes = (15, 20) + + all_lists = [self.make_sample_lists(n * size_factor) for n in sizes] + for chunks in itertools.product(*all_lists): + orig_keys = sum(chunks, []) + shape_list = self.get_shapes(len(orig_keys)) + shape_list.append(None) + for shape in shape_list: + keys = self.array_factory(orig_keys, shape=shape) + keys_copy = -self.array_factory(orig_keys, shape=shape) + f(keys) + # The list is now rev-sorted + keys_copy.sort() + keys_copy = -keys_copy + self.assertSorted(keys_copy, keys) + + # An imperfect comparison function, as LT(a, b) does not imply not LT(b, a). + # The sort should handle it gracefully. 
+ def lt_floats(a, b): + return math.isnan(b) or a < b + + f = self.make_quicksort(lt=lt_floats, is_np_array=True).run_quicksort + + np.random.seed(42) + for size in (5, 20, 50, 500): + orig = np.random.random(size=size) * 100 + orig[np.random.random(size=size) < 0.1] = float('nan') + orig_keys = list(orig) + shape_list = self.get_shapes(len(orig_keys)) + shape_list.append(None) + for shape in shape_list: + keys = self.array_factory(orig_keys, shape=shape) + keys_copy = self.array_factory(orig_keys, shape=shape) + f(keys) + keys_copy.sort() + # Non-NaNs are sorted at the front + self.assertSorted(keys_copy, keys) + +class TestNumpySort(TestCase): + + def setUp(self): + np.random.seed(42) + + def int_arrays(self): + for size in (5, 20, 50, 500): + yield np.random.randint(99, size=size) + + def float_arrays(self): + for size in (5, 20, 50, 500): + yield np.random.random(size=size) * 100 + # Now with NaNs. Numpy sorts them at the end. + for size in (5, 20, 50, 500): + orig = np.random.random(size=size) * 100 + orig[np.random.random(size=size) < 0.1] = float('nan') + yield orig + # 90% of values are NaNs. + for size in (50, 500): + orig = np.random.random(size=size) * 100 + orig[np.random.random(size=size) < 0.9] = float('nan') + yield orig + + def has_duplicates(self, arr): + """ + Whether the array has duplicates. Takes NaNs into account. 
+ """ + if np.count_nonzero(np.isnan(arr)) > 1: + return True + if np.unique(arr).size < arr.size: + return True + return False + + def check_sort_inplace(self, pyfunc, cfunc, val): + expected = copy.copy(val) + got = copy.copy(val) + pyfunc(expected) + cfunc(got) + self.assertPreciseEqual(got, expected) + + def check_sort_copy(self, pyfunc, cfunc, val): + orig = copy.copy(val) + expected = pyfunc(val) + got = cfunc(val) + self.assertPreciseEqual(got, expected) + # The original wasn't mutated + self.assertPreciseEqual(val, orig) + + def check_argsort(self, pyfunc, cfunc, val, kwargs={}): + orig = copy.copy(val) + expected = pyfunc(val, **kwargs) + got = cfunc(val, **kwargs) + self.assertPreciseEqual(orig[got], np.sort(orig), + msg="the array wasn't argsorted") + # Numba and Numpy results may differ if there are duplicates + # in the array + if not self.has_duplicates(orig): + self.assertPreciseEqual(got, expected) + # The original wasn't mutated + self.assertPreciseEqual(val, orig) + + def test_array_sort_int(self): + pyfunc = sort_usecase + cfunc = jit(nopython=True)(pyfunc) + + for orig in self.int_arrays(): + self.check_sort_inplace(pyfunc, cfunc, orig) + + def test_array_sort_float(self): + pyfunc = sort_usecase + cfunc = jit(nopython=True)(pyfunc) + + for orig in self.float_arrays(): + self.check_sort_inplace(pyfunc, cfunc, orig) + + def test_array_sort_complex(self): + pyfunc = sort_usecase + cfunc = jit(nopython=True)(pyfunc) + + for real in self.float_arrays(): + imag = real[::] + np.random.shuffle(imag) + orig = np.array([complex(*x) for x in zip(real, imag)]) + self.check_sort_inplace(pyfunc, cfunc, orig) + + def test_np_sort_int(self): + pyfunc = np_sort_usecase + cfunc = jit(nopython=True)(pyfunc) + + for orig in self.int_arrays(): + self.check_sort_copy(pyfunc, cfunc, orig) + + def test_np_sort_float(self): + pyfunc = np_sort_usecase + cfunc = jit(nopython=True)(pyfunc) + + for size in (5, 20, 50, 500): + orig = np.random.random(size=size) * 100 + 
orig[np.random.random(size=size) < 0.1] = float('nan') + self.check_sort_copy(pyfunc, cfunc, orig) + + def test_np_sort_complex(self): + pyfunc = np_sort_usecase + cfunc = jit(nopython=True)(pyfunc) + + for size in (5, 20, 50, 500): + real = np.random.random(size=size) * 100 + imag = np.random.random(size=size) * 100 + real[np.random.random(size=size) < 0.1] = float('nan') + imag[np.random.random(size=size) < 0.1] = float('nan') + orig = np.array([complex(*x) for x in zip(real, imag)]) + self.check_sort_copy(pyfunc, cfunc, orig) + + def test_argsort_int(self): + def check(pyfunc): + cfunc = jit(nopython=True)(pyfunc) + for orig in self.int_arrays(): + self.check_argsort(pyfunc, cfunc, orig) + + check(argsort_usecase) + check(np_argsort_usecase) + + def test_argsort_kind_int(self): + def check(pyfunc, is_stable): + cfunc = jit(nopython=True)(pyfunc) + for orig in self.int_arrays(): + self.check_argsort(pyfunc, cfunc, orig, + dict(is_stable=is_stable)) + + check(argsort_kind_usecase, is_stable=True) + check(np_argsort_kind_usecase, is_stable=True) + check(argsort_kind_usecase, is_stable=False) + check(np_argsort_kind_usecase, is_stable=False) + + def test_argsort_float(self): + def check(pyfunc): + cfunc = jit(nopython=True)(pyfunc) + for orig in self.float_arrays(): + self.check_argsort(pyfunc, cfunc, orig) + + check(argsort_usecase) + check(np_argsort_usecase) + + def test_argsort_float_supplemental(self): + def check(pyfunc, is_stable): + cfunc = jit(nopython=True)(pyfunc) + for orig in self.float_arrays(): + self.check_argsort(pyfunc, cfunc, orig, + dict(is_stable=is_stable)) + + check(argsort_kind_usecase, is_stable=True) + check(np_argsort_kind_usecase, is_stable=True) + check(argsort_kind_usecase, is_stable=False) + check(np_argsort_kind_usecase, is_stable=False) + + def test_argsort_complex(self): + def check(pyfunc): + cfunc = jit(nopython=True)(pyfunc) + for real in self.float_arrays(): + imag = real[::] + np.random.shuffle(imag) + orig = 
np.array([complex(*x) for x in zip(real, imag)]) + self.check_argsort(pyfunc, cfunc, orig) + + check(argsort_usecase) + check(np_argsort_usecase) + + def test_argsort_complex_supplemental(self): + def check(pyfunc, is_stable): + cfunc = jit(nopython=True)(pyfunc) + for real in self.float_arrays(): + imag = real[::] + np.random.shuffle(imag) + orig = np.array([complex(*x) for x in zip(real, imag)]) + self.check_argsort(pyfunc, cfunc, orig, + dict(is_stable=is_stable)) + + check(argsort_kind_usecase, is_stable=True) + check(np_argsort_kind_usecase, is_stable=True) + check(argsort_kind_usecase, is_stable=False) + check(np_argsort_kind_usecase, is_stable=False) + + def test_bad_array(self): + cfunc = jit(nopython=True)(np_sort_usecase) + msg = '.*Argument "a" must be array-like.*' + with self.assertRaisesRegex(errors.TypingError, msg) as raises: + cfunc(None) + + +class TestPythonSort(TestCase): + + def test_list_sort(self): + pyfunc = list_sort_usecase + cfunc = jit(nopython=True)(pyfunc) + + for size in (20, 50, 500): + orig, ret = cfunc(size) + self.assertEqual(sorted(orig), ret) + self.assertNotEqual(orig, ret) # sanity check + + def test_list_sort_reverse(self): + pyfunc = list_sort_reverse_usecase + cfunc = jit(nopython=True)(pyfunc) + + for size in (20, 50, 500): + for b in (False, True): + orig, ret = cfunc(size, b) + self.assertEqual(sorted(orig, reverse=b), ret) + self.assertNotEqual(orig, ret) # sanity check + + def test_sorted(self): + pyfunc = sorted_usecase + cfunc = jit(nopython=True)(pyfunc) + + for size in (20, 50, 500): + orig = np.random.random(size=size) * 100 + expected = sorted(orig) + got = cfunc(orig) + self.assertPreciseEqual(got, expected) + self.assertNotEqual(list(orig), got) # sanity check + + def test_sorted_reverse(self): + pyfunc = sorted_reverse_usecase + cfunc = jit(nopython=True)(pyfunc) + size = 20 + + orig = np.random.random(size=size) * 100 + for b in (False, True): + expected = sorted(orig, reverse=b) + got = cfunc(orig, b) + 
self.assertPreciseEqual(got, expected) + self.assertNotEqual(list(orig), got) # sanity check + + +class TestMergeSort(TestCase): + def setUp(self): + np.random.seed(321) + + def check_argsort_stable(self, sorter, low, high, count): + # make data with high possibility of duplicated key + data = np.random.randint(low, high, count) + expect = np.argsort(data, kind='mergesort') + got = sorter(data) + np.testing.assert_equal(expect, got) + + def test_argsort_stable(self): + arglist = [ + (-2, 2, 5), + (-5, 5, 10), + (0, 10, 101), + (0, 100, 1003), + ] + imp = make_jit_mergesort(is_argsort=True) + toplevel = imp.run_mergesort + sorter = njit(lambda arr: toplevel(arr)) + for args in arglist: + self.check_argsort_stable(sorter, *args) + + +nop_compiler = lambda x:x + + +class TestSortSlashSortedWithKey(MemoryLeakMixin, TestCase): + + def test_01(self): + + a = [3, 1, 4, 1, 5, 9] + + @njit + def external_key(z): + return 1. / z + + @njit + def foo(x, key=None): + new_x = x[:] + new_x.sort(key=key) + return sorted(x[:], key=key), new_x + + self.assertPreciseEqual(foo(a[:]), foo.py_func(a[:])) + self.assertPreciseEqual(foo(a[:], external_key), + foo.py_func(a[:], external_key)) + + def test_02(self): + + a = [3, 1, 4, 1, 5, 9] + + @njit + def foo(x): + def closure_key(z): + return 1. / z + new_x = x[:] + new_x.sort(key=closure_key) + return sorted(x[:], key=closure_key), new_x + + self.assertPreciseEqual(foo(a[:]), foo.py_func(a[:])) + + def test_03(self): + + a = [3, 1, 4, 1, 5, 9] + + def gen(compiler): + + @compiler + def bar(x, func): + new_x = x[:] + new_x.sort(key=func) + return sorted(x[:], key=func), new_x + + @compiler + def foo(x): + def closure_escapee_key(z): + return 1. 
/ z + return bar(x, closure_escapee_key) + + return foo + + self.assertPreciseEqual(gen(njit)(a[:]), gen(nop_compiler)(a[:])) + + def test_04(self): + + a = ['a','b','B','b','C','A'] + + @njit + def external_key(z): + return z.upper() + + @njit + def foo(x, key=None): + new_x = x[:] + new_x.sort(key=key) + return sorted(x[:], key=key), new_x + + self.assertPreciseEqual(foo(a[:]), foo.py_func(a[:])) + self.assertPreciseEqual(foo(a[:], external_key), + foo.py_func(a[:], external_key)) + + def test_05(self): + + a = ['a','b','B','b','C','A'] + + @njit + def external_key(z): + return z.upper() + + @njit + def foo(x, key=None, reverse=False): + new_x = x[:] + new_x.sort(key=key, reverse=reverse) + return (sorted(x[:], key=key, reverse=reverse), new_x) + + for key, rev in itertools.product((None, external_key), + (True, False, 1, -12, 0)): + self.assertPreciseEqual(foo(a[:], key, rev), + foo.py_func(a[:], key, rev)) + + def test_optional_on_key(self): + a = [3, 1, 4, 1, 5, 9] + + @njit + def foo(x, predicate): + if predicate: + def closure_key(z): + return 1. / z + else: + closure_key = None + + new_x = x[:] + new_x.sort(key=closure_key) + + return (sorted(x[:], key=closure_key), new_x) + + with self.assertRaises(errors.TypingError) as raises: + TF = True + foo(a[:], TF) + + msg = "Key must concretely be None or a Numba JIT compiled function" + self.assertIn(msg, str(raises.exception)) + + def test_exceptions_sorted(self): + + @njit + def foo_sorted(x, key=None, reverse=False): + return sorted(x[:], key=key, reverse=reverse) + + @njit + def foo_sort(x, key=None, reverse=False): + new_x = x[:] + new_x.sort(key=key, reverse=reverse) + return new_x + + @njit + def external_key(z): + return 1. 
/ z + + a = [3, 1, 4, 1, 5, 9] + + for impl in (foo_sort, foo_sorted): + + # check illegal key + with self.assertRaises(errors.TypingError) as raises: + impl(a, key="illegal") + + expect = "Key must be None or a Numba JIT compiled function" + self.assertIn(expect, str(raises.exception)) + + # check illegal reverse + with self.assertRaises(errors.TypingError) as raises: + impl(a, key=external_key, reverse="go backwards") + + expect = "an integer is required for 'reverse'" + self.assertIn(expect, str(raises.exception)) + + +class TestArrayArgsort(MemoryLeakMixin, TestCase): + """Tests specific to array.argsort""" + + def test_exceptions(self): + + @njit + def nonliteral_kind(kind): + np.arange(5).argsort(kind=kind) + + # check non-literal kind + with self.assertRaises(errors.TypingError) as raises: + # valid spelling but not literal + nonliteral_kind('quicksort') + + expect = '"kind" must be a string literal' + self.assertIn(expect, str(raises.exception)) + + @njit + def unsupported_kwarg(): + np.arange(5).argsort(foo='') + + with self.assertRaises(errors.TypingError) as raises: + unsupported_kwarg() + + expect = "Unsupported keywords: ['foo']" + self.assertIn(expect, str(raises.exception)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_ssa.py b/venv/lib/python3.10/site-packages/numba/tests/test_ssa.py new file mode 100644 index 0000000000000000000000000000000000000000..49e28c05f2aaa6e1aee6411bba6700784da4d01e --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_ssa.py @@ -0,0 +1,631 @@ +""" +Tests for SSA reconstruction +""" +import sys +import copy +import logging + +import numpy as np + +from numba import njit, jit, types +from numba.core import errors, ir +from numba.core.compiler_machinery import FunctionPass, register_pass +from numba.core.compiler import DefaultPassBuilder, CompilerBase +from numba.core.untyped_passes import ReconstructSSA, PreserveIR +from 
numba.core.typed_passes import NativeLowering +from numba.extending import overload +from numba.tests.support import MemoryLeakMixin, TestCase, override_config + + +_DEBUG = False + +if _DEBUG: + # Enable debug logger on SSA reconstruction + ssa_logger = logging.getLogger("numba.core.ssa") + ssa_logger.setLevel(level=logging.DEBUG) + ssa_logger.addHandler(logging.StreamHandler(sys.stderr)) + + +class SSABaseTest(TestCase): + + def check_func(self, func, *args): + got = func(*copy.deepcopy(args)) + exp = func.py_func(*copy.deepcopy(args)) + self.assertEqual(got, exp) + + +class TestSSA(SSABaseTest): + """ + Contains tests to help isolate problems in SSA + """ + + def test_argument_name_reused(self): + @njit + def foo(x): + x += 1 + return x + + self.check_func(foo, 123) + + def test_if_else_redefine(self): + @njit + def foo(x, y): + z = x * y + if x < y: + z = x + else: + z = y + return z + + self.check_func(foo, 3, 2) + self.check_func(foo, 2, 3) + + def test_sum_loop(self): + @njit + def foo(n): + c = 0 + for i in range(n): + c += i + return c + + self.check_func(foo, 0) + self.check_func(foo, 10) + + def test_sum_loop_2vars(self): + @njit + def foo(n): + c = 0 + d = n + for i in range(n): + c += i + d += n + return c, d + + self.check_func(foo, 0) + self.check_func(foo, 10) + + def test_sum_2d_loop(self): + @njit + def foo(n): + c = 0 + for i in range(n): + for j in range(n): + c += j + c += i + return c + + self.check_func(foo, 0) + self.check_func(foo, 10) + + def check_undefined_var(self, should_warn): + @njit + def foo(n): + if n: + if n > 0: + c = 0 + return c + else: + # variable c is not defined in this branch + c += 1 + return c + + if should_warn: + with self.assertWarns(errors.NumbaWarning) as warns: + # n=1 so we won't actually run the branch with the uninitialized + self.check_func(foo, 1) + self.assertIn("Detected uninitialized variable c", + str(warns.warning)) + else: + self.check_func(foo, 1) + + with self.assertRaises(UnboundLocalError): + 
foo.py_func(0) + + def test_undefined_var(self): + with override_config('ALWAYS_WARN_UNINIT_VAR', 0): + self.check_undefined_var(should_warn=False) + with override_config('ALWAYS_WARN_UNINIT_VAR', 1): + self.check_undefined_var(should_warn=True) + + def test_phi_propagation(self): + @njit + def foo(actions): + n = 1 + + i = 0 + ct = 0 + while n > 0 and i < len(actions): + n -= 1 + + while actions[i]: + if actions[i]: + if actions[i]: + n += 10 + actions[i] -= 1 + else: + if actions[i]: + n += 20 + actions[i] += 1 + + ct += n + ct += n + return ct, n + + self.check_func(foo, np.array([1, 2])) + + def test_unhandled_undefined(self): + def function1(arg1, arg2, arg3, arg4, arg5): + # This function is auto-generated. + if arg1: + var1 = arg2 + var2 = arg3 + var3 = var2 + var4 = arg1 + return + else: + if arg2: + if arg4: + var5 = arg4 # noqa: F841 + return + else: + var6 = var4 + return + return var6 + else: + if arg5: + if var1: + if arg5: + var1 = var6 + return + else: + var7 = arg2 # noqa: F841 + return arg2 + return + else: + if var2: + arg5 = arg2 + return arg1 + else: + var6 = var3 + return var4 + return + return + else: + var8 = var1 + return + return var8 + var9 = var3 # noqa: F841 + var10 = arg5 # noqa: F841 + return var1 + + # The argument values is not critical for re-creating the bug + # because the bug is in compile-time. 
+ expect = function1(2, 3, 6, 0, 7) + got = njit(function1)(2, 3, 6, 0, 7) + self.assertEqual(expect, got) + + +class TestReportedSSAIssues(SSABaseTest): + # Tests from issues + # https://github.com/numba/numba/issues?q=is%3Aopen+is%3Aissue+label%3ASSA + + def test_issue2194(self): + + @njit + def foo(): + V = np.empty(1) + s = np.uint32(1) + + for i in range(s): + V[i] = 1 + for i in range(s, 1): + pass + + self.check_func(foo, ) + + def test_issue3094(self): + + @njit + def doit(x): + return x + + @njit + def foo(pred): + if pred: + x = True + else: + x = False + # do something with x + return doit(x) + + self.check_func(foo, False) + + def test_issue3931(self): + + @njit + def foo(arr): + for i in range(1): + arr = arr.reshape(3 * 2) + arr = arr.reshape(3, 2) + return (arr) + + np.testing.assert_allclose(foo(np.zeros((3, 2))), + foo.py_func(np.zeros((3, 2)))) + + def test_issue3976(self): + + def overload_this(a): + return 'dummy' + + @njit + def foo(a): + if a: + s = 5 + s = overload_this(s) + else: + s = 'b' + + return s + + @overload(overload_this) + def ol(a): + return overload_this + + self.check_func(foo, True) + + def test_issue3979(self): + + @njit + def foo(A, B): + x = A[0] + y = B[0] + for i in A: + x = i + for i in B: + y = i + return x, y + + self.check_func(foo, (1, 2), ('A', 'B')) + + def test_issue5219(self): + + def overload_this(a, b=None): + if isinstance(b, tuple): + b = b[0] + return b + + @overload(overload_this) + def ol(a, b=None): + b_is_tuple = isinstance(b, (types.Tuple, types.UniTuple)) + + def impl(a, b=None): + if b_is_tuple is True: + b = b[0] + return b + return impl + + @njit + def test_tuple(a, b): + overload_this(a, b) + + self.check_func(test_tuple, 1, (2, )) + + def test_issue5223(self): + + @njit + def bar(x): + if len(x) == 5: + return x + x = x.copy() + for i in range(len(x)): + x[i] += 1 + return x + + a = np.ones(5) + a.flags.writeable = False + + np.testing.assert_allclose(bar(a), bar.py_func(a)) + + def 
test_issue5243(self): + + @njit + def foo(q): + lin = np.array((0.1, 0.6, 0.3)) + stencil = np.zeros((3, 3)) + stencil[0, 0] = q[0, 0] + return lin[0] + + self.check_func(foo, np.zeros((2, 2))) + + def test_issue5482_missing_variable_init(self): + # Test error that lowering fails because variable is missing + # a definition before use. + @njit("(intp, intp, intp)") + def foo(x, v, n): + for i in range(n): + if i == 0: + if i == x: + pass + else: + problematic = v + else: + if i == x: + pass + else: + problematic = problematic + v + return problematic + + def test_issue5482_objmode_expr_null_lowering(self): + # Existing pipelines will not have the Expr.null in objmode. + # We have to create a custom pipeline to force a SSA reconstruction + # and stripping. + from numba.core.compiler import CompilerBase, DefaultPassBuilder + from numba.core.untyped_passes import ReconstructSSA, IRProcessing + from numba.core.typed_passes import PreLowerStripPhis + + class CustomPipeline(CompilerBase): + def define_pipelines(self): + pm = DefaultPassBuilder.define_objectmode_pipeline(self.state) + # Force SSA reconstruction and stripping + pm.add_pass_after(ReconstructSSA, IRProcessing) + pm.add_pass_after(PreLowerStripPhis, ReconstructSSA) + pm.finalize() + return [pm] + + @jit("(intp, intp, intp)", looplift=False, + pipeline_class=CustomPipeline) + def foo(x, v, n): + for i in range(n): + if i == n: + if i == x: + pass + else: + problematic = v + else: + if i == x: + pass + else: + problematic = problematic + v + return problematic + + def test_issue5493_unneeded_phi(self): + # Test error that unneeded phi is inserted because variable does not + # have a dominance definition. 
+ data = (np.ones(2), np.ones(2)) + A = np.ones(1) + B = np.ones((1,1)) + + def foo(m, n, data): + if len(data) == 1: + v0 = data[0] + else: + v0 = data[0] + # Unneeded PHI node for `problematic` would be placed here + for _ in range(1, len(data)): + v0 += A + + for t in range(1, m): + for idx in range(n): + t = B + + if idx == 0: + if idx == n - 1: + pass + else: + problematic = t + else: + if idx == n - 1: + pass + else: + problematic = problematic + t + return problematic + + expect = foo(10, 10, data) + res1 = njit(foo)(10, 10, data) + res2 = jit(forceobj=True, looplift=False)(foo)(10, 10, data) + np.testing.assert_array_equal(expect, res1) + np.testing.assert_array_equal(expect, res2) + + def test_issue5623_equal_statements_in_same_bb(self): + + def foo(pred, stack): + i = 0 + c = 1 + + if pred is True: + stack[i] = c + i += 1 + stack[i] = c + i += 1 + + python = np.array([0, 666]) + foo(True, python) + + nb = np.array([0, 666]) + njit(foo)(True, nb) + + expect = np.array([1, 1]) + + np.testing.assert_array_equal(python, expect) + np.testing.assert_array_equal(nb, expect) + + def test_issue5678_non_minimal_phi(self): + # There should be only one phi for variable "i" + + from numba.core.compiler import CompilerBase, DefaultPassBuilder + from numba.core.untyped_passes import ( + ReconstructSSA, FunctionPass, register_pass, + ) + + phi_counter = [] + + @register_pass(mutates_CFG=False, analysis_only=True) + class CheckSSAMinimal(FunctionPass): + # A custom pass to count the number of phis + + _name = self.__class__.__qualname__ + ".CheckSSAMinimal" + + def __init__(self): + super().__init__(self) + + def run_pass(self, state): + ct = 0 + for blk in state.func_ir.blocks.values(): + ct += len(list(blk.find_exprs('phi'))) + phi_counter.append(ct) + return True + + class CustomPipeline(CompilerBase): + def define_pipelines(self): + pm = DefaultPassBuilder.define_nopython_pipeline(self.state) + pm.add_pass_after(CheckSSAMinimal, ReconstructSSA) + pm.finalize() + 
return [pm] + + @njit(pipeline_class=CustomPipeline) + def while_for(n, max_iter=1): + a = np.empty((n,n)) + i = 0 + while i <= max_iter: + for j in range(len(a)): + for k in range(len(a)): + a[j,k] = j + k + i += 1 + return a + + # Runs fine? + self.assertPreciseEqual(while_for(10), while_for.py_func(10)) + # One phi? + self.assertEqual(phi_counter, [1]) + + def test_issue9242_use_not_dom_def(self): + from numba.core.ir import FunctionIR + from numba.core.compiler_machinery import ( + AnalysisPass, + register_pass, + ) + + def check(fir: FunctionIR): + [blk, *_] = fir.blocks.values() + var = blk.scope.get("d") + defn = fir.get_definition(var) + self.assertEqual(defn.op, "phi") + self.assertIn(ir.UNDEFINED, defn.incoming_values) + + @register_pass(mutates_CFG=False, analysis_only=True) + class SSACheck(AnalysisPass): + """ + Check SSA on variable `d` + """ + + _name = "SSA_Check" + + def __init__(self): + AnalysisPass.__init__(self) + + def run_pass(self, state): + check(state.func_ir) + return False + + class SSACheckPipeline(CompilerBase): + """Inject SSACheck pass into the default pipeline following the SSA + pass + """ + + def define_pipelines(self): + pipeline = DefaultPassBuilder.define_nopython_pipeline( + self.state, "ssa_check_custom_pipeline") + + pipeline._finalized = False + pipeline.add_pass_after(SSACheck, ReconstructSSA) + + pipeline.finalize() + return [pipeline] + + @njit(pipeline_class=SSACheckPipeline) + def py_func(a): + c = a > 0 + if c: + d = a + 5 # d is only defined here; undef in the else branch + + return c and d > 0 + + py_func(10) + + +class TestSROAIssues(MemoryLeakMixin, TestCase): + # This tests issues related to the SROA optimization done in lowering, which + # reduces time spent in the LLVM SROA pass. The optimization is related to + # SSA and tries to reduce the number of alloca statements for variables with + # only a single assignment. 
+ def test_issue7258_multiple_assignment_post_SSA(self): + # This test adds a pass that will duplicate assignment statements to + # variables named "foobar". + # In the reported issue, the bug will cause a memory leak. + cloned = [] + + @register_pass(analysis_only=False, mutates_CFG=True) + class CloneFoobarAssignments(FunctionPass): + # A pass that clones variable assignments into "foobar" + _name = "clone_foobar_assignments_pass" + + def __init__(self): + FunctionPass.__init__(self) + + def run_pass(self, state): + mutated = False + for blk in state.func_ir.blocks.values(): + to_clone = [] + # find assignments to "foobar" + for assign in blk.find_insts(ir.Assign): + if assign.target.name == "foobar": + to_clone.append(assign) + # clone + for assign in to_clone: + clone = copy.deepcopy(assign) + blk.insert_after(clone, assign) + mutated = True + # keep track of cloned statements + cloned.append(clone) + return mutated + + class CustomCompiler(CompilerBase): + def define_pipelines(self): + pm = DefaultPassBuilder.define_nopython_pipeline( + self.state, "custom_pipeline", + ) + pm._finalized = False + # Insert the cloning pass after SSA + pm.add_pass_after(CloneFoobarAssignments, ReconstructSSA) + # Capture IR post lowering + pm.add_pass_after(PreserveIR, NativeLowering) + pm.finalize() + return [pm] + + @njit(pipeline_class=CustomCompiler) + def udt(arr): + foobar = arr + 1 # this assignment will be cloned + return foobar + + arr = np.arange(10) + # Verify that the function works as expected + self.assertPreciseEqual(udt(arr), arr + 1) + # Verify that the expected statement is cloned + self.assertEqual(len(cloned), 1) + self.assertEqual(cloned[0].target.name, "foobar") + # Verify in the Numba IR that the expected statement is cloned + nir = udt.overloads[udt.signatures[0]].metadata['preserved_ir'] + self.assertEqual(len(nir.blocks), 1, + "only one block") + [blk] = nir.blocks.values() + assigns = blk.find_insts(ir.Assign) + foobar_assigns = [stmt for stmt in 
assigns + if stmt.target.name == "foobar"] + self.assertEqual( + len(foobar_assigns), 2, + "expected two assignment statements into 'foobar'", + ) + self.assertEqual( + foobar_assigns[0], foobar_assigns[1], + "expected the two assignment statements to be the same", + ) diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_stencils.py b/venv/lib/python3.10/site-packages/numba/tests/test_stencils.py new file mode 100644 index 0000000000000000000000000000000000000000..a06a737a944ab58bf4d974f04dcc270e8a00fd24 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_stencils.py @@ -0,0 +1,3223 @@ +# +# Copyright (c) 2017 Intel Corporation +# SPDX-License-Identifier: BSD-2-Clause +# + +import numpy as np +from contextlib import contextmanager + +import numba +from numba import njit, stencil +from numba.core import types, registry +from numba.core.compiler import compile_extra, Flags +from numba.core.cpu import ParallelOptions +from numba.tests.support import skip_parfors_unsupported, _32bit +from numba.core.errors import LoweringError, TypingError, NumbaValueError +import unittest + + +skip_unsupported = skip_parfors_unsupported + + +@stencil +def stencil1_kernel(a): + return 0.25 * (a[0, 1] + a[1, 0] + a[0, -1] + a[-1, 0]) + + +@stencil(neighborhood=((-5, 0), )) +def stencil2_kernel(a): + cum = a[-5] + for i in range(-4, 1): + cum += a[i] + return 0.3 * cum + + +@stencil(cval=1.0) +def stencil3_kernel(a): + return 0.25 * a[-2, 2] + + +@stencil +def stencil_multiple_input_kernel(a, b): + return 0.25 * (a[0, 1] + a[1, 0] + a[0, -1] + a[-1, 0] + + b[0, 1] + b[1, 0] + b[0, -1] + b[-1, 0]) + + +@stencil +def stencil_multiple_input_kernel_var(a, b, w): + return w * (a[0, 1] + a[1, 0] + a[0, -1] + a[-1, 0] + + b[0, 1] + b[1, 0] + b[0, -1] + b[-1, 0]) + + +@stencil +def stencil_multiple_input_mixed_types_2d(a, b, f): + return a[0, 0] if f[0, 0] else b[0, 0] + + +@stencil(standard_indexing=("b",)) +def stencil_with_standard_indexing_1d(a, b): + return 
a[-1] * b[0] + a[0] * b[1] + + +@stencil(standard_indexing=("b",)) +def stencil_with_standard_indexing_2d(a, b): + return (a[0, 1] * b[0, 1] + a[1, 0] * b[1, 0] + + a[0, -1] * b[0, -1] + a[-1, 0] * b[-1, 0]) + + +@njit +def addone_njit(a): + return a + 1 + + +if not _32bit: # prevent compilation on unsupported 32bit targets + @njit(parallel=True) + def addone_pjit(a): + return a + 1 + + +class TestStencilBase(unittest.TestCase): + + _numba_parallel_test_ = False + + def __init__(self, *args): + # flags for njit() + self.cflags = Flags() + self.cflags.nrt = True + + super(TestStencilBase, self).__init__(*args) + + def _compile_this(self, func, sig, flags): + return compile_extra(registry.cpu_target.typing_context, + registry.cpu_target.target_context, func, sig, + None, flags, {}) + + def compile_parallel(self, func, sig, **kws): + flags = Flags() + flags.nrt = True + options = True if not kws else kws + flags.auto_parallel = ParallelOptions(options) + return self._compile_this(func, sig, flags) + + def compile_njit(self, func, sig): + return self._compile_this(func, sig, flags=self.cflags) + + def compile_all(self, pyfunc, *args, **kwargs): + sig = tuple([numba.typeof(x) for x in args]) + # compile with parallel=True + cpfunc = self.compile_parallel(pyfunc, sig) + # compile a standard njit of the original function + cfunc = self.compile_njit(pyfunc, sig) + return cfunc, cpfunc + + def check(self, no_stencil_func, pyfunc, *args): + cfunc, cpfunc = self.compile_all(pyfunc, *args) + # results without stencil macro + expected = no_stencil_func(*args) + # python result + py_output = pyfunc(*args) + + # njit result + njit_output = cfunc.entry_point(*args) + + # parfor result + parfor_output = cpfunc.entry_point(*args) + + np.testing.assert_almost_equal(py_output, expected, decimal=3) + np.testing.assert_almost_equal(njit_output, expected, decimal=3) + np.testing.assert_almost_equal(parfor_output, expected, decimal=3) + + # make sure parfor set up scheduling + 
self.assertIn('@do_scheduling', cpfunc.library.get_llvm_str()) + + +class TestStencil(TestStencilBase): + + def __init__(self, *args, **kwargs): + super(TestStencil, self).__init__(*args, **kwargs) + + @skip_unsupported + def test_stencil1(self): + """Tests whether the optional out argument to stencil calls works. + """ + def test_with_out(n): + A = np.arange(n**2).reshape((n, n)) + B = np.zeros(n**2).reshape((n, n)) + B = stencil1_kernel(A, out=B) + return B + + def test_without_out(n): + A = np.arange(n**2).reshape((n, n)) + B = stencil1_kernel(A) + return B + + def test_impl_seq(n): + A = np.arange(n**2).reshape((n, n)) + B = np.zeros(n**2).reshape((n, n)) + for i in range(1, n - 1): + for j in range(1, n - 1): + B[i, j] = 0.25 * (A[i, j + 1] + + A[i + 1, j] + A[i, j - 1] + A[i - 1, j]) + return B + + n = 100 + self.check(test_impl_seq, test_with_out, n) + self.check(test_impl_seq, test_without_out, n) + + @skip_unsupported + def test_stencil2(self): + """Tests whether the optional neighborhood argument to the stencil + decorate works. 
+ """ + def test_seq(n): + A = np.arange(n) + B = stencil2_kernel(A) + return B + + def test_impl_seq(n): + A = np.arange(n) + B = np.zeros(n) + for i in range(5, len(A)): + B[i] = 0.3 * sum(A[i - 5:i + 1]) + return B + + n = 100 + self.check(test_impl_seq, test_seq, n) + # variable length neighborhood in numba.stencil call + # only supported in parallel path + + def test_seq(n, w): + A = np.arange(n) + + def stencil2_kernel(a, w): + cum = a[-w] + for i in range(-w + 1, w + 1): + cum += a[i] + return 0.3 * cum + B = numba.stencil(stencil2_kernel, neighborhood=((-w, w), ))(A, w) + return B + + def test_impl_seq(n, w): + A = np.arange(n) + B = np.zeros(n) + for i in range(w, len(A) - w): + B[i] = 0.3 * sum(A[i - w:i + w + 1]) + return B + n = 100 + w = 5 + cpfunc = self.compile_parallel(test_seq, (types.intp, types.intp)) + expected = test_impl_seq(n, w) + # parfor result + parfor_output = cpfunc.entry_point(n, w) + np.testing.assert_almost_equal(parfor_output, expected, decimal=3) + self.assertIn('@do_scheduling', cpfunc.library.get_llvm_str()) + # test index_offsets + + def test_seq(n, w, offset): + A = np.arange(n) + + def stencil2_kernel(a, w): + cum = a[-w + 1] + for i in range(-w + 1, w + 1): + cum += a[i + 1] + return 0.3 * cum + B = numba.stencil(stencil2_kernel, neighborhood=((-w, w), ), + index_offsets=(-offset, ))(A, w) + return B + + offset = 1 + cpfunc = self.compile_parallel(test_seq, (types.intp, types.intp, + types.intp)) + parfor_output = cpfunc.entry_point(n, w, offset) + np.testing.assert_almost_equal(parfor_output, expected, decimal=3) + self.assertIn('@do_scheduling', cpfunc.library.get_llvm_str()) + # test slice in kernel + + def test_seq(n, w, offset): + A = np.arange(n) + + def stencil2_kernel(a, w): + return 0.3 * np.sum(a[-w + 1:w + 2]) + B = numba.stencil(stencil2_kernel, neighborhood=((-w, w), ), + index_offsets=(-offset, ))(A, w) + return B + + offset = 1 + cpfunc = self.compile_parallel(test_seq, (types.intp, types.intp, + types.intp)) + 
parfor_output = cpfunc.entry_point(n, w, offset) + np.testing.assert_almost_equal(parfor_output, expected, decimal=3) + self.assertIn('@do_scheduling', cpfunc.library.get_llvm_str()) + + @skip_unsupported + def test_stencil3(self): + """Tests whether a non-zero optional cval argument to the stencil + decorator works. Also tests integer result type. + """ + def test_seq(n): + A = np.arange(n**2).reshape((n, n)) + B = stencil3_kernel(A) + return B + + test_njit = njit(test_seq) + test_par = njit(test_seq, parallel=True) + + n = 5 + seq_res = test_seq(n) + njit_res = test_njit(n) + par_res = test_par(n) + + self.assertTrue(seq_res[0, 0] == 1.0 and seq_res[4, 4] == 1.0) + self.assertTrue(njit_res[0, 0] == 1.0 and njit_res[4, 4] == 1.0) + self.assertTrue(par_res[0, 0] == 1.0 and par_res[4, 4] == 1.0) + + @skip_unsupported + def test_stencil_standard_indexing_1d(self): + """Tests standard indexing with a 1d array. + """ + def test_seq(n): + A = np.arange(n) + B = [3.0, 7.0] + C = stencil_with_standard_indexing_1d(A, B) + return C + + def test_impl_seq(n): + A = np.arange(n) + B = [3.0, 7.0] + C = np.zeros(n) + + for i in range(1, n): + C[i] = A[i - 1] * B[0] + A[i] * B[1] + return C + + n = 100 + self.check(test_impl_seq, test_seq, n) + + @skip_unsupported + def test_stencil_standard_indexing_2d(self): + """Tests standard indexing with a 2d array and multiple stencil calls. 
+ """ + def test_seq(n): + A = np.arange(n**2).reshape((n, n)) + B = np.ones((3, 3)) + C = stencil_with_standard_indexing_2d(A, B) + D = stencil_with_standard_indexing_2d(C, B) + return D + + def test_impl_seq(n): + A = np.arange(n**2).reshape((n, n)) + B = np.ones((3, 3)) + C = np.zeros(n**2).reshape((n, n)) + D = np.zeros(n**2).reshape((n, n)) + + for i in range(1, n - 1): + for j in range(1, n - 1): + C[i, j] = (A[i, j + 1] * B[0, 1] + A[i + 1, j] * B[1, 0] + + A[i, j - 1] * B[0, -1] + A[i - 1, j] * B[-1, 0]) + for i in range(1, n - 1): + for j in range(1, n - 1): + D[i, j] = (C[i, j + 1] * B[0, 1] + C[i + 1, j] * B[1, 0] + + C[i, j - 1] * B[0, -1] + C[i - 1, j] * B[-1, 0]) + return D + + n = 5 + self.check(test_impl_seq, test_seq, n) + + @skip_unsupported + def test_stencil_multiple_inputs(self): + """Tests whether multiple inputs of the same size work. + """ + def test_seq(n): + A = np.arange(n**2).reshape((n, n)) + B = np.arange(n**2).reshape((n, n)) + C = stencil_multiple_input_kernel(A, B) + return C + + def test_impl_seq(n): + A = np.arange(n**2).reshape((n, n)) + B = np.arange(n**2).reshape((n, n)) + C = np.zeros(n**2).reshape((n, n)) + for i in range(1, n - 1): + for j in range(1, n - 1): + C[i, j] = 0.25 * \ + (A[i, j + 1] + A[i + 1, j] + + A[i, j - 1] + A[i - 1, j] + + B[i, j + 1] + B[i + 1, j] + + B[i, j - 1] + B[i - 1, j]) + return C + + n = 3 + self.check(test_impl_seq, test_seq, n) + # test stencil with a non-array input + + def test_seq(n): + A = np.arange(n**2).reshape((n, n)) + B = np.arange(n**2).reshape((n, n)) + w = 0.25 + C = stencil_multiple_input_kernel_var(A, B, w) + return C + self.check(test_impl_seq, test_seq, n) + + @skip_unsupported + def test_stencil_mixed_types(self): + def test_impl_seq(n): + A = np.arange(n ** 2).reshape((n, n)) + B = n ** 2 - np.arange(n ** 2).reshape((n, n)) + S = np.eye(n, dtype=np.bool_) + O = np.zeros((n, n), dtype=A.dtype) + for i in range(0, n): + for j in range(0, n): + O[i, j] = A[i, j] if S[i, j] else 
B[i, j] + return O + + def test_seq(n): + A = np.arange(n ** 2).reshape((n, n)) + B = n ** 2 - np.arange(n ** 2).reshape((n, n)) + S = np.eye(n, dtype=np.bool_) + O = stencil_multiple_input_mixed_types_2d(A, B, S) + return O + + n = 3 + self.check(test_impl_seq, test_seq, n) + + @skip_unsupported + def test_stencil_call(self): + """Tests 2D numba.stencil calls. + """ + def test_impl1(n): + A = np.arange(n**2).reshape((n, n)) + B = np.zeros(n**2).reshape((n, n)) + numba.stencil(lambda a: 0.25 * (a[0, 1] + a[1, 0] + a[0, -1] + + a[-1, 0]))(A, out=B) + return B + + def test_impl2(n): + A = np.arange(n**2).reshape((n, n)) + B = np.zeros(n**2).reshape((n, n)) + + def sf(a): + return 0.25 * (a[0, 1] + a[1, 0] + a[0, -1] + a[-1, 0]) + B = numba.stencil(sf)(A) + return B + + def test_impl_seq(n): + A = np.arange(n**2).reshape((n, n)) + B = np.zeros(n**2).reshape((n, n)) + for i in range(1, n - 1): + for j in range(1, n - 1): + B[i, j] = 0.25 * (A[i, j + 1] + A[i + 1, j] + + A[i, j - 1] + A[i - 1, j]) + return B + + n = 100 + self.check(test_impl_seq, test_impl1, n) + self.check(test_impl_seq, test_impl2, n) + + @skip_unsupported + def test_stencil_call_1D(self): + """Tests 1D numba.stencil calls. + """ + def test_impl(n): + A = np.arange(n) + B = np.zeros(n) + numba.stencil(lambda a: 0.3 * (a[-1] + a[0] + a[1]))(A, out=B) + return B + + def test_impl_seq(n): + A = np.arange(n) + B = np.zeros(n) + for i in range(1, n - 1): + B[i] = 0.3 * (A[i - 1] + A[i] + A[i + 1]) + return B + + n = 100 + self.check(test_impl_seq, test_impl, n) + + @skip_unsupported + def test_stencil_call_const(self): + """Tests numba.stencil call that has an index that can be inferred as + constant from a unary expr. Otherwise, this would raise an error since + neighborhood length is not specified. 
+ """ + def test_impl1(n): + A = np.arange(n) + B = np.zeros(n) + c = 1 + numba.stencil(lambda a,c : 0.3 * (a[-c] + a[0] + a[c]))(A, c, out=B) + return B + + def test_impl2(n): + A = np.arange(n) + B = np.zeros(n) + c = 2 + numba.stencil( + lambda a,c : 0.3 * (a[1 - c] + a[0] + a[c - 1]))(A, c, out=B) + return B + + # recursive expr case + def test_impl3(n): + A = np.arange(n) + B = np.zeros(n) + c = 2 + numba.stencil( + lambda a,c : 0.3 * (a[-c + 1] + a[0] + a[c - 1]))(A, c, out=B) + return B + + # multi-constant case + def test_impl4(n): + A = np.arange(n) + B = np.zeros(n) + d = 1 + c = 2 + numba.stencil( + lambda a,c,d : 0.3 * (a[-c + d] + a[0] + a[c - d]))(A, c, d, + out=B) + return B + + def test_impl_seq(n): + A = np.arange(n) + B = np.zeros(n) + c = 1 + for i in range(1, n - 1): + B[i] = 0.3 * (A[i - c] + A[i] + A[i + c]) + return B + + n = 100 + # constant inference is only possible in parallel path + cpfunc1 = self.compile_parallel(test_impl1, (types.intp,)) + cpfunc2 = self.compile_parallel(test_impl2, (types.intp,)) + cpfunc3 = self.compile_parallel(test_impl3, (types.intp,)) + cpfunc4 = self.compile_parallel(test_impl4, (types.intp,)) + expected = test_impl_seq(n) + # parfor result + parfor_output1 = cpfunc1.entry_point(n) + parfor_output2 = cpfunc2.entry_point(n) + parfor_output3 = cpfunc3.entry_point(n) + parfor_output4 = cpfunc4.entry_point(n) + np.testing.assert_almost_equal(parfor_output1, expected, decimal=3) + np.testing.assert_almost_equal(parfor_output2, expected, decimal=3) + np.testing.assert_almost_equal(parfor_output3, expected, decimal=3) + np.testing.assert_almost_equal(parfor_output4, expected, decimal=3) + + # check error in regular Python path + with self.assertRaises(NumbaValueError) as e: + test_impl4(4) + + self.assertIn("stencil kernel index is not constant, " + "'neighborhood' option required", str(e.exception)) + # check error in njit path + # TODO: ValueError should be thrown instead of LoweringError + with 
self.assertRaises((LoweringError, NumbaValueError)) as e: + njit(test_impl4)(4) + + self.assertIn("stencil kernel index is not constant, " + "'neighborhood' option required", str(e.exception)) + + @skip_unsupported + def test_stencil_parallel_off(self): + """Tests 1D numba.stencil calls without parallel translation + turned off. + """ + def test_impl(A): + return numba.stencil(lambda a: 0.3 * (a[-1] + a[0] + a[1]))(A) + + cpfunc = self.compile_parallel(test_impl, (numba.float64[:],), + stencil=False) + self.assertNotIn('@do_scheduling', cpfunc.library.get_llvm_str()) + + @skip_unsupported + def test_stencil_nested1(self): + """Tests whether nested stencil decorator works. + """ + @njit(parallel=True) + def test_impl(n): + @stencil + def fun(a): + c = 2 + return a[-c + 1] + B = fun(n) + return B + + def test_impl_seq(n): + B = np.zeros(len(n), dtype=int) + for i in range(1, len(n)): + B[i] = n[i - 1] + return B + + n = np.arange(10) + np.testing.assert_equal(test_impl(n), test_impl_seq(n)) + + @skip_unsupported + def test_out_kwarg_w_cval(self): + """ Issue #3518, out kwarg did not work with cval.""" + # test const value that matches the arg dtype, and one that can be cast + const_vals = [7, 7.0] + + def kernel(a): + return (a[0, 0] - a[1, 0]) + + for const_val in const_vals: + stencil_fn = numba.stencil(kernel, cval=const_val) + + def wrapped(): + A = np.arange(12).reshape((3, 4)) + ret = np.ones_like(A) + stencil_fn(A, out=ret) + return ret + + # stencil function case + A = np.arange(12).reshape((3, 4)) + expected = np.full_like(A, -4) + expected[-1, :] = const_val + ret = np.ones_like(A) + stencil_fn(A, out=ret) + np.testing.assert_almost_equal(ret, expected) + + # wrapped function case, check njit, then njit(parallel=True) + impls = self.compile_all(wrapped,) + for impl in impls: + got = impl.entry_point() + np.testing.assert_almost_equal(got, expected) + + # now check exceptions for cval dtype mismatch with out kwarg dtype + stencil_fn = numba.stencil(kernel, 
cval=1j) + + def wrapped(): + A = np.arange(12).reshape((3, 4)) + ret = np.ones_like(A) + stencil_fn(A, out=ret) + return ret + + A = np.arange(12).reshape((3, 4)) + ret = np.ones_like(A) + with self.assertRaises(NumbaValueError) as e: + stencil_fn(A, out=ret) + msg = "cval type does not match stencil return type." + self.assertIn(msg, str(e.exception)) + + for compiler in [self.compile_njit, self.compile_parallel]: + try: + compiler(wrapped,()) + except (NumbaValueError, LoweringError) as e: + self.assertIn(msg, str(e)) + else: + raise AssertionError("Expected error was not raised") + + @skip_unsupported + def test_out_kwarg_w_cval_np_attr(self): + """ Test issue #7286 where the cval is a np attr/string-based numerical + constant""" + for cval in (np.nan, np.inf, -np.inf, float('inf'), -float('inf')): + def kernel(a): + return (a[0, 0] - a[1, 0]) + + stencil_fn = numba.stencil(kernel, cval=cval) + + def wrapped(): + A = np.arange(12.).reshape((3, 4)) + ret = np.ones_like(A) + stencil_fn(A, out=ret) + return ret + + # stencil function case + A = np.arange(12.).reshape((3, 4)) + expected = np.full_like(A, -4) + expected[-1, :] = cval + ret = np.ones_like(A) + stencil_fn(A, out=ret) + np.testing.assert_almost_equal(ret, expected) + + # wrapped function case, check njit, then njit(parallel=True) + impls = self.compile_all(wrapped,) + for impl in impls: + got = impl.entry_point() + np.testing.assert_almost_equal(got, expected) + + +@skip_unsupported +class TestManyStencils(TestStencilBase): + # NOTE: the original implementation of this test used manipulations of the + # Python AST repr of a kernel to create another implementation of the + # stencil being tested so to act as another reference point when + # comparing the various forms of @stencil calls. This implementation was + # based on the cPython 3.7 version of the AST and proved too much effort to + # continuously port to newer python versions. 
Ahead of dropping Python 3.7 + # support, all the kernel invocations were translated via the ``astor`` + # package ``astor.to_source()`` function to pure python source and this + # source was hardcoded into the tests themselves. In the following tests, + # regions demarked with dashed lines (----) and with the header + # "Autogenerated kernel" correspond to these translations. + + def __init__(self, *args, **kwargs): + super(TestManyStencils, self).__init__(*args, **kwargs) + + def check_against_expected(self, pyfunc, expected, *args, **kwargs): + """ + For a given kernel: + + The expected result is available from argument `expected`. + + The following results are then computed: + * from a pure @stencil decoration of the kernel. + * from the njit of a trivial wrapper function around the pure @stencil + decorated function. + * from the njit(parallel=True) of a trivial wrapper function around + the pure @stencil decorated function. + + The results are then compared. + """ + + options = kwargs.get('options', dict()) + expected_exception = kwargs.get('expected_exception') + + # DEBUG print output arrays + DEBUG_OUTPUT = False + + # collect fails + should_fail = [] + should_not_fail = [] + + # runner that handles fails + @contextmanager + def errorhandler(exty=None, usecase=None): + try: + yield + except Exception as e: + if exty is not None: + lexty = exty if hasattr(exty, '__iter__') else [exty, ] + found = False + for ex in lexty: + found |= isinstance(e, ex) + if not found: + raise + else: + should_not_fail.append( + (usecase, "%s: %s" % + (type(e), str(e)))) + else: + if exty is not None: + should_fail.append(usecase) + + if isinstance(expected_exception, dict): + stencil_ex = expected_exception['stencil'] + njit_ex = expected_exception['njit'] + parfor_ex = expected_exception['parfor'] + else: + stencil_ex = expected_exception + njit_ex = expected_exception + parfor_ex = expected_exception + + stencil_args = {'func_or_mode': pyfunc} + stencil_args.update(options) 
+ + stencilfunc_output = None + with errorhandler(stencil_ex, "@stencil"): + stencil_func_impl = stencil(**stencil_args) + # stencil result + stencilfunc_output = stencil_func_impl(*args) + + # wrapped stencil impl, could this be generated? + if len(args) == 1: + def wrap_stencil(arg0): + return stencil_func_impl(arg0) + elif len(args) == 2: + def wrap_stencil(arg0, arg1): + return stencil_func_impl(arg0, arg1) + elif len(args) == 3: + def wrap_stencil(arg0, arg1, arg2): + return stencil_func_impl(arg0, arg1, arg2) + else: + raise ValueError( + "Up to 3 arguments can be provided, found %s" % + len(args)) + + sig = tuple([numba.typeof(x) for x in args]) + + njit_output = None + with errorhandler(njit_ex, "njit"): + wrapped_cfunc = self.compile_njit(wrap_stencil, sig) + # njit result + njit_output = wrapped_cfunc.entry_point(*args) + + parfor_output = None + with errorhandler(parfor_ex, "parfors"): + wrapped_cpfunc = self.compile_parallel(wrap_stencil, sig) + # parfor result + parfor_output = wrapped_cpfunc.entry_point(*args) + + if DEBUG_OUTPUT: + print("\n@stencil_output:\n", stencilfunc_output) + print("\nnjit_output:\n", njit_output) + print("\nparfor_output:\n", parfor_output) + + try: + if not stencil_ex: + np.testing.assert_almost_equal( + stencilfunc_output, expected, decimal=1) + self.assertEqual(expected.dtype, stencilfunc_output.dtype) + except Exception as e: + should_not_fail.append( + ('@stencil', "%s: %s" % + (type(e), str(e)))) + print("@stencil failed: %s" % str(e)) + + try: + if not njit_ex: + np.testing.assert_almost_equal( + njit_output, expected, decimal=1) + self.assertEqual(expected.dtype, njit_output.dtype) + except Exception as e: + should_not_fail.append(('njit', "%s: %s" % (type(e), str(e)))) + print("@njit failed: %s" % str(e)) + + try: + if not parfor_ex: + np.testing.assert_almost_equal( + parfor_output, expected, decimal=1) + self.assertEqual(expected.dtype, parfor_output.dtype) + try: + self.assertIn( + '@do_scheduling', + 
wrapped_cpfunc.library.get_llvm_str()) + except AssertionError: + msg = 'Could not find `@do_scheduling` in LLVM IR' + raise AssertionError(msg) + except Exception as e: + should_not_fail.append( + ('parfors', "%s: %s" % + (type(e), str(e)))) + print("@njit(parallel=True) failed: %s" % str(e)) + + if DEBUG_OUTPUT: + print("\n\n") + + if should_fail: + msg = ["%s" % x for x in should_fail] + raise RuntimeError(("The following implementations should have " + "raised an exception but did not:\n%s") % msg) + + if should_not_fail: + impls = ["%s" % x[0] for x in should_not_fail] + errs = ''.join(["%s: Message: %s\n\n" % + x for x in should_not_fail]) + str1 = ("The following implementations should not have raised an " + "exception but did:\n%s\n" % impls) + str2 = "Errors were:\n\n%s" % errs + raise RuntimeError(str1 + str2) + + def check_exceptions(self, pyfunc, *args, **kwargs): + """ + For a given kernel: + + The expected result is computed from a pyStencil version of the + stencil. + + The following results are then computed: + * from a pure @stencil decoration of the kernel. + * from the njit of a trivial wrapper function around the pure @stencil + decorated function. + * from the njit(parallel=True) of a trivial wrapper function around + the pure @stencil decorated function. + + The results are then compared. 
+ """ + options = kwargs.get('options', dict()) + expected_exception = kwargs.get('expected_exception') + + # collect fails + should_fail = [] + should_not_fail = [] + + # runner that handles fails + @contextmanager + def errorhandler(exty=None, usecase=None): + try: + yield + except Exception as e: + if exty is not None: + lexty = exty if hasattr(exty, '__iter__') else [exty, ] + found = False + for ex in lexty: + found |= isinstance(e, ex) + if not found: + raise + else: + should_not_fail.append( + (usecase, "%s: %s" % + (type(e), str(e)))) + else: + if exty is not None: + should_fail.append(usecase) + + if isinstance(expected_exception, dict): + stencil_ex = expected_exception['stencil'] + njit_ex = expected_exception['njit'] + parfor_ex = expected_exception['parfor'] + else: + stencil_ex = expected_exception + njit_ex = expected_exception + parfor_ex = expected_exception + + stencil_args = {'func_or_mode': pyfunc} + stencil_args.update(options) + + with errorhandler(stencil_ex, "@stencil"): + stencil_func_impl = stencil(**stencil_args) + # stencil result + stencil_func_impl(*args) + + # wrapped stencil impl, could this be generated? 
+ if len(args) == 1: + def wrap_stencil(arg0): + return stencil_func_impl(arg0) + elif len(args) == 2: + def wrap_stencil(arg0, arg1): + return stencil_func_impl(arg0, arg1) + elif len(args) == 3: + def wrap_stencil(arg0, arg1, arg2): + return stencil_func_impl(arg0, arg1, arg2) + else: + raise ValueError( + "Up to 3 arguments can be provided, found %s" % + len(args)) + + sig = tuple([numba.typeof(x) for x in args]) + + with errorhandler(njit_ex, "njit"): + wrapped_cfunc = self.compile_njit(wrap_stencil, sig) + # njit result + wrapped_cfunc.entry_point(*args) + + with errorhandler(parfor_ex, "parfors"): + wrapped_cpfunc = self.compile_parallel(wrap_stencil, sig) + # parfor result + wrapped_cpfunc.entry_point(*args) + + if should_fail: + msg = ["%s" % x for x in should_fail] + raise RuntimeError(("The following implementations should have " + "raised an exception but did not:\n%s") % msg) + + if should_not_fail: + impls = ["%s" % x[0] for x in should_not_fail] + errs = ''.join(["%s: Message: %s\n\n" % + x for x in should_not_fail]) + str1 = ("The following implementations should not have raised an " + "exception but did:\n%s\n" % impls) + str2 = "Errors were:\n\n%s" % errs + raise RuntimeError(str1 + str2) + + def exception_dict(self, **kwargs): + d = dict() + d['pyStencil'] = None + d['stencil'] = None + d['njit'] = None + d['parfor'] = None + for k, v in kwargs.items(): + d[k] = v + return d + + def check_stencil_arrays(self, *args, **kwargs): + neighborhood = kwargs.get('neighborhood') + init_shape = args[0].shape + if neighborhood is not None: + if len(init_shape) != len(neighborhood): + raise ValueError('Invalid neighborhood supplied') + for x in args[1:]: + if hasattr(x, 'shape'): + if init_shape != x.shape: + raise ValueError('Input stencil arrays do not commute') + + def test_basic00(self): + """rel index""" + def kernel(a): + return a[0, 0] + + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def 
__kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1]): + for __a in range(0, a.shape[0]): + __b0[__a, __b] = a[__a + 0, __b + 0] + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic01(self): + """rel index add const""" + def kernel(a): + return a[0, 1] + + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1] - 1): + for __a in range(0, a.shape[0]): + __b0[__a, __b] = a[__a + 0, __b + 1] + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic02(self): + """rel index add const""" + def kernel(a): + return a[0, -1] + + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(1, a.shape[1]): + for __a in range(0, a.shape[0]): + __b0[__a, __b] = a[__a + 0, __b + -1] + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic03(self): + """rel index add const""" + def kernel(a): + return a[1, 0] + + # 
---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1]): + for __a in range(0, a.shape[0] - 1): + __b0[__a, __b] = a[__a + 1, __b + 0] + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic04(self): + """rel index add const""" + def kernel(a): + return a[-1, 0] + + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1]): + for __a in range(1, a.shape[0]): + __b0[__a, __b] = a[__a + -1, __b + 0] + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic05(self): + """rel index add const""" + def kernel(a): + return a[-1, 1] + + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1] - 1): + for __a in range(1, a.shape[0]): + __b0[__a, __b] = a[__a + -1, __b + 1] + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic06(self): + """rel index 
add const""" + def kernel(a): + return a[1, -1] + + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(1, a.shape[1]): + for __a in range(0, a.shape[0] - 1): + __b0[__a, __b] = a[__a + 1, __b + -1] + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic07(self): + """rel index add const""" + def kernel(a): + return a[1, 1] + + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1] - 1): + for __a in range(0, a.shape[0] - 1): + __b0[__a, __b] = a[__a + 1, __b + 1] + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic08(self): + """rel index add const""" + def kernel(a): + return a[-1, -1] + + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(1, a.shape[1]): + for __a in range(1, a.shape[0]): + __b0[__a, __b] = a[__a + -1, __b + -1] + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + 
self.check_against_expected(kernel, expected, a) + + def test_basic09(self): + """rel index add const""" + def kernel(a): + return a[-2, 2] + + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1] - 2): + for __a in range(2, a.shape[0]): + __b0[__a, __b] = a[__a + -2, __b + 2] + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic10(self): + """rel index add const""" + def kernel(a): + return a[0, 0] + a[1, 0] + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1]): + for __a in range(0, a.shape[0] - 1): + __b0[__a, __b] = a[__a + 0, __b + 0] + a[__a + 1, __b + 0] + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic11(self): + """rel index add const""" + def kernel(a): + return a[-1, 0] + a[1, 0] + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1]): + for __a in range(1, a.shape[0] - 1): + __b0[__a, __b] = a[__a + -1, __b + 0] + a[__a + 1, __b + 0] + return __b0 + # 
---------------------------------------------------------------------- + + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic12(self): + """rel index add const""" + def kernel(a): + return a[-1, 1] + a[1, -1] + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(1, a.shape[1] - 1): + for __a in range(1, a.shape[0] - 1): + __b0[__a, __b] = a[__a + -1, __b + 1] + a[__a + 1, __b + -1] + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic13(self): + """rel index add const""" + def kernel(a): + return a[-1, -1] + a[1, 1] + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(1, a.shape[1] - 1): + for __a in range(1, a.shape[0] - 1): + __b0[__a, __b] = a[__a + -1, __b + -1] + a[__a + 1, __b + 1] + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic14(self): + """rel index add domain change const""" + def kernel(a): + return a[0, 0] + 1j + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, 
dtype=type(__retdtype)) + for __b in range(0, a.shape[1]): + for __a in range(0, a.shape[0]): + __b0[__a, __b] = a[__a + 0, __b + 0] + 1.0j + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic14b(self): + """rel index add domain change const""" + def kernel(a): + t = 1.j + return a[0, 0] + t + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1]): + for __a in range(0, a.shape[0]): + t = 1.0j + __b0[__a, __b] = a[__a + 0, __b + 0] + t + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic15(self): + """two rel index, add const""" + def kernel(a): + return a[0, 0] + a[1, 0] + 1. + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1]): + for __a in range(0, a.shape[0] - 1): + __b0[__a, __b] = (a[__a + 0, __b + 0] + + a[__a + 1, __b + 0] + 1.0) + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic17(self): + """two rel index boundary test, add const""" + def kernel(a): + return a[0, 0] + a[2, 0] + 1. 
+ # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1]): + for __a in range(0, a.shape[0] - 2): + __b0[__a, __b] = (a[__a + 0, __b + 0] + + a[__a + 2, __b + 0] + 1.0) + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic18(self): + """two rel index boundary test, add const""" + def kernel(a): + return a[0, 0] + a[-2, 0] + 1. + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1]): + for __a in range(2, a.shape[0]): + __b0[__a, __b] = (a[__a + 0, __b + 0] + + a[__a + -2, __b + 0] + 1.0) + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic19(self): + """two rel index boundary test, add const""" + def kernel(a): + return a[0, 0] + a[0, 3] + 1. 
+ # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1] - 3): + for __a in range(0, a.shape[0]): + __b0[__a, __b] = (a[__a + 0, __b + 0] + + a[__a + 0, __b + 3] + 1.0) + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic20(self): + """two rel index boundary test, add const""" + def kernel(a): + return a[0, 0] + a[0, -3] + 1. + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(3, a.shape[1]): + for __a in range(0, a.shape[0]): + __b0[__a, __b] = (a[__a + 0, __b + 0] + + a[__a + 0, __b + -3] + 1.0) + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic21(self): + """same rel, add const""" + def kernel(a): + return a[0, 0] + a[0, 0] + 1. 
+ # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1]): + for __a in range(0, a.shape[0]): + __b0[__a, __b] = (a[__a + 0, __b + 0] + + a[__a + 0, __b + 0] + 1.0) + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic22(self): + """rel idx const expr folding, add const""" + def kernel(a): + return a[1 + 0, 0] + a[0, 0] + 1. + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1]): + for __a in range(0, a.shape[0] - 1): + __b0[__a, __b] = (a[__a + 1, __b + 0] + + a[__a + 0, __b + 0] + 1.0) + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic23(self): + """rel idx, work in body""" + def kernel(a): + x = np.sin(10 + a[2, 1]) + return a[1 + 0, 0] + a[0, 0] + x + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1] - 1): + for __a in range(0, a.shape[0] - 2): + x = np.sin(10 + a[__a + 2, __b + 1]) + __b0[__a, __b] = (a[__a + 1, __b + 0] + + a[__a + 0, __b + 0] + x) + return __b0 + # 
---------------------------------------------------------------------- + + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic23a(self): + """rel idx, dead code should not impact rel idx""" + def kernel(a): + x = np.sin(10 + a[2, 1]) # noqa: F841 # dead code expected + return a[1 + 0, 0] + a[0, 0] + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1] - 1): + for __a in range(0, a.shape[0] - 2): + x = np.sin(10 + a[__a + 2, __b + 1]) # noqa: F841 + __b0[__a, __b] = a[__a + 1, __b + 0] + a[__a + 0, __b + 0] + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic24(self): + """1d idx on 2d arr""" + a = np.arange(12).reshape(3, 4) + + def kernel(a): + return a[0] + 1. + + self.check_exceptions(kernel, a, expected_exception=[TypingError,]) + + def test_basic25(self): + """no idx on 2d arr""" + a = np.arange(12).reshape(3, 4) + + def kernel(a): + return 1. + self.check_exceptions(kernel, a, expected_exception=[ValueError, + NumbaValueError,]) + + def test_basic26(self): + """3d arr""" + + def kernel(a): + return a[0, 0, 0] - a[0, 1, 0] + 1. 
+ # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __c in range(0, a.shape[2]): + for __b in range(0, a.shape[1] - 1): + for __a in range(0, a.shape[0]): + __b0[__a, __b, __c] = (a[__a + 0, __b + 0, __c + 0] - + a[__a + 0, __b + 1, __c + 0] + + 1.0) + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(64).reshape(4, 8, 2) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic27(self): + """4d arr""" + def kernel(a): + return a[0, 0, 0, 0] - a[0, 1, 0, -1] + 1. + + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __d in range(1, a.shape[3]): + for __c in range(0, a.shape[2]): + for __b in range(0, a.shape[1] - 1): + for __a in range(0, a.shape[0]): + __b0[__a, __b, __c, __d] = (a[__a + 0, __b + 0, + __c + 0, __d + 0] - + a[__a + 0, __b + 1, + __c + 0, __d + -1] + + 1.0) + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(128).reshape(4, 8, 2, 2) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic28(self): + """type widen """ + def kernel(a): + return a[0, 0] + np.float64(10.) 
+ + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1]): + for __a in range(0, a.shape[0]): + __b0[__a, __b] = a[__a + 0, __b + 0] + np.float64(10.0) + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12).reshape(3, 4).astype(np.float32) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic29(self): + """const index from func """ + a = np.arange(12.).reshape(3, 4) + + def kernel(a): + return a[0, int(np.cos(0))] + self.check_exceptions(kernel, a, expected_exception=[ValueError, + NumbaValueError, + LoweringError]) + + def test_basic30(self): + """signed zeros""" + def kernel(a): + return a[-0, -0] + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1]): + for __a in range(0, a.shape[0]): + __b0[__a, __b] = a[__a + -0, __b + -0] + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12).reshape(3, 4).astype(np.float32) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic31(self): + """does a const propagate? 
2D""" + def kernel(a): + t = 1 + return a[t, 0] + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1]): + for __a in range(0, a.shape[0] - 1): + t = 1 + __b0[__a, __b] = a[__a + t, __b + 0] + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(12).reshape(3, 4).astype(np.float32) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + @unittest.skip("constant folding not implemented") + def test_basic31b(self): + """does a const propagate?""" + a = np.arange(12.).reshape(3, 4) # noqa: F841 + + def kernel(a): + s = 1 + t = 1 - s + return a[t, 0] + + #TODO: add check should this be implemented + + def test_basic31c(self): + """does a const propagate? 1D""" + def kernel(a): + t = 1 + return a[t] + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __a in range(0, a.shape[0] - 1): + t = 1 + __b0[__a,] = a[__a + t] + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(12.) 
+ expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic32(self): + """typed int index""" + a = np.arange(12.).reshape(3, 4) + + def kernel(a): + return a[np.int8(1), 0] + self.check_exceptions(kernel, a, expected_exception=[ValueError, + NumbaValueError, + LoweringError]) + + def test_basic33(self): + """add 0d array""" + def kernel(a): + return a[0, 0] + np.array(1) + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1]): + for __a in range(0, a.shape[0]): + __b0[__a, __b] = a[__a + 0, __b + 0] + np.array(1) + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic34(self): + """More complex rel index with dependency on addition rel index""" + def kernel(a): + g = 4. 
+ a[0, 1] + return g + (a[0, 1] + a[1, 0] + a[0, -1] + np.sin(a[-2, 0])) + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(1, a.shape[1] - 1): + for __a in range(2, a.shape[0] - 1): + g = 4.0 + a[__a + 0, __b + 1] + __b0[__a, __b] = g + (a[__a + 0, __b + 1] + + a[__a + 1, __b + 0] + + a[__a + 0, __b + -1] + + np.sin(a[__a + -2, __b + 0])) + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(144).reshape(12, 12) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic35(self): + """simple cval where cval is int but castable to dtype of float""" + def kernel(a): + return a[0, 1] + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 5, dtype=type(__retdtype)) + for __b in range(0, a.shape[1] - 1): + for __a in range(0, a.shape[0]): + __b0[__a, __b] = a[__a + 0, __b + 1] + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a, options={'cval': 5}) + + def test_basic36(self): + """more complex with cval""" + def kernel(a): + return a[0, 1] + a[0, -1] + a[1, -1] + a[1, -1] + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 5.0, dtype=type(__retdtype)) + for __b in range(1, a.shape[1] - 1): + for __a in range(0, 
a.shape[0] - 1): + __b0[__a, __b] = (a[__a + 0, __b + 1] + + a[__a + 0, __b + -1] + + a[__a + 1, __b + -1] + + a[__a + 1, __b + -1]) + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a, options={'cval': 5}) + + def test_basic37(self): + """cval is expr""" + def kernel(a): + return a[0, 1] + a[0, -1] + a[1, -1] + a[1, -1] + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 68.0, dtype=type(__retdtype)) + for __b in range(1, a.shape[1] - 1): + for __a in range(0, a.shape[0] - 1): + __b0[__a, __b] = (a[__a + 0, __b + 1] + + a[__a + 0, __b + -1] + + a[__a + 1, __b + -1] + + a[__a + 1, __b + -1]) + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a, + options={'cval': 5 + 63.}) + + def test_basic38(self): + """cval is complex""" + def kernel(a): + return a[0, 1] + a[0, -1] + a[1, -1] + a[1, -1] + a = np.arange(12.).reshape(3, 4) + ex = self.exception_dict( + stencil=NumbaValueError, + parfor=NumbaValueError, + njit=NumbaValueError) + self.check_exceptions(kernel, a, options={'cval': 1.j}, + expected_exception=ex) + + def test_basic39(self): + """cval is func expr""" + def kernel(a): + return a[0, 1] + a[0, -1] + a[1, -1] + a[1, -1] + + cval = np.sin(3.) 
+ np.cos(2) + + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, cval, dtype=type(__retdtype)) + for __b in range(1, a.shape[1] - 1): + for __a in range(0, a.shape[0] - 1): + __b0[__a, __b] = (a[__a + 0, __b + 1] + + a[__a + 0, __b + -1] + + a[__a + 1, __b + -1] + + a[__a + 1, __b + -1]) + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a, + options={'cval': cval}) + + def test_basic40(self): + """2 args!""" + def kernel(a, b): + return a[0, 1] + b[0, -2] + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, b, neighborhood): + self.check_stencil_arrays(a, b, neighborhood=neighborhood) + __retdtype = kernel(a, b) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(2, a.shape[1] - 1): + for __a in range(0, a.shape[0]): + __b0[__a, __b] = a[__a + 0, __b + 1] + b[__a + 0, __b + -2] + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(12.).reshape(3, 4) + b = np.arange(12.).reshape(3, 4) + expected = __kernel(a, b, None) + self.check_against_expected(kernel, expected, a, b) + + def test_basic41(self): + """2 args! rel arrays wildly not same size!""" + def kernel(a, b): + return a[0, 1] + b[0, -2] + a = np.arange(12.).reshape(3, 4) + b = np.arange(1.).reshape(1, 1) + self.check_exceptions(kernel, a, b, expected_exception=[ValueError, + AssertionError]) + + def test_basic42(self): + """2 args! 
rel arrays very close in size""" + def kernel(a, b): + return a[0, 1] + b[0, -2] + a = np.arange(12.).reshape(3, 4) + b = np.arange(9.).reshape(3, 3) + self.check_exceptions(kernel, a, b, expected_exception=[ValueError, + AssertionError]) + + def test_basic43(self): + """2 args more complexity""" + def kernel(a, b): + return a[0, 1] + a[1, 2] + b[-2, 0] + b[0, -1] + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, b, neighborhood): + self.check_stencil_arrays(a, b, neighborhood=neighborhood) + __retdtype = kernel(a, b) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(1, a.shape[1] - 2): + for __a in range(2, a.shape[0] - 1): + __b0[__a, __b] = (a[__a + 0, __b + 1] + + a[__a + 1, __b + 2] + + b[__a + -2, __b + 0] + + b[__a + 0, __b + -1]) + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(30.).reshape(5, 6) + b = np.arange(30.).reshape(5, 6) + expected = __kernel(a, b, None) + self.check_against_expected(kernel, expected, a, b) + + def test_basic44(self): + """2 args, has assignment before use""" + def kernel(a, b): + a[0, 1] = 12 + return a[0, 1] + a = np.arange(12.).reshape(3, 4) + b = np.arange(12.).reshape(3, 4) + self.check_exceptions(kernel, a, b, expected_exception=[NumbaValueError, + LoweringError]) + + def test_basic45(self): + """2 args, has assignment and then cross dependency""" + def kernel(a, b): + a[0, 1] = 12 + return a[0, 1] + a[1, 0] + a = np.arange(12.).reshape(3, 4) + b = np.arange(12.).reshape(3, 4) + self.check_exceptions(kernel, a, b, expected_exception=[NumbaValueError, + LoweringError]) + + def test_basic46(self): + """2 args, has cross relidx assignment""" + def kernel(a, b): + a[0, 1] = b[1, 2] + return a[0, 1] + a[1, 0] + a = np.arange(12.).reshape(3, 4) + b = np.arange(12.).reshape(3, 4) + self.check_exceptions(kernel, a, b, expected_exception=[NumbaValueError, + LoweringError]) + + def 
test_basic47(self): + """3 args""" + def kernel(a, b, c): + return a[0, 1] + b[1, 0] + c[-1, 0] + + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, b, c, neighborhood): + self.check_stencil_arrays(a, b, c, neighborhood=neighborhood) + __retdtype = kernel(a, b, c) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1] - 1): + for __a in range(1, a.shape[0] - 1): + __b0[__a, __b] = (a[__a + 0, __b + 1] + + b[__a + 1, __b + 0] + + c[__a + -1, __b + 0]) + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(12.).reshape(3, 4) + b = np.arange(12.).reshape(3, 4) + c = np.arange(12.).reshape(3, 4) + expected = __kernel(a, b, c, None) + self.check_against_expected(kernel, expected, a, b, c) + + # matches pyStencil, but all ought to fail + # probably hard to detect? + def test_basic48(self): + """2 args, has assignment before use via memory alias""" + def kernel(a): + c = a.T + c[:, :] = 10 + return a[0, 1] + + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a,neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1] - 1): + for __a in range(0, a.shape[0]): + c = a.T + c[:, :] = 10 + __b0[__a, __b] = a[__a + 0, __b + 1] + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12.).reshape(3, 4) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic49(self): + """2 args, standard_indexing on second""" + def kernel(a, b): + return a[0, 1] + b[0, 3] + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, b, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + 
__retdtype = kernel(a, b) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1] - 1): + for __a in range(0, a.shape[0]): + __b0[__a, __b] = a[__a + 0, __b + 1] + b[0, 3] + return __b0 + + # ---------------------------------------------------------------------- + a = np.arange(12.).reshape(3, 4) + b = np.arange(12.).reshape(3, 4) + expected = __kernel(a, b, None) + self.check_against_expected(kernel, expected, a, b, + options={'standard_indexing': 'b'}) + + @unittest.skip("dynamic range checking not implemented") + def test_basic50(self): + """2 args, standard_indexing OOB""" + def kernel(a, b): + return a[0, 1] + b[0, 15] + #TODO: add check should this be implemented + + def test_basic51(self): + """2 args, standard_indexing, no relidx""" + def kernel(a, b): + return a[0, 1] + b[0, 2] + a = np.arange(12.).reshape(3, 4) + b = np.arange(12.).reshape(3, 4) + self.check_exceptions(kernel, a, b, + options={'standard_indexing': ['a', 'b']}, + expected_exception=[ValueError, NumbaValueError]) + + def test_basic52(self): + """3 args, standard_indexing on middle arg """ + def kernel(a, b, c): + return a[0, 1] + b[0, 1] + c[1, 2] + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, b, c, neighborhood): + self.check_stencil_arrays(a, c, neighborhood=neighborhood) + __retdtype = kernel(a, b, c) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1] - 2): + for __a in range(0, a.shape[0] - 1): + __b0[__a, __b] = (a[__a + 0, __b + 1] + b[0, 1] + + c[__a + 1, __b + 2]) + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(12.).reshape(3, 4) + b = np.arange(4.).reshape(2, 2) + c = np.arange(12.).reshape(3, 4) + expected = __kernel(a, b, c, None) + self.check_against_expected(kernel, expected, a, b, c, + options={'standard_indexing': 'b'}) + + def test_basic53(self): + """2 args, standard_indexing on 
variable that does not exist""" + def kernel(a, b): + return a[0, 1] + b[0, 2] + a = np.arange(12.).reshape(3, 4) + b = np.arange(12.).reshape(3, 4) + ex = self.exception_dict( + stencil=Exception, + parfor=NumbaValueError, + njit=Exception) + self.check_exceptions(kernel, a, b, options={'standard_indexing': 'c'}, + expected_exception=ex) + + def test_basic54(self): + """2 args, standard_indexing, index from var""" + def kernel(a, b): + t = 2 + return a[0, 1] + b[0, t] + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, b, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a, b) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1] - 1): + for __a in range(0, a.shape[0]): + t = 2 + __b0[__a, __b] = a[__a + 0, __b + 1] + b[0, t] + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(12.).reshape(3, 4) + b = np.arange(12.).reshape(3, 4) + expected = __kernel(a, b, None) + self.check_against_expected(kernel, expected, a, b, + options={'standard_indexing': 'b'}) + + def test_basic55(self): + """2 args, standard_indexing, index from more complex var""" + def kernel(a, b): + s = 1 + t = 2 - s + return a[0, 1] + b[0, t] + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, b, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a, b) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1] - 1): + for __a in range(0, a.shape[0]): + s = 1 + t = 2 - s + __b0[__a, __b] = a[__a + 0, __b + 1] + b[0, t] + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(12.).reshape(3, 4) + b = np.arange(12.).reshape(3, 4) + expected = __kernel(a, b, None) + self.check_against_expected(kernel, expected, a, b, + 
options={'standard_indexing': 'b'}) + + def test_basic56(self): + """2 args, standard_indexing, added complexity """ + def kernel(a, b): + s = 1 + acc = 0 + for k in b[0, :]: + acc += k + t = 2 - s - 1 + return a[0, 1] + b[0, t] + acc + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, b, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a, b) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1] - 1): + for __a in range(0, a.shape[0]): + s = 1 + acc = 0 + for k in b[(0), :]: + acc += k + t = 2 - s - 1 + __b0[__a, __b] = a[__a + 0, __b + 1] + b[0, t] + acc + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(12.).reshape(3, 4) + b = np.arange(12.).reshape(3, 4) + expected = __kernel(a, b, None) + self.check_against_expected(kernel, expected, a, b, + options={'standard_indexing': 'b'}) + + def test_basic57(self): + """2 args, standard_indexing, split index operation """ + def kernel(a, b): + c = b[0] + return a[0, 1] + c[1] + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, b, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a, b) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1] - 1): + for __a in range(0, a.shape[0]): + c = b[0] + __b0[__a, __b] = a[__a + 0, __b + 1] + c[1] + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(12.).reshape(3, 4) + b = np.arange(12.).reshape(3, 4) + expected = __kernel(a, b, None) + self.check_against_expected(kernel, expected, a, b, + options={'standard_indexing': 'b'}) + + def test_basic58(self): + """2 args, standard_indexing, split index with broadcast mutation """ + def kernel(a, b): + c = b[0] + 1 + return a[0, 1] + c[1] + # 
---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, b, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a, b) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1] - 1): + for __a in range(0, a.shape[0]): + c = b[0] + 1 + __b0[__a, __b] = a[__a + 0, __b + 1] + c[1] + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(12.).reshape(3, 4) + b = np.arange(12.).reshape(3, 4) + expected = __kernel(a, b, None) + self.check_against_expected(kernel, expected, a, b, + options={'standard_indexing': 'b'}) + + def test_basic59(self): + """3 args, mix of array, relative and standard indexing and const""" + def kernel(a, b, c): + return a[0, 1] + b[1, 1] + c + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, b, c, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a, b, c) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1] - 1): + for __a in range(0, a.shape[0]): + __b0[__a, __b] = a[__a + 0, __b + 1] + b[1, 1] + c + return __b0 + # ---------------------------------------------------------------------- + + a = np.arange(12.).reshape(3, 4) + b = np.arange(12.).reshape(3, 4) + c = 10 + expected = __kernel(a, b, c, None) + self.check_against_expected(kernel, expected, a, b, c, + options={'standard_indexing': ['b', 'c']}) + + def test_basic60(self): + """3 args, mix of array, relative and standard indexing, + tuple pass through""" + def kernel(a, b, c): + return a[0, 1] + b[1, 1] + c[0] + a = np.arange(12.).reshape(3, 4) + b = np.arange(12.).reshape(3, 4) + c = (10,) + # parfors does not support tuple args for stencil kernels + ex = self.exception_dict(parfor=NumbaValueError) + self.check_exceptions(kernel, a, b, c, + options={'standard_indexing': 
['b', 'c']}, + expected_exception=ex) + + def test_basic61(self): + """2 args, standard_indexing on first""" + def kernel(a, b): + return a[0, 1] + b[1, 1] + a = np.arange(12.).reshape(3, 4) + b = np.arange(12.).reshape(3, 4) + self.check_exceptions(kernel, a, b, + options={'standard_indexing': 'a'}, + expected_exception=Exception) + + def test_basic62(self): + """2 args, standard_indexing and cval""" + def kernel(a, b): + return a[0, 1] + b[1, 1] + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, b, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a, b) + __b0 = np.full(a.shape, 10.0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1] - 1): + for __a in range(0, a.shape[0]): + __b0[__a, __b] = a[__a + 0, __b + 1] + b[1, 1] + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(12.).reshape(3, 4) + b = np.arange(12.).reshape(3, 4) + expected = __kernel(a, b, None) + self.check_against_expected(kernel, expected, a, b, + options={'standard_indexing': 'b', + 'cval': 10.}) + + def test_basic63(self): + """2 args, standard_indexing applied to relative, should fail, + non-const idx""" + def kernel(a, b): + return a[0, b[0, 1]] + a = np.arange(12.).reshape(3, 4) + b = np.arange(12).reshape(3, 4) + ex = self.exception_dict( + stencil=NumbaValueError, + parfor=NumbaValueError, + njit=NumbaValueError) + self.check_exceptions(kernel, a, b, options={'standard_indexing': 'b'}, + expected_exception=ex) + + # stencil, njit, parfors all fail. Does this make sense? 
+ def test_basic64(self): + """1 arg that uses standard_indexing""" + def kernel(a): + return a[0, 0] + a = np.arange(12.).reshape(3, 4) + self.check_exceptions(kernel, a, options={'standard_indexing': 'a'}, + expected_exception=[ValueError, NumbaValueError]) + + def test_basic65(self): + """basic induced neighborhood test""" + def kernel(a): + cumul = 0 + for i in range(-29, 1): + cumul += a[i] + return cumul / 30 + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __an in range(29, a.shape[0]): + cumul = 0 + for i in range(-29, 1): + cumul += a[__an + i] + __b0[__an,] = cumul / 30 + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(60.) + nh = ((-29, 0),) + expected = __kernel(a, nh) + self.check_against_expected(kernel, expected, a, + options={'neighborhood': nh}) + + # Should this work? a[0] is out of neighborhood? + def test_basic66(self): + """basic const neighborhood test""" + def kernel(a): + cumul = 0 + for i in range(-29, 1): + cumul += a[0] + return cumul / 30 + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __an in range(29, a.shape[0]): + cumul = 0 + for i in range(-29, 1): + cumul += a[__an + 0] + __b0[__an,] = cumul / 30 + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(60.) 
+ nh = ((-29, 0),) + expected = __kernel(a, nh) + self.check_against_expected(kernel, expected, a, + options={'neighborhood': nh}) + + def test_basic67(self): + """basic 2d induced neighborhood test""" + def kernel(a): + cumul = 0 + for i in range(-5, 1): + for j in range(-10, 1): + cumul += a[i, j] + return cumul / (10 * 5) + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __bn in range(10, a.shape[1]): + for __an in range(5, a.shape[0]): + cumul = 0 + for i in range(-5, 1): + for j in range(-10, 1): + cumul += a[__an + i, __bn + j] + __b0[__an, __bn] = cumul / 50 + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(10. * 20.).reshape(10, 20) + nh = ((-5, 0), (-10, 0),) + expected = __kernel(a, nh) + self.check_against_expected(kernel, expected, a, + options={'neighborhood': nh}) + + def test_basic67b(self): + """basic 2d induced 1D neighborhood""" + def kernel(a): + cumul = 0 + for j in range(-10, 1): + cumul += a[0, j] + return cumul / (10 * 5) + a = np.arange(10. * 20.).reshape(10, 20) + self.check_exceptions(kernel, a, options={'neighborhood': ((-10, 0),)}, + expected_exception=[TypingError, ValueError]) + + # Should this work or is it UB? a[i, 0] is out of neighborhood? 
+ def test_basic68(self): + """basic 2d one induced, one cost neighborhood test""" + def kernel(a): + cumul = 0 + for i in range(-5, 1): + for j in range(-10, 1): + cumul += a[i, 0] + return cumul / (10 * 5) + + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __bn in range(10, a.shape[1]): + for __an in range(5, a.shape[0]): + cumul = 0 + for i in range(-5, 1): + for j in range(-10, 1): + cumul += a[__an + i, __bn + 0] + __b0[__an, __bn] = cumul / 50 + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(10. * 20.).reshape(10, 20) + nh = ((-5, 0), (-10, 0),) + expected = __kernel(a, nh) + self.check_against_expected(kernel, expected, a, + options={'neighborhood': nh}) + + # Should this work or is it UB? a[0, 0] is out of neighborhood? + def test_basic69(self): + """basic 2d two cost neighborhood test""" + def kernel(a): + cumul = 0 + for i in range(-5, 1): + for j in range(-10, 1): + cumul += a[0, 0] + return cumul / (10 * 5) + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __bn in range(10, a.shape[1]): + for __an in range(5, a.shape[0]): + cumul = 0 + for i in range(-5, 1): + for j in range(-10, 1): + cumul += a[__an + 0, __bn + 0] + __b0[__an, __bn] = cumul / 50 + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(10. 
* 20.).reshape(10, 20) + nh = ((-5, 0), (-10, 0),) + expected = __kernel(a, nh) + self.check_against_expected(kernel, expected, a, + options={'neighborhood': nh}) + + def test_basic70(self): + """neighborhood adding complexity""" + def kernel(a): + cumul = 0 + zz = 12. + for i in range(-5, 1): + t = zz + i + for j in range(-10, 1): + cumul += a[i, j] + t + return cumul / (10 * 5) + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __bn in range(10, a.shape[1]): + for __an in range(5, a.shape[0]): + cumul = 0 + zz = 12.0 + for i in range(-5, 1): + t = zz + i + for j in range(-10, 1): + cumul += a[__an + i, __bn + j] + t + __b0[__an, __bn] = cumul / 50 + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(10. * 20.).reshape(10, 20) + nh = ((-5, 0), (-10, 0),) + expected = __kernel(a, nh) + self.check_against_expected(kernel, expected, a, + options={'neighborhood': nh}) + + def test_basic71(self): + """neighborhood, type change""" + def kernel(a): + cumul = 0 + for i in range(-29, 1): + k = 0. + if i > -15: + k = 1j + cumul += a[i] + k + return cumul / 30 + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __an in range(29, a.shape[0]): + cumul = 0 + for i in range(-29, 1): + k = 0.0 + if i > -15: + k = 1.0j + cumul += a[__an + i] + k + __b0[__an,] = cumul / 30 + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(60.) 
+ nh = ((-29, 0),) + expected = __kernel(a, nh) + self.check_against_expected(kernel, expected, a, + options={'neighborhood': nh}) + + def test_basic72(self): + """neighborhood, narrower range than specified""" + def kernel(a): + cumul = 0 + for i in range(-19, -3): + cumul += a[i] + return cumul / 30 + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __an in range(29, a.shape[0]): + cumul = 0 + for i in range(-19, -3): + cumul += a[__an + i] + __b0[__an,] = cumul / 30 + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(60.) + nh = ((-29, 0),) + expected = __kernel(a, nh) + self.check_against_expected(kernel, expected, a, + options={'neighborhood': nh}) + + def test_basic73(self): + """neighborhood, +ve range""" + def kernel(a): + cumul = 0 + for i in range(5, 11): + cumul += a[i] + return cumul / 30 + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __an in range(0, a.shape[0] - 10): + cumul = 0 + for i in range(5, 11): + cumul += a[__an + i] + __b0[__an,] = cumul / 30 + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(60.) 
+ nh = ((5, 10),) + expected = __kernel(a, nh) + self.check_against_expected(kernel, expected, a, + options={'neighborhood': nh}) + + def test_basic73b(self): + """neighborhood, -ve range""" + def kernel(a): + cumul = 0 + for i in range(-10, -4): + cumul += a[i] + return cumul / 30 + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __an in range(10, a.shape[0]): + cumul = 0 + for i in range(-10, -4): + cumul += a[__an + i] + __b0[__an,] = cumul / 30 + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(60.) + nh = ((-10, -5),) + expected = __kernel(a, nh) + self.check_against_expected(kernel, expected, a, + options={'neighborhood': nh}) + + def test_basic74(self): + """neighborhood, -ve->+ve range span""" + def kernel(a): + cumul = 0 + for i in range(-5, 11): + cumul += a[i] + return cumul / 30 + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __an in range(5, a.shape[0] - 10): + cumul = 0 + for i in range(-5, 11): + cumul += a[__an + i] + __b0[__an,] = cumul / 30 + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(60.) 
+ nh = ((-5, 10),) + expected = __kernel(a, nh) + self.check_against_expected(kernel, expected, a, + options={'neighborhood': nh}) + + def test_basic75(self): + """neighborhood, -ve->-ve range span""" + def kernel(a): + cumul = 0 + for i in range(-10, -1): + cumul += a[i] + return cumul / 30 + + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __an in range(10, a.shape[0]): + cumul = 0 + for i in range(-10, -1): + cumul += a[__an + i] + __b0[__an,] = cumul / 30 + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(60.) + nh = ((-10, -2),) + expected = __kernel(a, nh) + self.check_against_expected(kernel, expected, a, + options={'neighborhood': nh}) + + def test_basic76(self): + """neighborhood, mixed range span""" + def kernel(a): + cumul = 0 + zz = 12. + for i in range(-3, 0): + t = zz + i + for j in range(-3, 4): + cumul += a[i, j] + t + return cumul / (10 * 5) + + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __bn in range(3, a.shape[1] - 3): + for __an in range(3, a.shape[0]): + cumul = 0 + zz = 12.0 + for i in range(-3, 0): + t = zz + i + for j in range(-3, 4): + cumul += a[__an + i, __bn + j] + t + __b0[__an, __bn] = cumul / 50 + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(10. 
* 20.).reshape(10, 20) + nh = ((-3, -1), (-3, 3),) + expected = __kernel(a, nh) + self.check_against_expected(kernel, expected, a, + options={'neighborhood': nh}) + + def test_basic77(self): + """ neighborhood, two args """ + def kernel(a, b): + cumul = 0 + for i in range(-3, 1): + for j in range(-3, 1): + cumul += a[i, j] + b[i, j] + return cumul / (9.) + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, b, neighborhood): + self.check_stencil_arrays(a, b, neighborhood=neighborhood) + __retdtype = kernel(a, b) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __bn in range(3, a.shape[1]): + for __an in range(3, a.shape[0]): + cumul = 0 + for i in range(-3, 1): + for j in range(-3, 1): + cumul += (a[__an + i, __bn + j] + + b[__an + i, __bn + j]) + __b0[__an, __bn] = cumul / 9.0 + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(10. * 20.).reshape(10, 20) + b = np.arange(10. * 20.).reshape(10, 20) + nh = ((-3, 0), (-3, 0),) + expected = __kernel(a, b, nh) + self.check_against_expected(kernel, expected, a, b, + options={'neighborhood': nh}) + + def test_basic78(self): + """ neighborhood, two args, -ve range, -ve range """ + def kernel(a, b): + cumul = 0 + for i in range(-6, -2): + for j in range(-7, -1): + cumul += a[i, j] + b[i, j] + return cumul / (9.) 
+ # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, b, neighborhood): + self.check_stencil_arrays(a, b, neighborhood=neighborhood) + __retdtype = kernel(a, b) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __bn in range(7, a.shape[1]): + for __an in range(6, a.shape[0]): + cumul = 0 + for i in range(-6, -2): + for j in range(-7, -1): + cumul += (a[__an + i, __bn + j] + + b[__an + i, __bn + j]) + __b0[__an, __bn] = cumul / 9.0 + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(15. * 20.).reshape(15, 20) + b = np.arange(15. * 20.).reshape(15, 20) + nh = ((-6, -3), (-7, -2),) + expected = __kernel(a, b, nh) + self.check_against_expected(kernel, expected, a, b, + options={'neighborhood': nh}) + + def test_basic78b(self): + """ neighborhood, two args, -ve range, +ve range """ + def kernel(a, b): + cumul = 0 + for i in range(-6, -2): + for j in range(2, 10): + cumul += a[i, j] + b[i, j] + return cumul / (9.) + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, b, neighborhood): + self.check_stencil_arrays(a, b, neighborhood=neighborhood) + __retdtype = kernel(a, b) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __bn in range(0, a.shape[1] - 9): + for __an in range(6, a.shape[0]): + cumul = 0 + for i in range(-6, -2): + for j in range(2, 10): + cumul += (a[__an + i, __bn + j] + + b[__an + i, __bn + j]) + __b0[__an, __bn] = cumul / 9.0 + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(15. * 20.).reshape(15, 20) + b = np.arange(15. 
* 20.).reshape(15, 20) + nh = ((-6, -3), (2, 9),) + expected = __kernel(a, b, nh) + self.check_against_expected(kernel, expected, a, b, + options={'neighborhood': nh}) + + def test_basic79(self): + """ neighborhood, two incompatible args """ + def kernel(a, b): + cumul = 0 + for i in range(-3, 1): + for j in range(-3, 1): + cumul += a[i, j] + b[i, j] + return cumul / (9.) + a = np.arange(10. * 20.).reshape(10, 20) + b = np.arange(10. * 20.).reshape(10, 10, 2) + ex = self.exception_dict( + stencil=TypingError, + parfor=TypingError, + njit=TypingError) + self.check_exceptions(kernel, a, b, options={'neighborhood': + ((-3, 0), (-3, 0),)}, + expected_exception=ex) + + def test_basic80(self): + """ neighborhood, type change """ + def kernel(a, b): + cumul = 0 + for i in range(-3, 1): + for j in range(-3, 1): + cumul += a[i, j] + b + return cumul / (9.) + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, b, neighborhood): + self.check_stencil_arrays(a, b, neighborhood=neighborhood) + __retdtype = kernel(a, b) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __bn in range(3, a.shape[1]): + for __an in range(3, a.shape[0]): + cumul = 0 + for i in range(-3, 1): + for j in range(-3, 1): + cumul += a[__an + i, __bn + j] + b + __b0[__an, __bn] = cumul / 9.0 + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(10. * 20.).reshape(10, 20) + b = 12.j + nh = ((-3, 0), (-3, 0)) + expected = __kernel(a, b, nh) + self.check_against_expected(kernel, expected, a, b, + options={'neighborhood': nh}) + + def test_basic81(self): + """ neighborhood, dimensionally incompatible arrays """ + def kernel(a, b): + cumul = 0 + for i in range(-3, 1): + for j in range(-3, 1): + cumul += a[i, j] + b[i] + return cumul / (9.) + a = np.arange(10. 
* 20.).reshape(10, 20) + b = a[0].copy() + ex = self.exception_dict( + stencil=TypingError, + parfor=AssertionError, + njit=TypingError) + self.check_exceptions(kernel, a, b, + options={'neighborhood': ((-3, 0), (-3, 0))}, + expected_exception=ex) + + def test_basic82(self): + """ neighborhood, with standard_indexing""" + def kernel(a, b): + cumul = 0 + for i in range(-3, 1): + for j in range(-3, 1): + cumul += a[i, j] + b[1, 3] + return cumul / (9.) + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, b, neighborhood): + self.check_stencil_arrays(a, b, neighborhood=neighborhood) + __retdtype = kernel(a, b) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __bn in range(3, a.shape[1]): + for __an in range(3, a.shape[0]): + cumul = 0 + for i in range(-3, 1): + for j in range(-3, 1): + cumul += a[__an + i, __bn + j] + b[1, 3] + __b0[__an, __bn] = cumul / 9.0 + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(10. * 20.).reshape(10, 20) + b = a.copy() + nh = ((-3, 0), (-3, 0)) + expected = __kernel(a, b, nh) + self.check_against_expected(kernel, expected, a, b, + options={'neighborhood': nh, + 'standard_indexing': 'b'}) + + def test_basic83(self): + """ neighborhood, with standard_indexing and cval""" + def kernel(a, b): + cumul = 0 + for i in range(-3, 1): + for j in range(-3, 1): + cumul += a[i, j] + b[1, 3] + return cumul / (9.) + a = np.arange(10. 
* 20.).reshape(10, 20) + b = a.copy() + + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, b, neighborhood): + self.check_stencil_arrays(a, b, neighborhood=neighborhood) + __retdtype = kernel(a, b) + __b0 = np.full(a.shape, 1.5, dtype=type(__retdtype)) + for __bn in range(3, a.shape[1]): + for __an in range(3, a.shape[0]): + cumul = 0 + for i in range(-3, 1): + for j in range(-3, 1): + cumul += a[__an + i, __bn + j] + b[1, 3] + __b0[__an, __bn] = cumul / 9.0 + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(10. * 20.).reshape(10, 20) + b = a.copy() + nh = ((-3, 0), (-3, 0)) + expected = __kernel(a, b, nh) + self.check_against_expected(kernel, expected, a, b, + options={'neighborhood': nh, + 'standard_indexing': 'b', + 'cval': 1.5,}) + + def test_basic84(self): + """ kernel calls njit """ + def kernel(a): + return a[0, 0] + addone_njit(a[0, 1]) + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1] - 1): + for __a in range(0, a.shape[0]): + __b0[__a, __b] = (a[__a + 0, __b + 0] + + addone_njit.py_func(a[__a + 0, __b + 1])) + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(10. 
* 20.).reshape(10, 20) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic85(self): + """ kernel calls njit(parallel=True)""" + def kernel(a): + return a[0, 0] + addone_pjit(a[0, 1]) + + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1] - 1): + for __a in range(0, a.shape[0]): + __b0[__a, __b] = (a[__a + 0, __b + 0] + + addone_pjit.py_func(a[__a + 0, __b + 1])) + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(10. * 20.).reshape(10, 20) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + # njit/parfors fail correctly, but the error message isn't very informative + def test_basic86(self): + """ bad kwarg """ + def kernel(a): + return a[0, 0] + + a = np.arange(10. * 20.).reshape(10, 20) + self.check_exceptions(kernel, a, options={'bad': 10}, + expected_exception=[ValueError, TypingError]) + + def test_basic87(self): + """ reserved arg name in use """ + def kernel(__sentinel__): + return __sentinel__[0, 0] + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(__sentinel__, neighborhood): + self.check_stencil_arrays(__sentinel__, neighborhood=neighborhood) + __retdtype = kernel(__sentinel__) + __b0 = np.full(__sentinel__.shape, 0, dtype=type(__retdtype)) + for __b in range(0, __sentinel__.shape[1]): + for __a in range(0, __sentinel__.shape[0]): + __b0[__a, __b] = __sentinel__[__a + 0, __b + 0] + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(10. 
* 20.).reshape(10, 20) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic88(self): + """ use of reserved word """ + def kernel(a, out): + return out * a[0, 1] + a = np.arange(12.).reshape(3, 4) + ex = self.exception_dict( + stencil=NumbaValueError, + parfor=NumbaValueError, + njit=NumbaValueError) + self.check_exceptions(kernel, a, 1.0, options={}, expected_exception=ex) + + def test_basic89(self): + """ basic multiple return""" + def kernel(a): + if a[0, 1] > 10: + return 10. + elif a[0, 3] < 8: + return a[0, 0] + else: + return 7. + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1] - 3): + for __a in range(0, a.shape[0]): + if a[__a + 0, __b + 1] > 10: + __b0[__a, __b] = 10.0 + elif a[__a + 0, __b + 3] < 8: + __b0[__a, __b] = a[__a + 0, __b + 0] + else: + __b0[__a, __b] = 7.0 + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(10. * 20.).reshape(10, 20) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic90(self): + """ neighborhood, with standard_indexing and cval, multiple returns""" + def kernel(a, b): + cumul = 0 + for i in range(-3, 1): + for j in range(-3, 1): + cumul += a[i, j] + b[1, 3] + res = cumul / (9.) 
+ if res > 200.0: + return res + 1.0 + else: + return res + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, b, neighborhood): + self.check_stencil_arrays(a, b, neighborhood=neighborhood) + __retdtype = kernel(a, b) + __b0 = np.full(a.shape, 1.5, dtype=type(__retdtype)) + for __bn in range(3, a.shape[1]): + for __an in range(3, a.shape[0]): + cumul = 0 + for i in range(-3, 1): + for j in range(-3, 1): + cumul += a[__an + i, __bn + j] + b[1, 3] + res = cumul / 9.0 + if res > 200.0: + __b0[__an, __bn] = res + 1.0 + else: + __b0[__an, __bn] = res + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(10. * 20.).reshape(10, 20) + b = a.copy() + nh = ((-3, 0), (-3, 0)) + expected = __kernel(a, b, nh) + self.check_against_expected(kernel, expected, a, b, + options={'neighborhood': nh, + 'standard_indexing': 'b', + 'cval': 1.5,}) + + def test_basic91(self): + """ Issue #3454, const(int) == const(int) evaluating incorrectly. """ + def kernel(a): + b = 0 + if (2 == 0): + b = 2 + return a[0, 0] + b + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(0, a.shape[1]): + for __a in range(0, a.shape[0]): + b = 0 + if 2 == 0: + b = 2 + __b0[__a, __b] = a[__a + 0, __b + 0] + b + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(10. * 20.).reshape(10, 20) + expected = __kernel(a, None) + self.check_against_expected(kernel, expected, a) + + def test_basic92(self): + """ Issue #3497, bool return type evaluating incorrectly. 
""" + def kernel(a): + return (a[-1, -1] ^ a[-1, 0] ^ a[-1, 1] ^ + a[0, -1] ^ a[0, 0] ^ a[0, 1] ^ + a[1, -1] ^ a[1, 0] ^ a[1, 1]) + + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __b in range(1, a.shape[1] - 1): + for __a in range(1, a.shape[0] - 1): + __b0[__a, __b] = (a[__a + -1, __b + -1] ^ + a[__a + -1, __b + 0] ^ + a[__a + -1, __b + 1] ^ + a[__a + 0, __b + -1] ^ + a[__a + 0, __b + 0] ^ + a[__a + 0, __b + 1] ^ + a[__a + 1, __b + -1] ^ + a[__a + 1, __b + 0] ^ + a[__a + 1, __b + 1]) + return __b0 + # ---------------------------------------------------------------------- + A = np.array(np.arange(20) % 2).reshape(4, 5).astype(np.bool_) + expected = __kernel(A, None) + self.check_against_expected(kernel, expected, A) + + def test_basic93(self): + """ Issue #3497, bool return type evaluating incorrectly. 
""" + def kernel(a): + return (a[-1, -1] ^ a[-1, 0] ^ a[-1, 1] ^ + a[0, -1] ^ a[0, 0] ^ a[0, 1] ^ + a[1, -1] ^ a[1, 0] ^ a[1, 1]) + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 1, dtype=type(__retdtype)) + for __b in range(1, a.shape[1] - 1): + for __a in range(1, a.shape[0] - 1): + __b0[__a, __b] = (a[__a + -1, __b + -1] ^ + a[__a + -1, __b + 0] ^ + a[__a + -1, __b + 1] ^ + a[__a + 0, __b + -1] ^ + a[__a + 0, __b + 0] ^ + a[__a + 0, __b + 1] ^ + a[__a + 1, __b + -1] ^ + a[__a + 1, __b + 0] ^ + a[__a + 1, __b + 1]) + return __b0 + # ---------------------------------------------------------------------- + A = np.array(np.arange(20) % 2).reshape(4, 5).astype(np.bool_) + expected = __kernel(A, None) + self.check_against_expected(kernel, expected, A, options={'cval': True}) + + def test_basic94(self): + """ Issue #3528. Support for slices. """ + def kernel(a): + return np.median(a[-1:2, -1:2]) + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __bn in range(1, a.shape[1] - 1): + for __an in range(1, a.shape[0] - 1): + __b0[__an, __bn] = np.median(a[__an + -1:__an + 2, + __bn + -1:__bn + 2]) + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(20, dtype=np.uint32).reshape(4, 5) + nh = ((-1, 1), (-1, 1),) + expected = __kernel(a, nh) + self.check_against_expected(kernel, expected, a, + options={'neighborhood': nh}) + + @unittest.skip("not yet supported") + def test_basic95(self): + """ Slice, calculate neighborhood. 
""" + def kernel(a): + return np.median(a[-1:2, -3:4]) + #TODO: add check should this be implemented + + def test_basic96(self): + """ 1D slice. """ + def kernel(a): + return np.median(a[-1:2]) + # ---------------------------------------------------------------------- + # Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, 0, dtype=type(__retdtype)) + for __an in range(1, a.shape[0] - 1): + __b0[__an,] = np.median(a[__an + -1:__an + 2]) + return __b0 + # ---------------------------------------------------------------------- + a = np.arange(20, dtype=np.uint32) + nh = ((-1, 1),) + expected = __kernel(a, nh) + self.check_against_expected(kernel, expected, a, + options={'neighborhood': nh}) + + @unittest.skip("not yet supported") + def test_basic97(self): + """ 2D slice and index. """ + def kernel(a): + return np.median(a[-1:2, 3]) + #TODO: add check should this be implemented + + def test_basic98(self): + """ Test issue #7286 where the cval is a np attr/string-based numerical + constant""" + for cval in (np.nan, np.inf, -np.inf, float('inf'), -float('inf')): + def kernel(a): + return a[0, 0] + ## ----------------------------------------------------------------- + ## Autogenerated kernel + + def __kernel(a, neighborhood): + self.check_stencil_arrays(a, neighborhood=neighborhood) + __retdtype = kernel(a) + __b0 = np.full(a.shape, cval, dtype=type(__retdtype)) + for __bn in range(1, a.shape[1] - 1): + for __an in range(1, a.shape[0] - 1): + __b0[__an, __bn] = a[__an + 0, __bn + 0] + return __b0 + + ## ----------------------------------------------------------------- + a = np.arange(6.).reshape((2, 3)) + nh = ((-1, 1), (-1, 1),) + expected = __kernel(a, nh) + self.check_against_expected(kernel, expected, a, + options={'neighborhood': nh, + 'cval':cval}) + + +if __name__ == "__main__": + unittest.main() diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/test_storeslice.py b/venv/lib/python3.10/site-packages/numba/tests/test_storeslice.py new file mode 100644 index 0000000000000000000000000000000000000000..34950439b7b91026574e7b25713f23d9202bb5d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_storeslice.py @@ -0,0 +1,68 @@ +import numpy as np + +import unittest +from numba import njit +from numba.core import types, errors +from numba.tests.support import TestCase + + +def setitem_slice(a, start, stop, step, scalar): + a[start:stop:step] = scalar + + +def usecase(obs, nPoints): + center = nPoints // 2 + obs[0:center] = np.arange(center) + obs[center] = 321 + obs[(center + 1):] = np.arange(nPoints - center - 1) + + +class TestStoreSlice(TestCase): + + def test_usecase(self): + n = 10 + obs_got = np.zeros(n) + obs_expected = obs_got.copy() + + cfunc = njit((types.float64[:], types.intp))(usecase) + cfunc(obs_got, n) + usecase(obs_expected, n) + + self.assertPreciseEqual(obs_got, obs_expected) + + def test_array_slice_setitem(self): + n = 10 + argtys = (types.int64[:], types.int64, types.int64, types.int64, + types.int64) + cfunc = njit(argtys)(setitem_slice) + a = np.arange(n, dtype=np.int64) + # tuple is (start, stop, step, scalar) + tests = ((2, 6, 1, 7), + (2, 6, -1, 7), + (-2, len(a), 2, 77), + (-2, 2 * len(a), 2, 77), + (-2, -6, 3, 88), + (-2, -6, -3, 9999), + (-6, -2, 4, 88), + (-6, -2, -4, 88), + (16, 20, 2, 88), + (16, 20, -2, 88), + ) + + for start, stop, step, scalar in tests: + a = np.arange(n, dtype=np.int64) + b = np.arange(n, dtype=np.int64) + cfunc(a, start, stop, step, scalar) + setitem_slice(b, start, stop, step, scalar) + self.assertPreciseEqual(a, b) + + # test if step = 0 + a = np.arange(n, dtype=np.int64) + with self.assertRaises(ValueError) as cm: + cfunc(a, 3, 6, 0, 88) + self.assertEqual(str(cm.exception), "slice step cannot be zero") + + +if __name__ == '__main__': + unittest.main() + diff --git 
a/venv/lib/python3.10/site-packages/numba/tests/test_struct_ref.py b/venv/lib/python3.10/site-packages/numba/tests/test_struct_ref.py new file mode 100644 index 0000000000000000000000000000000000000000..de52fdcf96218af9f3f607ae6fe8078ede521856 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_struct_ref.py @@ -0,0 +1,440 @@ +""" +Test mutable struct, aka, structref +""" +import warnings + +import numpy as np + +from numba import typed, njit, errors, typeof +from numba.core import types +from numba.experimental import structref +from numba.extending import overload_method, overload_attribute +from numba.tests.support import ( + MemoryLeakMixin, TestCase, temp_directory, override_config, +) + + +@structref.register +class MySimplerStructType(types.StructRef): + """ + Test associated with this type represent the lowest level uses of structref. + """ + pass + + +my_struct_ty = MySimplerStructType( + fields=[("values", types.intp[:]), ("counter", types.intp)] +) + +structref.define_boxing(MySimplerStructType, structref.StructRefProxy) + + +class MyStruct(structref.StructRefProxy): + + def __new__(cls, values, counter): + # Define this method to customize the constructor. + # The default takes `*args`. Customizing allow the use of keyword-arg. 
+ # The impl of the method calls `StructRefProxy.__new__` + return structref.StructRefProxy.__new__(cls, values, counter) + + # The below defines wrappers for attributes and methods manually + + @property + def values(self): + return get_values(self) + + @values.setter + def values(self, val): + return set_values(self, val) + + @property + def counter(self): + return get_counter(self) + + def testme(self, arg): + return self.values * arg + self.counter + + @property + def prop(self): + return self.values, self.counter + + def __hash__(self): + return compute_fields(self) + + +@structref.register +class MyStructType(types.StructRef): + """Test associated with this type represent the higher-level uses of + structef. + """ + pass + + +# Call to define_proxy is needed to register the use of `MyStruct` as a +# PyObject proxy for creating a Numba-allocated structref. +# The `MyStruct` class can then be used in both jit-code and interpreted-code. +structref.define_proxy( + MyStruct, + MyStructType, + ['values', 'counter'], +) + + +@njit +def my_struct(values, counter): + st = structref.new(my_struct_ty) + my_struct_init(st, values, counter) + return st + + +@njit +def my_struct_init(self, values, counter): + self.values = values + self.counter = counter + + +@njit +def ctor_by_intrinsic(vs, ctr): + st = my_struct(vs, counter=ctr) + st.values += st.values + st.counter *= ctr + return st + + +@njit +def ctor_by_class(vs, ctr): + return MyStruct(values=vs, counter=ctr) + + +@njit +def get_values(st): + return st.values + + +@njit +def set_values(st, val): + st.values = val + + +@njit +def get_counter(st): + return st.counter + + +@njit +def compute_fields(st): + return st.values + st.counter + + +class TestStructRefBasic(MemoryLeakMixin, TestCase): + def test_structref_type(self): + sr = types.StructRef([('a', types.int64)]) + self.assertEqual(sr.field_dict['a'], types.int64) + sr = types.StructRef([('a', types.int64), ('b', types.float64)]) + 
self.assertEqual(sr.field_dict['a'], types.int64) + self.assertEqual(sr.field_dict['b'], types.float64) + # bad case + with self.assertRaisesRegex(ValueError, + "expecting a str for field name"): + types.StructRef([(1, types.int64)]) + with self.assertRaisesRegex(ValueError, + "expecting a Numba Type for field type"): + types.StructRef([('a', 123)]) + + def test_invalid_uses(self): + with self.assertRaisesRegex(ValueError, "cannot register"): + structref.register(types.StructRef) + with self.assertRaisesRegex(ValueError, "cannot register"): + structref.define_boxing(types.StructRef, MyStruct) + + def test_MySimplerStructType(self): + vs = np.arange(10, dtype=np.intp) + ctr = 13 + + first_expected = vs + vs + first_got = ctor_by_intrinsic(vs, ctr) + # the returned instance is a structref.StructRefProxy + # but not a MyStruct + self.assertNotIsInstance(first_got, MyStruct) + self.assertPreciseEqual(first_expected, get_values(first_got)) + + second_expected = first_expected + (ctr * ctr) + second_got = compute_fields(first_got) + self.assertPreciseEqual(second_expected, second_got) + + def test_MySimplerStructType_wrapper_has_no_attrs(self): + vs = np.arange(10, dtype=np.intp) + ctr = 13 + wrapper = ctor_by_intrinsic(vs, ctr) + self.assertIsInstance(wrapper, structref.StructRefProxy) + with self.assertRaisesRegex(AttributeError, 'values'): + wrapper.values + with self.assertRaisesRegex(AttributeError, 'counter'): + wrapper.counter + + def test_MyStructType(self): + vs = np.arange(10, dtype=np.float64) + ctr = 11 + + first_expected_arr = vs.copy() + first_got = ctor_by_class(vs, ctr) + self.assertIsInstance(first_got, MyStruct) + self.assertPreciseEqual(first_expected_arr, first_got.values) + + second_expected = first_expected_arr + ctr + second_got = compute_fields(first_got) + self.assertPreciseEqual(second_expected, second_got) + self.assertEqual(first_got.counter, ctr) + + def test_MyStructType_mixed_types(self): + # structref constructor is generic + @njit + def 
mixed_type(x, y, m, n): + return MyStruct(x, y), MyStruct(m, n) + + a, b = mixed_type(1, 2.3, 3.4j, (4,)) + self.assertEqual(a.values, 1) + self.assertEqual(a.counter, 2.3) + self.assertEqual(b.values, 3.4j) + self.assertEqual(b.counter, (4,)) + + def test_MyStructType_in_dict(self): + td = typed.Dict() + td['a'] = MyStruct(1, 2.3) + self.assertEqual(td['a'].values, 1) + self.assertEqual(td['a'].counter, 2.3) + # overwrite + td['a'] = MyStruct(2, 3.3) + self.assertEqual(td['a'].values, 2) + self.assertEqual(td['a'].counter, 3.3) + # mutate + td['a'].values += 10 + self.assertEqual(td['a'].values, 12) # changed + self.assertEqual(td['a'].counter, 3.3) # unchanged + # insert + td['b'] = MyStruct(4, 5.6) + + def test_MyStructType_in_dict_mixed_type_error(self): + self.disable_leak_check() + + td = typed.Dict() + td['a'] = MyStruct(1, 2.3) + self.assertEqual(td['a'].values, 1) + self.assertEqual(td['a'].counter, 2.3) + + # ERROR: store different types + with self.assertRaisesRegex(errors.TypingError, + r"Cannot cast numba.MyStructType"): + # because first field is not a float; + # the second field is now an integer. + td['b'] = MyStruct(2.3, 1) + + def test_MyStructType_hash_no_typeof_recursion(self): + # Tests that __hash__ is not called prematurely in typeof + # causing infinite recursion (see #8241). 
+ st = MyStruct(1, 2) + typeof(st) + + self.assertEqual(hash(st), 3) + + +@overload_method(MyStructType, "testme") +def _ol_mystructtype_testme(self, arg): + def impl(self, arg): + return self.values * arg + self.counter + return impl + + +@overload_attribute(MyStructType, "prop") +def _ol_mystructtype_prop(self): + def get(self): + return self.values, self.counter + return get + + +class TestStructRefExtending(MemoryLeakMixin, TestCase): + def test_overload_method(self): + @njit + def check(x): + vs = np.arange(10, dtype=np.float64) + ctr = 11 + obj = MyStruct(vs, ctr) + return obj.testme(x) + + x = 3 + got = check(x) + expect = check.py_func(x) + self.assertPreciseEqual(got, expect) + + def test_overload_attribute(self): + @njit + def check(): + vs = np.arange(10, dtype=np.float64) + ctr = 11 + obj = MyStruct(vs, ctr) + return obj.prop + + got = check() + expect = check.py_func() + self.assertPreciseEqual(got, expect) + + +def caching_test_make(x, y): + struct = MyStruct(values=x, counter=y) + return struct + + +def caching_test_use(struct, z): + return struct.testme(z) + + +class TestStructRefCaching(MemoryLeakMixin, TestCase): + def setUp(self): + self._cache_dir = temp_directory(TestStructRefCaching.__name__) + self._cache_override = override_config('CACHE_DIR', self._cache_dir) + self._cache_override.__enter__() + warnings.simplefilter("error") + warnings.filterwarnings(action="ignore", module="typeguard") + + def tearDown(self): + self._cache_override.__exit__(None, None, None) + warnings.resetwarnings() + + def test_structref_caching(self): + def assert_cached(stats): + self.assertEqual(len(stats.cache_hits), 1) + self.assertEqual(len(stats.cache_misses), 0) + + def assert_not_cached(stats): + self.assertEqual(len(stats.cache_hits), 0) + self.assertEqual(len(stats.cache_misses), 1) + + def check(cached): + check_make = njit(cache=True)(caching_test_make) + check_use = njit(cache=True)(caching_test_use) + vs = np.random.random(3) + ctr = 17 + factor = 3 + st 
= check_make(vs, ctr) + got = check_use(st, factor) + expect = vs * factor + ctr + self.assertPreciseEqual(got, expect) + + if cached: + assert_cached(check_make.stats) + assert_cached(check_use.stats) + else: + assert_not_cached(check_make.stats) + assert_not_cached(check_use.stats) + + check(cached=False) + check(cached=True) + + +@structref.register +class PolygonStructType(types.StructRef): + + def preprocess_fields(self, fields): + # temp name to allow Optional instantiation + self.name = f"numba.PolygonStructType#{id(self)}" + fields = tuple([ + ('value', types.Optional(types.int64)), + ('parent', types.Optional(self)), + ]) + + return fields + + +polygon_struct_type = PolygonStructType(fields=( + ('value', types.Any), + ('parent', types.Any) +)) + + +class PolygonStruct(structref.StructRefProxy): + def __new__(cls, value, parent): + return structref.StructRefProxy.__new__(cls, value, parent) + + @property + def value(self): + return PolygonStruct_get_value(self) + + @property + def parent(self): + return PolygonStruct_get_parent(self) + + +@njit +def PolygonStruct_get_value(self): + return self.value + + +@njit +def PolygonStruct_get_parent(self): + return self.parent + + +structref.define_proxy( + PolygonStruct, + PolygonStructType, + ["value", "parent"] +) + + +@overload_method(PolygonStructType, "flip") +def _ol_polygon_struct_flip(self): + def impl(self): + if self.value is not None: + self.value = -self.value + return impl + + +@overload_attribute(PolygonStructType, "prop") +def _ol_polygon_struct_prop(self): + def get(self): + return self.value, self.parent + return get + + +class TestStructRefForwardTyping(MemoryLeakMixin, TestCase): + def test_same_type_assignment(self): + @njit + def check(x): + poly = PolygonStruct(None, None) + p_poly = PolygonStruct(None, None) + poly.value = x + poly.parent = p_poly + p_poly.value = x + return poly.parent.value + + x = 11 + got = check(x) + expect = x + self.assertPreciseEqual(got, expect) + + def 
test_overload_method(self): + @njit + def check(x): + poly = PolygonStruct(None, None) + p_poly = PolygonStruct(None, None) + poly.value = x + poly.parent = p_poly + p_poly.value = x + poly.flip() + poly.parent.flip() + return poly.parent.value + + x = 3 + got = check(x) + expect = -x + self.assertPreciseEqual(got, expect) + + def test_overload_attribute(self): + @njit + def check(): + obj = PolygonStruct(5, None) + return obj.prop[0] + + got = check() + expect = 5 + self.assertPreciseEqual(got, expect) diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_support.py b/venv/lib/python3.10/site-packages/numba/tests/test_support.py new file mode 100644 index 0000000000000000000000000000000000000000..e35eda1ff27ff5a97a5db447a36217cbdbcc47a2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_support.py @@ -0,0 +1,346 @@ +import itertools + +import numpy as np + +from numba import jit +from numba.core import utils +from numba.tests.support import TestCase, forbid_codegen +from .enum_usecases import * +import unittest + +DBL_EPSILON = 2**-52 +FLT_EPSILON = 2**-23 + +INF = float('inf') +NAN = float('nan') + + +class TestAssertPreciseEqual(TestCase): + """ + Tests for TestCase.assertPreciseEqual(). 
+ """ + + int_types = [int] + np_float_types = [np.float32, np.float64] + float_types = [float] + np_float_types + np_complex_types = [np.complex64, np.complex128] + complex_types = [complex] + np_complex_types + bool_types = [bool, np.bool_] + + def eq(self, left, right, **kwargs): + def assert_succeed(left, right): + self.assertPreciseEqual(left, right, **kwargs) + self.assertPreciseEqual(right, left, **kwargs) + assert_succeed(left, right) + assert_succeed((left, left), (right, right)) + assert_succeed([left, left], [right, right]) + + def ne(self, left, right, **kwargs): + def assert_fail(left, right): + try: + self.assertPreciseEqual(left, right, **kwargs) + except AssertionError: + pass + else: + self.fail("%s and %s unexpectedly considered equal" % (left, right)) + assert_fail(left, right) + assert_fail(right, left) + assert_fail((left, left), (right, right)) + assert_fail((right, right), (left, left)) + assert_fail([left, left], [right, right]) + assert_fail([right, right], [left, left]) + + def test_types(self): + # assertPreciseEqual() should test for type compatibility + # int-like, float-like, complex-like are not compatible + for i, f, c in itertools.product(self.int_types, self.float_types, + self.complex_types): + self.ne(i(1), f(1)) + self.ne(f(1), c(1)) + self.ne(i(1), c(1)) + # int and long are compatible between each other + for u, v in itertools.product(self.int_types, self.int_types): + self.eq(u(1), v(1)) + # int and bool are not compatible between each other + for u, v in itertools.product(self.int_types, self.bool_types): + self.ne(u(1), v(1)) + # NumPy float types are not compatible between each other + for u, v in itertools.product(self.np_float_types, self.np_float_types): + if u is v: + self.eq(u(1), v(1)) + else: + self.ne(u(1), v(1)) + # NumPy complex types are not compatible between each other + for u, v in itertools.product(self.np_complex_types, self.np_complex_types): + if u is v: + self.eq(u(1), v(1)) + else: + self.ne(u(1), v(1)) 
+ + def test_int_values(self): + for tp in self.int_types: + for prec in ['exact', 'single', 'double']: + self.eq(tp(0), tp(0), prec=prec) + self.ne(tp(0), tp(1), prec=prec) + self.ne(tp(-1), tp(1), prec=prec) + self.ne(tp(2**80), tp(1+2**80), prec=prec) + + def test_bool_values(self): + for tpa, tpb in itertools.product(self.bool_types, self.bool_types): + self.eq(tpa(True), tpb(True)) + self.eq(tpa(False), tpb(False)) + self.ne(tpa(True), tpb(False)) + + def test_abs_tol_parse(self): + # check invalid values in abs_tol kwarg raises + with self.assertRaises(ValueError): + self.eq(np.float64(1e-17), np.float64(1e-17), abs_tol="invalid") + with self.assertRaises(ValueError): + self.eq(np.float64(1), np.float64(2), abs_tol=int(7)) + + def test_float_values(self): + for tp in self.float_types: + for prec in ['exact', 'single', 'double']: + self.eq(tp(1.5), tp(1.5), prec=prec) + # Signed zeros + self.eq(tp(0.0), tp(0.0), prec=prec) + self.eq(tp(-0.0), tp(-0.0), prec=prec) + self.ne(tp(0.0), tp(-0.0), prec=prec) + self.eq(tp(0.0), tp(-0.0), prec=prec, ignore_sign_on_zero=True) + # Infinities + self.eq(tp(INF), tp(INF), prec=prec) + self.ne(tp(INF), tp(1e38), prec=prec) + self.eq(tp(-INF), tp(-INF), prec=prec) + self.ne(tp(INF), tp(-INF), prec=prec) + # NaNs + self.eq(tp(NAN), tp(NAN), prec=prec) + self.ne(tp(NAN), tp(0), prec=prec) + self.ne(tp(NAN), tp(INF), prec=prec) + self.ne(tp(NAN), tp(-INF), prec=prec) + + def test_float64_values(self): + for tp in [float, np.float64]: + self.ne(tp(1.0 + DBL_EPSILON), tp(1.0)) + + def test_float32_values(self): + tp = np.float32 + self.ne(tp(1.0 + FLT_EPSILON), tp(1.0)) + + def test_float64_values_inexact(self): + for tp in [float, np.float64]: + for scale in [1.0, -2**3, 2**-4, -2**-20]: + a = scale * 1.0 + b = scale * (1.0 + DBL_EPSILON) + c = scale * (1.0 + DBL_EPSILON * 2) + d = scale * (1.0 + DBL_EPSILON * 4) + self.ne(tp(a), tp(b)) + self.ne(tp(a), tp(b), prec='exact') + self.eq(tp(a), tp(b), prec='double') + self.eq(tp(a), 
tp(b), prec='double', ulps=1) + self.ne(tp(a), tp(c), prec='double') + self.eq(tp(a), tp(c), prec='double', ulps=2) + self.ne(tp(a), tp(d), prec='double', ulps=2) + self.eq(tp(a), tp(c), prec='double', ulps=3) + self.eq(tp(a), tp(d), prec='double', ulps=3) + # test absolute tolerance based on eps + self.eq(tp(1e-16), tp(3e-16), prec='double', abs_tol="eps") + self.ne(tp(1e-16), tp(4e-16), prec='double', abs_tol="eps") + # test absolute tolerance based on value + self.eq(tp(1e-17), tp(1e-18), prec='double', abs_tol=1e-17) + self.ne(tp(1e-17), tp(3e-17), prec='double', abs_tol=1e-17) + + def test_float32_values_inexact(self): + tp = np.float32 + for scale in [1.0, -2**3, 2**-4, -2**-20]: + # About the choice of 0.9: there seem to be issues when + # converting + a = scale * 1.0 + b = scale * (1.0 + FLT_EPSILON) + c = scale * (1.0 + FLT_EPSILON * 2) + d = scale * (1.0 + FLT_EPSILON * 4) + self.ne(tp(a), tp(b)) + self.ne(tp(a), tp(b), prec='exact') + self.ne(tp(a), tp(b), prec='double') + self.eq(tp(a), tp(b), prec='single') + self.ne(tp(a), tp(c), prec='single') + self.eq(tp(a), tp(c), prec='single', ulps=2) + self.ne(tp(a), tp(d), prec='single', ulps=2) + self.eq(tp(a), tp(c), prec='single', ulps=3) + self.eq(tp(a), tp(d), prec='single', ulps=3) + # test absolute tolerance based on eps + self.eq(tp(1e-7), tp(2e-7), prec='single', abs_tol="eps") + self.ne(tp(1e-7), tp(3e-7), prec='single', abs_tol="eps") + # test absolute tolerance based on value + self.eq(tp(1e-7), tp(1e-8), prec='single', abs_tol=1e-7) + self.ne(tp(1e-7), tp(3e-7), prec='single', abs_tol=1e-7) + + def test_complex_values(self): + # Complex literals with signed zeros are confusing, better use + # the explicit constructor. 
+ c_pp, c_pn, c_np, c_nn = [complex(0.0, 0.0), complex(0.0, -0.0), + complex(-0.0, 0.0), complex(-0.0, -0.0)] + for tp in self.complex_types: + for prec in ['exact', 'single', 'double']: + self.eq(tp(1 + 2j), tp(1 + 2j), prec=prec) + self.ne(tp(1 + 1j), tp(1 + 2j), prec=prec) + self.ne(tp(2 + 2j), tp(1 + 2j), prec=prec) + # Signed zeros + self.eq(tp(c_pp), tp(c_pp), prec=prec) + self.eq(tp(c_np), tp(c_np), prec=prec) + self.eq(tp(c_nn), tp(c_nn), prec=prec) + self.ne(tp(c_pp), tp(c_pn), prec=prec) + self.ne(tp(c_pn), tp(c_nn), prec=prec) + # Infinities + self.eq(tp(complex(INF, INF)), tp(complex(INF, INF)), prec=prec) + self.eq(tp(complex(INF, -INF)), tp(complex(INF, -INF)), prec=prec) + self.eq(tp(complex(-INF, -INF)), tp(complex(-INF, -INF)), prec=prec) + self.ne(tp(complex(INF, INF)), tp(complex(INF, -INF)), prec=prec) + self.ne(tp(complex(INF, INF)), tp(complex(-INF, INF)), prec=prec) + self.eq(tp(complex(INF, 0)), tp(complex(INF, 0)), prec=prec) + # NaNs + self.eq(tp(complex(NAN, 0)), tp(complex(NAN, 0)), prec=prec) + self.eq(tp(complex(0, NAN)), tp(complex(0, NAN)), prec=prec) + self.eq(tp(complex(NAN, NAN)), tp(complex(NAN, NAN)), prec=prec) + self.eq(tp(complex(INF, NAN)), tp(complex(INF, NAN)), prec=prec) + self.eq(tp(complex(NAN, -INF)), tp(complex(NAN, -INF)), prec=prec) + # FIXME + #self.ne(tp(complex(NAN, INF)), tp(complex(NAN, -INF))) + #self.ne(tp(complex(NAN, 0)), tp(complex(NAN, 1))) + #self.ne(tp(complex(INF, NAN)), tp(complex(-INF, NAN))) + #self.ne(tp(complex(0, NAN)), tp(complex(1, NAN))) + #self.ne(tp(complex(NAN, 0)), tp(complex(0, NAN))) + # XXX should work with other precisions as well? 
+ self.ne(tp(complex(INF, 0)), tp(complex(INF, 1)), prec='exact') + + def test_complex128_values_inexact(self): + for tp in [complex, np.complex128]: + for scale in [1.0, -2**3, 2**-4, -2**-20]: + a = scale * 1.0 + b = scale * (1.0 + DBL_EPSILON) + c = scale * (1.0 + DBL_EPSILON * 2) + aa = tp(complex(a, a)) + ab = tp(complex(a, b)) + bb = tp(complex(b, b)) + self.ne(tp(aa), tp(ab)) + self.eq(tp(aa), tp(ab), prec='double') + self.eq(tp(ab), tp(bb), prec='double') + self.eq(tp(aa), tp(bb), prec='double') + ac = tp(complex(a, c)) + cc = tp(complex(c, c)) + self.ne(tp(aa), tp(ac), prec='double') + self.ne(tp(ac), tp(cc), prec='double') + self.eq(tp(aa), tp(ac), prec='double', ulps=2) + self.eq(tp(ac), tp(cc), prec='double', ulps=2) + self.eq(tp(aa), tp(cc), prec='double', ulps=2) + self.eq(tp(aa), tp(cc), prec='single') + + def test_complex64_values_inexact(self): + tp = np.complex64 + for scale in [1.0, -2**3, 2**-4, -2**-20]: + a = scale * 1.0 + b = scale * (1.0 + FLT_EPSILON) + c = scale * (1.0 + FLT_EPSILON * 2) + aa = tp(complex(a, a)) + ab = tp(complex(a, b)) + bb = tp(complex(b, b)) + self.ne(tp(aa), tp(ab)) + self.ne(tp(aa), tp(ab), prec='double') + self.eq(tp(aa), tp(ab), prec='single') + self.eq(tp(ab), tp(bb), prec='single') + self.eq(tp(aa), tp(bb), prec='single') + ac = tp(complex(a, c)) + cc = tp(complex(c, c)) + self.ne(tp(aa), tp(ac), prec='single') + self.ne(tp(ac), tp(cc), prec='single') + self.eq(tp(aa), tp(ac), prec='single', ulps=2) + self.eq(tp(ac), tp(cc), prec='single', ulps=2) + self.eq(tp(aa), tp(cc), prec='single', ulps=2) + + def test_enums(self): + values = [Color.red, Color.green, Color.blue, Shake.mint, + Shape.circle, Shape.square, Planet.EARTH, Planet.MERCURY] + for val in values: + self.eq(val, val) + self.ne(val, val.value) + for a, b in itertools.combinations(values, 2): + self.ne(a, b) + + def test_arrays(self): + a = np.arange(1, 7, dtype=np.int16).reshape((2, 3)) + b = a.copy() + self.eq(a, b) + # Different values + self.ne(a, b 
+ 1) + self.ne(a, b[:-1]) + self.ne(a, b.T) + # Different dtypes + self.ne(a, b.astype(np.int32)) + # Different layout + self.ne(a, b.T.copy().T) + # Different ndim + self.ne(a, b.flatten()) + # Different writeability + b.flags.writeable = False + self.ne(a, b) + # Precision + a = np.arange(1, 3, dtype=np.float64) + b = a * (1.0 + DBL_EPSILON) + c = a * (1.0 + DBL_EPSILON * 2) + self.ne(a, b) + self.eq(a, b, prec='double') + self.ne(a, c, prec='double') + + def test_npdatetime(self): + a = np.datetime64('1900', 'Y') + b = np.datetime64('1900', 'Y') + c = np.datetime64('1900-01-01', 'D') + d = np.datetime64('1901', 'Y') + self.eq(a, b) + # Different unit + self.ne(a, c) + # Different value + self.ne(a, d) + + def test_nptimedelta(self): + a = np.timedelta64(1, 'h') + b = np.timedelta64(1, 'h') + c = np.timedelta64(60, 'm') + d = np.timedelta64(2, 'h') + self.eq(a, b) + # Different unit + self.ne(a, c) + # Different value + self.ne(a, d) + + +class TestMisc(TestCase): + + def test_assertRefCount(self): + # Use floats to avoid integer interning + x = 55. + y = 66. + l = [] + with self.assertRefCount(x, y): + pass + with self.assertRaises(AssertionError) as cm: + # y gains a reference + with self.assertRefCount(x, y): + l.append(y) + self.assertIn("66", str(cm.exception)) + + def test_forbid_codegen(self): + """ + Test that forbid_codegen() prevents code generation using the @jit + decorator. 
+ """ + def f(): + return 1 + with forbid_codegen(): + with self.assertRaises(RuntimeError) as raises: + cfunc = jit(nopython=True)(f) + cfunc() + self.assertIn("codegen forbidden by test case", str(raises.exception)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_svml.py b/venv/lib/python3.10/site-packages/numba/tests/test_svml.py new file mode 100644 index 0000000000000000000000000000000000000000..a047acea529e1cb72b488df3a7117b6f955893ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_svml.py @@ -0,0 +1,411 @@ +import math +import numpy as np +import numbers +import re +import traceback +import multiprocessing as mp + +import numba +from numba import njit, prange +from numba.core import config +from numba.tests.support import TestCase, tag, override_env_config +import unittest + +needs_svml = unittest.skipUnless(config.USING_SVML, + "SVML tests need SVML to be present") + +# a map of float64 vector lengths with corresponding CPU architecture +vlen2cpu = {2: 'nehalem', 4: 'haswell', 8: 'skylake-avx512'} +# force LLVM to use AVX512 registers for vectorization +# https://reviews.llvm.org/D67259 +vlen2cpu_features = {2: '', 4: '', 8: '-prefer-256-bit'} + +# env vars to force CPU as skylake-avx512. +# force LLVM to use AVX512 registers for vectorization +# https://reviews.llvm.org/D67259 +_skylake_axv512_envvars = {'NUMBA_CPU_NAME': 'skylake-avx512', + 'NUMBA_CPU_FEATURES': '-prefer-256-bit'} + +# K: SVML functions, V: python functions which are expected to be SIMD-vectorized +# using SVML, explicit references to Python functions here are mostly for sake of +# instant import checks. 
+# TODO: [] and comments below mean unused/untested SVML function, it's to be +# either enabled or to be replaced with the explanation why the function +# cannot be used in Numba +# TODO: this test does not support functions with more than 1 arguments yet +# The test logic should be modified if there is an SVML function being used under +# different name or module from Python +svml_funcs = { + "sin": [np.sin, math.sin], + "cos": [np.cos, math.cos], + "pow": [], # pow, math.pow], + "exp": [np.exp, math.exp], + "log": [np.log, math.log], + "acos": [math.acos], + "acosh": [math.acosh], + "asin": [math.asin], + "asinh": [math.asinh], + "atan2": [], # math.atan2], + "atan": [math.atan], + "atanh": [math.atanh], + "cbrt": [], # np.cbrt], + "cdfnorm": [], + "cdfnorminv": [], + "ceil": [], # np.ceil, math.ceil], + "cosd": [], + "cosh": [np.cosh, math.cosh], + "erf": [math.erf], # np.erf is available in Intel Distribution + "erfc": [math.erfc], + "erfcinv": [], + "erfinv": [], + "exp10": [], + "exp2": [], # np.exp2], + "expm1": [np.expm1, math.expm1], + "floor": [], # np.floor, math.floor], + "fmod": [], # np.fmod, math.fmod], + "hypot": [], # np.hypot, math.hypot], + "invsqrt": [], # available in Intel Distribution + "log10": [np.log10, math.log10], + "log1p": [np.log1p, math.log1p], + "log2": [], # np.log2], + "logb": [], + "nearbyint": [], + "rint": [], # np.rint], + "round": [], # round], + "sind": [], + "sinh": [np.sinh, math.sinh], + "tan": [np.tan, math.tan], + "tanh": [np.tanh, math.tanh], + "trunc": [], # np.trunc, math.trunc], +} +# TODO: these functions are not vectorizable with complex types +complex_funcs_exclude = ["tan", "log10", "expm1", "log1p", "tanh", "log"] + +# remove untested entries +svml_funcs = {k: v for k, v in svml_funcs.items() if len(v) > 0} +# lists for functions which belong to numpy and math modules correpondently +numpy_funcs = [f for f, v in svml_funcs.items() if "= (3, 12), "needs Python 3.12+") +class TestMonitoring(TestCase): + # Tests 
the interaction of the Numba dispatcher with `sys.monitoring`. + # + # Note that it looks like a lot of these try..finally type patterns could + # be written using a context manager, this is true, but it is not written + # like that deliberately as a context manager adds implementation details + # onto the stack which makes it harder to debug tests. + + def setUp(self): + # First... check if there's other monitoring stuff registered (e.g. test + # is running under cProfile or coverage), skip if so. + monitor_kinds = [] + for i in range(6): # there are 5 tool IDs + if sys.monitoring.get_tool(i) is not None: + monitor_kinds.append(TOOL2MONITORTYPE[i]) + + if monitor_kinds: + msg = ("Cannot run monitoring tests when other monitors are " + "active, found monitor(s) of type: " + f"{', '.join(monitor_kinds)}") + self.skipTest(msg) + + # set up some standard functions and answers for use throughout + self.foo, self.call_foo = generate_usecase() + self.arg = 10 + self.foo_result = self.arg + 5 + 1 + self.call_foo_result = 2 * self.foo_result + # pretend to be a profiler in the majority of these unit tests + self.tool_id = sys.monitoring.PROFILER_ID + + def check_py_start_calls(self, allcalls): + # Checks that PY_START calls were correctly captured for a + # `self.call_foo(self.arg)` call. + mockcalls = allcalls[PY_START] + self.assertEqual(mockcalls.call_count, 2) + # Find the resume op, this is where the code for `call_foo` "starts" + inst = [x for x in dis.get_instructions(self.call_foo) + if x.opname == "RESUME"] + offset = inst[0].offset + # Numba always reports the start location as offset 0. + calls = (call(self.call_foo.__code__, offset), + call(self.foo.__code__, 0)) + mockcalls.assert_has_calls(calls) + + def check_py_return_calls(self, allcalls): + # Checks that PY_RETURN calls were correctly captured for a + # `self.call_foo(self.arg)` call. 
+ mockcalls = allcalls[PY_RETURN] + self.assertEqual(mockcalls.call_count, 2) + # These are in the order the returns were encountered. Return from `foo` + # occurred first, followed by return from `call_foo`. + # NOTE: it is a known issue that Numba reports the PY_RETURN event as + # occurring at offset 0. At present there's no information about the + # location that the return occurred propagating from the machine code + # back to the dispatcher (where the monitoring events are handled). + offset = [x for x in dis.get_instructions(self.call_foo)][-1].offset + calls = [call(self.foo.__code__, 0, self.foo_result), + call(self.call_foo.__code__, offset, self.call_foo_result)] + mockcalls.assert_has_calls(calls) + + def run_with_events(self, function, args, events, tool_id=None): + # Runs function with args with monitoring set for events on `tool_id` + # (if present, else just uses the default of "PROFILER_ID") returns a + # dictionary event->callback. + try: + if tool_id is None: + _tool_id = self.tool_id + else: + _tool_id = tool_id + sys.monitoring.use_tool_id(_tool_id, "custom_monitor") + callbacks = {} + event_bitmask = 0 + for event in events: + callback = Mock() + sys.monitoring.register_callback(_tool_id, event, callback) + callbacks[event] = callback + event_bitmask |= event + # only start monitoring once callbacks are registered + sys.monitoring.set_events(_tool_id, event_bitmask) + function(*args) + finally: + # clean up state + for event in events: + sys.monitoring.register_callback(_tool_id, event, None) + sys.monitoring.set_events(_tool_id, NO_EVENTS) + sys.monitoring.free_tool_id(_tool_id) + return callbacks + + def test_start_event(self): + # test event PY_START + cb = self.run_with_events(self.call_foo, (self.arg,), (PY_START,)) + # Check... + self.assertEqual(len(cb), 1) + self.check_py_start_calls(cb) + + def test_return_event(self): + # test event PY_RETURN + cb = self.run_with_events(self.call_foo, (self.arg,), (PY_RETURN,)) + # Check... 
+ self.assertEqual(len(cb), 1) + self.check_py_return_calls(cb) + + def test_call_event_chain(self): + # test event PY_START and PY_RETURN monitored at the same time + cb = self.run_with_events(self.call_foo, (self.arg,), + (PY_START, PY_RETURN)) + # Check... + self.assertEqual(len(cb), 2) + self.check_py_return_calls(cb) + self.check_py_start_calls(cb) + + # -------------------------------------------------------------------------- + # NOTE: About the next two tests... + # Numba doesn't support "local event" level monitoring, it's implemented + # in CPython via adjusting the code object bytecode to use + # "instrumented" opcodes. When the interpreter encounters an + # instrumented opcode it triggers the event handling pathways. As Numba + # doesn't interpret the bytecode instruction-at-a-time there's not + # really any way to support this. Two things to check... + # 1. The an instrumented code object doesn't trigger events in + # the dispatcher. + # 2. That Numba can compile instrumented functions (it should be able + # to without any problem as the instrumented bytecode should not + # leak into `.co_code`.). + + def test_instrumented_code_does_not_trigger_numba_events(self): + # 1. from above. 
+ @jit('int64(int64)',) + def foo(x): + return x + 3 + + try: + tool_id = self.tool_id + sys.monitoring.use_tool_id(tool_id, "custom_monitor") + callbacks = {} + event_bitmask = 0 + events = (PY_START, PY_RETURN) + for event in events: + callback = Mock() + sys.monitoring.register_callback(tool_id, event, callback) + callbacks[event] = callback + event_bitmask |= event + + sys.monitoring.set_local_events(tool_id, foo.__code__, + event_bitmask) + result = foo(self.arg) + finally: + for event in events: + sys.monitoring.register_callback(tool_id, event, None) + sys.monitoring.set_local_events(tool_id, foo.__code__, 0) + sys.monitoring.free_tool_id(tool_id) + + # check + self.assertEqual(result, foo.py_func(self.arg)) + self.assertEqual(len(callbacks), 2) + callbacks[PY_START].assert_not_called() + callbacks[PY_RETURN].assert_not_called() + + def test_instrumented_code_can_be_compiled(self): + # 2. from above. + + def foo(x): + return x + 1 + + try: + tool_id = self.tool_id + sys.monitoring.use_tool_id(tool_id, "custom_monitor") + sys.monitoring.set_local_events(tool_id, foo.__code__, PY_START) + sys.monitoring.register_callback(tool_id, PY_START, Mock()) + # test compile + result = jit(foo)(self.arg) + self.assertEqual(result, foo(self.arg)) + finally: + sys.monitoring.register_callback(tool_id, PY_START, None) + sys.monitoring.set_local_events(tool_id, foo.__code__, 0) + sys.monitoring.free_tool_id(tool_id) + + def test_unhandled_events_are_ignored(self): + # Check an unhandled event e.g. PY_YIELD isn't reported. 
+ def generate(dec): + @dec('void()') + def producer(): + yield 10 + + @dec('int64()') + def consumer(): + p = producer() + return next(p) + + return consumer + + event = sys.monitoring.events.PY_YIELD + # check that pure python reports + wrapper = lambda sig: lambda fn: fn + py_consumer = generate(wrapper) + py_cb = self.run_with_events(py_consumer, (), (event,)) + py_cb[event].assert_called_once() + # check the numba does not report + nb_consumer = generate(jit) + nb_cb = self.run_with_events(nb_consumer, (), (event,)) + nb_cb[event].assert_not_called() + + def test_event_with_no_callback_runs(self): + # This checks the situation where an event is being monitored but + # there's no callback associated with the event. In the dispatcher C + # code the loop over tools will be entered, but nothing will get called + # as the "instrument" is missing (NULL). + try: + event = PY_START + tool_id = self.tool_id + sys.monitoring.use_tool_id(tool_id, "custom_monitor") + sys.monitoring.set_events(tool_id, event) + # NO CALLBACK IS REGISTERED! + active_events = sys.monitoring.get_events(tool_id) + self.assertEqual(active_events, event) + result = self.call_foo(self.arg) + active_events = sys.monitoring.get_events(tool_id) + self.assertEqual(active_events, event) + self.assertEqual(result, self.call_foo_result) + finally: + sys.monitoring.set_events(tool_id, NO_EVENTS) + sys.monitoring.free_tool_id(tool_id) + + def test_disable_from_callback(self): + # Event callbacks can disable a _local_ event at a specific location to + # prevent it triggering in the future by returning + # `sys.monitoring.DISABLE`. As this only applies to local events, doing + # this should have absolutely no impact for the global events that Numba + # supports. 
+ + callback = Mock(return_value=sys.monitoring.DISABLE) + + try: + event = PY_START + tool_id = self.tool_id + sys.monitoring.use_tool_id(tool_id, "custom_monitor") + sys.monitoring.set_events(tool_id, event) + sys.monitoring.register_callback(tool_id, event, callback) + active_events = sys.monitoring.get_events(tool_id) + self.assertEqual(active_events, event) + result = self.call_foo(self.arg) + active_events = sys.monitoring.get_events(tool_id) + self.assertEqual(active_events, event) + self.assertEqual(result, self.call_foo_result) + callback.assert_called() + finally: + # It is necessary to restart events that have been disabled. The + # "disabled" state of the `PY_START` event for the tool + # `self.tool_id` "leaks" into subsequent tests. These subsequent + # tests then end up failing as events that should have been + # triggered are not triggered due to the state leak! It's not really + # clear why this happens, if it is part of the design or a side + # effect of the design, or if this behaviour is simply a bug in + # CPython itself. + sys.monitoring.restart_events() + sys.monitoring.register_callback(tool_id, event, None) + sys.monitoring.set_events(tool_id, NO_EVENTS) + sys.monitoring.free_tool_id(tool_id) + + def test_mutation_from_objmode(self): + try: + # Check that it's possible to enable an event (mutate the event + # state)from an `objmode` block. Monitoring for PY_RETURN is set in + # objmode once the function starts executing. + tool_id = self.tool_id + sys.monitoring.use_tool_id(tool_id, "custom_monitor") + event = PY_RETURN + # register the callback... 
note that the event isn't switched on yet + callback = Mock() + sys.monitoring.register_callback(tool_id, event, callback) + + def objmode_enable_event(switch_on_event): + if switch_on_event: + sys.monitoring.set_events(tool_id, event) + + @_enable_sysmon + @jit('int64(int64)') + def foo(enable): + with objmode: + objmode_enable_event(enable) + return enable + 7 + + # this should not trigger the return callback + foo(0) + callback.assert_not_called() + + # this should trigger the return callback + foo(1) + # switch off the event so the callback mock is protected from + # mutation. + sys.monitoring.set_events(tool_id, NO_EVENTS) + # check what happened + callback.assert_called() + # 2 calls, 1 is the return from the objmode_enable_event, the other + # is the return from foo. + self.assertEqual(callback.call_count, 2) + finally: + sys.monitoring.set_events(tool_id, NO_EVENTS) + sys.monitoring.register_callback(tool_id, event, None) + sys.monitoring.free_tool_id(tool_id) + + def test_multiple_tool_id(self): + # Check that multiple tools will work across different combinations of + # events that Numba dispatcher supports, namely: + # (NO_EVENTS, PY_START, PY_RETURN). + + # the use of NO_EVENTS is superfluous, it is to demonstrate usage. + tool_ids_2_events = {sys.monitoring.DEBUGGER_ID: (NO_EVENTS,), + sys.monitoring.COVERAGE_ID: (PY_START,), + sys.monitoring.PROFILER_ID: (PY_RETURN,), + sys.monitoring.OPTIMIZER_ID: + (PY_START, PY_RETURN,),} + + all_callbacks = {} + try: + for tool_id, events in tool_ids_2_events.items(): + sys.monitoring.use_tool_id(tool_id, f"custom_monitor_{tool_id}") + event_bitmask = 0 + callbacks = {} + all_callbacks[tool_id] = callbacks + for event in events: + callback = Mock() + # Can't set an event for NO_EVENTS! 
+ if event != NO_EVENTS: + sys.monitoring.register_callback(tool_id, event, + callback) + callbacks[event] = callback + event_bitmask |= event + # only start monitoring once callbacks are registered + for tool_id in tool_ids_2_events.keys(): + sys.monitoring.set_events(tool_id, event_bitmask) + self.call_foo(self.arg) + finally: + # clean up state + for tool_id, events in tool_ids_2_events.items(): + for event in events: + # Can't remove an event for NO_EVENTS! + if event != NO_EVENTS: + sys.monitoring.register_callback(tool_id, event, None) + sys.monitoring.set_events(tool_id, NO_EVENTS) + sys.monitoring.free_tool_id(tool_id) + + # Now check all_callbacks... + + # check debugger tool slot + dbg_tool = all_callbacks[sys.monitoring.DEBUGGER_ID] + self.assertEqual(len(dbg_tool), 1) # one event to capture + callback = dbg_tool[NO_EVENTS] + callback.assert_not_called() + + # check coverage tool slot + cov_tool = all_callbacks[sys.monitoring.COVERAGE_ID] + self.assertEqual(len(cov_tool), 1) # one event to capture + self.check_py_start_calls(cov_tool) + + # check profiler tool slot + prof_tool = all_callbacks[sys.monitoring.PROFILER_ID] + self.assertEqual(len(prof_tool), 1) # one event to capture + self.check_py_return_calls(prof_tool) + + # check optimiser tool slot + opt_tool = all_callbacks[sys.monitoring.OPTIMIZER_ID] + self.assertEqual(len(opt_tool), 2) # two events to capture + self.check_py_start_calls(opt_tool) + self.check_py_return_calls(opt_tool) + + def test_raising_under_monitoring(self): + # Check that Numba can raise an exception whilst monitoring is running + # and that 1. `RAISE` is issued 2. `PY_UNWIND` is issued, 3. that + # `PY_RETURN` is not issued. 
+ + ret_callback = Mock() + raise_callback = Mock() + unwind_callback = Mock() + + msg = 'exception raised' + + @_enable_sysmon + @jit('()') + def foo(): + raise ValueError(msg) + + store_raised = None + try: + tool_id = self.tool_id + sys.monitoring.use_tool_id(tool_id, "custom_monitor") + sys.monitoring.register_callback(tool_id, PY_RETURN, ret_callback) + sys.monitoring.register_callback(tool_id, RAISE, raise_callback) + sys.monitoring.register_callback(tool_id, PY_UNWIND, + unwind_callback) + sys.monitoring.set_events(tool_id, PY_RETURN | RAISE | PY_UNWIND) + try: + foo() + except ValueError as raises: + store_raised = raises + # switch off monitoring + sys.monitoring.set_events(tool_id, NO_EVENTS) + # check that the ret_callback was called once (by Numba unpickle to + # fetch the exception info out of the stored bytes). + ret_callback.assert_called_once() + # and that elements that are feasible to check about the call are + # as expected + the_call = ret_callback.call_args_list[0] + self.assertEqual(the_call.args[0], _numba_unpickle.__code__) + self.assertEqual(the_call.args[2][0], ValueError) + self.assertEqual(the_call.args[2][1][0], msg) + + # check that the RAISE event callback was triggered + raise_callback.assert_called() + numba_unpickle_call = raise_callback.call_args_list[0] + self.assertEqual(numba_unpickle_call.args[0], + _numba_unpickle.__code__) + self.assertIsInstance(numba_unpickle_call.args[2], KeyError) + foo_call = raise_callback.call_args_list[1] + self.assertEqual(foo_call.args[0], foo.py_func.__code__) + self.assertIsInstance(foo_call.args[2], ValueError) + self.assertIn(msg, str(foo_call.args[2])) + + # check that PY_UNWIND event callback was called + unwind_callback.assert_called_once() + unwind_call = unwind_callback.call_args_list[0] + self.assertEqual(unwind_call.args[0], foo.py_func.__code__) + self.assertIsInstance(unwind_call.args[2], ValueError) + self.assertIn(msg, str(unwind_call.args[2])) + finally: + 
sys.monitoring.set_events(tool_id, NO_EVENTS) + sys.monitoring.register_callback(tool_id, PY_RETURN, None) + sys.monitoring.register_callback(tool_id, RAISE, None) + sys.monitoring.register_callback(tool_id, PY_UNWIND, None) + sys.monitoring.free_tool_id(tool_id) + + self.assertIn(msg, str(store_raised)) + + def test_stop_iteration_under_monitoring(self): + # Check that Numba can raise an StopIteration exception whilst + # monitoring is running and that: + # 1. RAISE is issued for an explicitly raised StopIteration exception. + # 2. PY_RETURN is issued appropriately for the unwinding stack + # 3. STOP_ITERATION is not issued as there is no implicit StopIteration + # raised. + + return_callback = Mock() + raise_callback = Mock() + stopiter_callback = Mock() + + msg = 'exception raised' + + @_enable_sysmon + @jit('()') + def foo(): + raise StopIteration(msg) + + store_raised = None + try: + tool_id = self.tool_id + sys.monitoring.use_tool_id(tool_id, "custom_monitor") + sys.monitoring.register_callback(tool_id, PY_RETURN, + return_callback) + sys.monitoring.register_callback(tool_id, RAISE, + raise_callback) + sys.monitoring.register_callback(tool_id, STOP_ITERATION, + stopiter_callback) + sys.monitoring.set_events(tool_id, + PY_RETURN | STOP_ITERATION | RAISE) + try: + foo() + except StopIteration as raises: + store_raised = raises + # switch off monitoring + sys.monitoring.set_events(tool_id, NO_EVENTS) + # check that the return_callback was called once (by Numba unpickle + # to fetch the exception info out of the stored bytes). 
+ return_callback.assert_called_once() + # and that elements that are feasible to check about the call are + # as expected + the_call = return_callback.call_args_list[0] + self.assertEqual(the_call.args[0], _numba_unpickle.__code__) + self.assertEqual(the_call.args[2][0], StopIteration) + self.assertEqual(the_call.args[2][1][0], msg) + + # check that the RAISE event callback was triggered + raise_callback.assert_called() + # check that it's 3 long (numba unpickle, jit(foo), the test method) + self.assertEqual(raise_callback.call_count, 3) + + # check the numba pickle call + numba_unpickle_call = raise_callback.call_args_list[0] + self.assertEqual(numba_unpickle_call.args[0], + _numba_unpickle.__code__) + self.assertIsInstance(numba_unpickle_call.args[2], KeyError) + + # check the jit(foo) call + foo_call = raise_callback.call_args_list[1] + self.assertEqual(foo_call.args[0], foo.py_func.__code__) + self.assertIsInstance(foo_call.args[2], StopIteration) + self.assertIn(msg, str(foo_call.args[2])) + + # check the test method call + meth_call = raise_callback.call_args_list[2] + test_method_code = sys._getframe().f_code + self.assertEqual(meth_call.args[0], test_method_code) + self.assertIsInstance(meth_call.args[2], StopIteration) + self.assertIn(msg, str(meth_call.args[2])) + + # check that the STOP_ITERATION event was not triggered + stopiter_callback.assert_not_called() + finally: + sys.monitoring.set_events(tool_id, NO_EVENTS) + sys.monitoring.register_callback(tool_id, PY_RETURN, None) + sys.monitoring.register_callback(tool_id, STOP_ITERATION, None) + sys.monitoring.register_callback(tool_id, RAISE, None) + sys.monitoring.free_tool_id(tool_id) + + self.assertIn(msg, str(store_raised)) + + def test_raising_callback_unwinds_from_jit_on_success_path(self): + # An event callback can legitimately raise an exception, this test + # makes sure Numba's dispatcher handles it ok on the "successful path", + # i.e. 
the JIT compiled function didn't raise an exception at runtime. + + msg = "deliberately broken callback" + + callback = Mock(side_effect=ValueError(msg)) + + store_raised = None + try: + event = PY_START + tool_id = self.tool_id + sys.monitoring.use_tool_id(tool_id, "custom_monitor") + sys.monitoring.set_events(tool_id, event) + sys.monitoring.register_callback(tool_id, event, callback) + self.foo(self.arg) + except ValueError as raises: + store_raised = raises + finally: + sys.monitoring.register_callback(tool_id, event, None) + sys.monitoring.set_events(tool_id, NO_EVENTS) + sys.monitoring.free_tool_id(tool_id) + + callback.assert_called_once() + self.assertIn(msg, str(store_raised)) + + def test_raising_callback_unwinds_from_jit_on_raising_path(self): + # An event callback can legitimately raise an exception, this test + # makes sure Numba's dispatcher handles it ok on the + # "unsuccessful path", i.e. the JIT compiled function raised an + # exception at runtime. This test checks the RAISE event, as the + # callback itself raises, it overrides the exception coming from the + # JIT compiled function. 
+ + msg_callback = "deliberately broken callback" + msg_execution = "deliberately broken execution" + + callback = Mock(side_effect=ValueError(msg_callback)) + + class LocalException(Exception): + pass + + @_enable_sysmon + @jit("()") + def raising(): + raise LocalException(msg_execution) + + store_raised = None + try: + event = RAISE + tool_id = self.tool_id + sys.monitoring.use_tool_id(tool_id, "custom_monitor") + sys.monitoring.set_events(tool_id, event) + sys.monitoring.register_callback(tool_id, event, callback) + raising() + except ValueError as raises: + store_raised = raises + finally: + sys.monitoring.register_callback(tool_id, event, None) + sys.monitoring.set_events(tool_id, NO_EVENTS) + sys.monitoring.free_tool_id(tool_id) + + callback.assert_called() + # Called 3x (numba unpickle, ValueError in callback, the test method) + self.assertEqual(callback.call_count, 3) + + # check the numba unpickle call + numba_unpickle_call = callback.call_args_list[0] + self.assertEqual(numba_unpickle_call.args[0], _numba_unpickle.__code__) + self.assertIsInstance(numba_unpickle_call.args[2], KeyError) + + # check the jit(raising) call + raising_call = callback.call_args_list[1] + self.assertEqual(raising_call.args[0], raising.py_func.__code__) + self.assertIs(raising_call.args[2], callback.side_effect) + + # check the test method call + meth_call = callback.call_args_list[2] + test_method_code = sys._getframe().f_code + self.assertEqual(meth_call.args[0], test_method_code) + self.assertIs(meth_call.args[2], callback.side_effect) + + # check the stored exception is the expected exception + self.assertIs(store_raised, callback.side_effect) + + def test_raising_callback_unwinds_from_jit_on_unwind_path(self): + # An event callback can legitimately raise an exception, this test + # makes sure Numba's dispatcher handles it ok on the + # "unsuccessful path", i.e. the JIT compiled function raised an + # exception at runtime. This test checks the PY_UNWIND event. 
CPython + # seems to not notice the PY_UNWIND coming from the exception arising + # from the raise in the event callback, it just has the PY_UNWIND from + # the raise in the JIT compiled function. + + msg_callback = "deliberately broken callback" + msg_execution = "deliberately broken execution" + + callback = Mock(side_effect=ValueError(msg_callback)) + + class LocalException(Exception): + pass + + @_enable_sysmon + @jit("()") + def raising(): + raise LocalException(msg_execution) + + store_raised = None + try: + event = PY_UNWIND + tool_id = self.tool_id + sys.monitoring.use_tool_id(tool_id, "custom_monitor") + sys.monitoring.set_events(tool_id, event) + sys.monitoring.register_callback(tool_id, event, callback) + raising() + except ValueError as raises: + store_raised = raises + finally: + sys.monitoring.register_callback(tool_id, event, None) + sys.monitoring.set_events(tool_id, NO_EVENTS) + sys.monitoring.free_tool_id(tool_id) + + callback.assert_called_once() + + # check the jit(raising) call + raising_call = callback.call_args_list[0] + self.assertEqual(raising_call.args[0], raising.py_func.__code__) + self.assertEqual(type(raising_call.args[2]), LocalException) + self.assertEqual(str(raising_call.args[2]), msg_execution) + + # check the stored_raise + self.assertIs(store_raised, callback.side_effect) + + def test_monitoring_multiple_threads(self): + # two threads, different tools and events registered on each thread. + + def t1_work(self, q): + try: + # test event PY_START on a "debugger tool" + cb = self.run_with_events(self.call_foo, (self.arg,), + (PY_START,), + tool_id=sys.monitoring.DEBUGGER_ID) + # Check... + self.assertEqual(len(cb), 1) + self.check_py_start_calls(cb) + except Exception as e: + q.put(e) + + def t2_work(self, q): + try: + # test event PY_RETURN on a "coverage tool" + cb = self.run_with_events(self.call_foo, (self.arg,), + (PY_RETURN,), + tool_id=sys.monitoring.COVERAGE_ID) + # Check... 
+ self.assertEqual(len(cb), 1) + self.check_py_return_calls(cb) + except Exception as e: + q.put(e) + + q1 = queue.Queue() + t1 = threading.Thread(target=t1_work, args=(self, q1)) + q2 = queue.Queue() + t2 = threading.Thread(target=t2_work, args=(self, q2)) + + threads = (t1, t2) + for t in threads: + t.start() + for t in threads: + t.join() + + # make sure there were no exceptions + def assert_empty_queue(q): + if q.qsize() != 0: + while not q.empty(): + print(q.get()) + self.fail("queue supposed to be empty") + + assert_empty_queue(q1) + assert_empty_queue(q2) + + +@unittest.skipUnless(PYVERSION >= (3, 12), "needs Python 3.12+") +class TestMonitoringSelfTest(TestCase): + + def test_skipping_of_tests_if_monitoring_in_use(self): + # check that the unit tests in the TestMonitoring class above will skip + # if there are other monitoring tools registered in the thread (in this + # case cProfile is used to cause that effect). + r = self.subprocess_test_runner(TestMonitoring.__module__, + 'TestMonitoring', + 'test_start_event', + flags={'-m': 'cProfile'}) + self.assertIn("skipped=1", str(r)) + + +@unittest.skipUnless(PYVERSION >= (3, 12), "needs Python 3.12+") +class TestMonitoringEnvVarControl(TestCase): + @TestCase.run_test_in_subprocess( + envvars={"NUMBA_ENABLE_SYS_MONITORING": ''}) + def test_default_off(self): + @jit + def foo(x): + return x + 1 + + self.assertFalse(foo._enable_sysmon) + + @TestCase.run_test_in_subprocess( + envvars={"NUMBA_ENABLE_SYS_MONITORING": '0'}) + def test_override_off(self): + @jit + def foo(x): + return x + 1 + + self.assertFalse(foo._enable_sysmon) + + @TestCase.run_test_in_subprocess( + envvars={"NUMBA_ENABLE_SYS_MONITORING": '1'}) + def test_override_on(self): + @jit + def foo(x): + return x + 1 + + self.assertTrue(foo._enable_sysmon) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_sys_stdin_assignment.py 
b/venv/lib/python3.10/site-packages/numba/tests/test_sys_stdin_assignment.py new file mode 100644 index 0000000000000000000000000000000000000000..f9d88e23547d098ed5c6ffcaaf4f8585b6bbba89 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_sys_stdin_assignment.py @@ -0,0 +1,67 @@ +import sys + +import unittest +from numba import njit + + +@njit +def f0(a, b): + return a + b + + +@njit +def f1(begin1, end1, begin2, end2): + if begin1 > begin2: return f1(begin2, end2, begin1, end1) + return end1 + 1 >= begin2 + + +@njit +def f0_2(a, b): + return a + b + + +@njit +def f1_2(begin1, end1, begin2, end2): + if begin1 > begin2: return f1_2(begin2, end2, begin1, end1) + return end1 + 1 >= begin2 + + +class TestSysStdinAssignment(unittest.TestCase): + + def test_no_reassignment_of_stdout(self): + """ + https://github.com/numba/numba/issues/3027 + Older versions of colorama break stdout/err when recursive functions + are compiled. + + This test should work irrespective of colorama version, or indeed its + presence. If the version is too low, it should be disabled and the test + should work anyway, if it is a sufficiently high version or it is not + present, it should work anyway. + """ + + originally = sys.stdout, sys.stderr + + try: + sys.stdout = None + f0(0, 1) # Not changed stdout? + self.assertEqual(sys.stdout, None) + f1(0, 1, 2, 3) # Not changed stdout? + self.assertEqual(sys.stdout, None) + + sys.stderr = None + f0_2(0, 1) # Not changed stderr? + self.assertEqual(sys.stderr, None) + f1_2(0, 1, 2, 3) # Not changed stderr? 
+ self.assertEqual(sys.stderr, None) + + finally: + sys.stdout, sys.stderr = originally + + self.assertNotEqual(sys.stderr, None) + self.assertNotEqual(sys.stdout, None) + + +if __name__ == '__main__': + unittest.main() + diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_sysinfo.py b/venv/lib/python3.10/site-packages/numba/tests/test_sysinfo.py new file mode 100644 index 0000000000000000000000000000000000000000..b63f601b98c251b30467e789d55f9515a1516316 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_sysinfo.py @@ -0,0 +1,189 @@ +import platform +import unittest +from unittest import skipUnless +from unittest.mock import NonCallableMock +from itertools import chain +from datetime import datetime +from contextlib import redirect_stdout +from io import StringIO + +from numba.tests.support import TestCase +import numba.misc.numba_sysinfo as nsi + + +class TestSysInfo(TestCase): + + def setUp(self): + super(TestSysInfo, self).setUp() + self.info = nsi.get_sysinfo() + self.safe_contents = { + int: ( + nsi._cpu_count, + ), + float: ( + nsi._runtime, + ), + str: ( + nsi._machine, + nsi._cpu_name, + nsi._platform_name, + nsi._os_name, + nsi._os_version, + nsi._python_comp, + nsi._python_impl, + nsi._python_version, + nsi._llvm_version, + nsi._numpy_version, + ), + bool: ( + nsi._cu_dev_init, + nsi._svml_state, + nsi._svml_loaded, + nsi._svml_operational, + nsi._llvm_svml_patched, + nsi._tbb_thread, + nsi._openmp_thread, + nsi._wkq_thread, + nsi._numpy_AVX512_SKX_detected, + ), + list: ( + nsi._errors, + nsi._warnings, + ), + dict: ( + nsi._numba_env_vars, + ), + datetime: ( + nsi._start, + nsi._start_utc, + ), + } + self.safe_keys = chain(*self.safe_contents.values()) + + def tearDown(self): + super(TestSysInfo, self).tearDown() + # System info might contain long strings or lists so delete it. 
+ del self.info + + def test_has_safe_keys(self): + for k in self.safe_keys: + with self.subTest(k=k): + self.assertIn(k, self.info) + + def test_safe_content_type(self): + for t, keys in self.safe_contents.items(): + for k in keys: + with self.subTest(k=k): + self.assertIsInstance(self.info[k], t) + + def test_has_no_error(self): + self.assertFalse(self.info[nsi._errors]) + + def test_display_empty_info(self): + output = StringIO() + with redirect_stdout(output): + res = nsi.display_sysinfo({}) + self.assertIsNone(res) + output.close() + + +class TestSysInfoWithPsutil(TestCase): + + mem_total = 2 * 1024 ** 2 # 2_097_152 + mem_available = 1024 ** 2 # 1_048_576 + cpus_list = [1, 2] + + def setUp(self): + super(TestSysInfoWithPsutil, self).setUp() + self.psutil_orig_state = nsi._psutil_import + # Mocking psutil + nsi._psutil_import = True + nsi.psutil = NonCallableMock() + vm = nsi.psutil.virtual_memory.return_value + vm.total = self.mem_total + vm.available = self.mem_available + if platform.system() in ('Linux', 'Windows',): + # cpu_affiniy only available on Linux and Windows + proc = nsi.psutil.Process.return_value + proc.cpu_affinity.return_value = self.cpus_list + else: + nsi.psutil.Process.return_value = None + + self.info = nsi.get_os_spec_info(platform.system()) + + def tearDown(self): + super(TestSysInfoWithPsutil, self).tearDown() + nsi._psutil_import = self.psutil_orig_state + + def test_has_all_data(self): + keys = (nsi._mem_total, nsi._mem_available) + for k in keys: + with self.subTest(k=k): + self.assertIn(k, self.info.keys()) + self.assertIsInstance(self.info[k], int) + + def test_has_correct_values(self): + self.assertEqual(self.info[nsi._mem_total], self.mem_total) + self.assertEqual(self.info[nsi._mem_available], self.mem_available) + + @skipUnless(platform.system() in ('Linux', 'Windows'), + "CPUs allowed info only available on Linux and Windows") + def test_cpus_list(self): + self.assertEqual(self.info[nsi._cpus_allowed], len(self.cpus_list)) + 
self.assertEqual(self.info[nsi._cpus_list], + ' '.join(str(n) for n in self.cpus_list)) + + +class TestSysInfoWithoutPsutil(TestCase): + + def setUp(self): + super(TestSysInfoWithoutPsutil, self).setUp() + self.psutil_orig_state = nsi._psutil_import + nsi._psutil_import = False + self.info = nsi.get_os_spec_info(platform.system()) + + def tearDown(self): + super(TestSysInfoWithoutPsutil, self).tearDown() + nsi._psutil_import = self.psutil_orig_state + + def test_has_all_data(self): + keys = (nsi._mem_total, nsi._mem_available) + for k in keys: + with self.subTest(k=k): + self.assertIn(k, self.info.keys()) + self.assertIsInstance(self.info[k], int) + + +class TestPlatformSpecificInfo(TestCase): + + def setUp(self): + self.plat_spec_info = { + 'Linux': { + str: (nsi._libc_version,), + }, + 'Windows': { + str: (nsi._os_spec_version,), + }, + 'Darwin': { + str: (nsi._os_spec_version,), + }, + } + self.os_name = platform.system() + self.contents = self.plat_spec_info.get(self.os_name, {}) + self.info = nsi.get_os_spec_info(self.os_name) + + def test_has_all_data(self): + keys = chain(*self.contents.values()) + for k in keys: + with self.subTest(k=k): + self.assertIn(k, self.info.keys()) + + def test_content_type(self): + for t, keys in self.contents.items(): + for k in keys: + with self.subTest(k=k): + self.assertIsInstance(self.info[k], t) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_target_extension.py b/venv/lib/python3.10/site-packages/numba/tests/test_target_extension.py new file mode 100644 index 0000000000000000000000000000000000000000..a78b234b81ef1fdfa258f7b4a32b0afdd01b7ba9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_target_extension.py @@ -0,0 +1,851 @@ +"""This tests the target extension API to ensure that rudimentary expected +behaviours are present and correct. It uses a piece of fake hardware as a +target, the Dummy Processing Unit (DPU), to do this. 
The DPU borrows a lot from +the CPU but is part of the GPU class of target. The DPU target has deliberately +strange implementations of fundamental operations so as to make it identifiable +in testing.""" + +import unittest +from numba.tests.support import TestCase +import ctypes +import operator +from functools import cached_property +import numpy as np +from numba import njit, types +from numba.extending import (overload, overload_attribute, + overload_classmethod, intrinsic) +from numba.core.target_extension import ( + JitDecorator, + target_registry, + dispatcher_registry, + jit_registry, + target_override, + GPU, + resolve_dispatcher_from_str, +) +from numba.core import utils, fastmathpass, errors +from numba.core.dispatcher import Dispatcher +from numba.core.descriptors import TargetDescriptor +from numba.core import cpu, typing, cgutils +from numba.core.base import BaseContext +from numba.core.compiler_lock import global_compiler_lock +from numba.core import callconv +from numba.core.codegen import CPUCodegen, JITCodeLibrary +from numba.core.callwrapper import PyCallWrapper +from numba.core.imputils import RegistryLoader, Registry +from numba.core.typing.typeof import typeof +from numba import _dynfunc +import llvmlite.binding as ll +from llvmlite import ir as llir +from numba.core.runtime import rtsys + +from numba.core import compiler +from numba.core.compiler import CompilerBase, DefaultPassBuilder +from numba.core.compiler_machinery import FunctionPass, register_pass +from numba.core.typed_passes import PreLowerStripPhis + +# Define a new target, this target extends GPU, this places the DPU in the +# target hierarchy as a type of GPU. + + +class DPU(GPU): + ... 
+ + +# register the dpu target hierarchy token in the target registry, this +# permits lookup and reference in userspace by the string "dpu" +target_registry["dpu"] = DPU + +# Create a JIT DPU codegen for the DPU target + + +class JITDPUCodegen(CPUCodegen): + # This largely rips off the CPU for ease + + _library_class = JITCodeLibrary + + def _customize_tm_options(self, options): + # Customize the target machine options. + options["cpu"] = self._get_host_cpu_name() + arch = ll.Target.from_default_triple().name + if arch.startswith("x86"): + reloc_model = "static" + elif arch.startswith("ppc"): + reloc_model = "pic" + else: + reloc_model = "default" + options["reloc"] = reloc_model + options["codemodel"] = "jitdefault" + + # Set feature attributes (such as ISA extensions) + # This overrides default feature selection by CPU model above + options["features"] = self._tm_features + + # Deal with optional argument to ll.Target.create_target_machine + sig = utils.pysignature(ll.Target.create_target_machine) + if "jit" in sig.parameters: + # Mark that this is making a JIT engine + options["jit"] = True + + def _customize_tm_features(self): + # For JIT target, we will use LLVM to get the feature map + return self._get_host_cpu_features() + + def _add_module(self, module): + self._engine.add_module(module) + + def set_env(self, env_name, env): + """Set the environment address. + + Update the GlobalVariable named *env_name* to the address of *env*. + """ + gvaddr = self._engine.get_global_value_address(env_name) + envptr = (ctypes.c_void_p * 1).from_address(gvaddr) + envptr[0] = ctypes.c_void_p(id(env)) + + +# This is the function registry for the dpu, it just has one registry, this one! 
+dpu_function_registry = Registry() + +# Implement a new context for the DPU target + + +class DPUContext(BaseContext): + allow_dynamic_globals = True + + # Overrides + def create_module(self, name): + return self._internal_codegen._create_empty_module(name) + + @global_compiler_lock + def init(self): + self._internal_codegen = JITDPUCodegen("numba.exec") + # Initialize NRT runtime + rtsys.initialize(self) + self.refresh() + + def refresh(self): + registry = dpu_function_registry + try: + loader = self._registries[registry] + except KeyError: + loader = RegistryLoader(registry) + self._registries[registry] = loader + self.install_registry(registry) + # Also refresh typing context, since @overload declarations can + # affect it. + self.typing_context.refresh() + + @property + def target_data(self): + return self._internal_codegen.target_data + + def codegen(self): + return self._internal_codegen + + # Borrow the CPU call conv + @cached_property + def call_conv(self): + return callconv.CPUCallConv(self) + + def get_env_body(self, builder, envptr): + """ + From the given *envptr* (a pointer to a _dynfunc.Environment object), + get a EnvBody allowing structured access to environment fields. + """ + body_ptr = cgutils.pointer_add( + builder, envptr, _dynfunc._impl_info["offsetof_env_body"] + ) + return cpu.EnvBody(self, builder, ref=body_ptr, cast_ref=True) + + def get_env_manager(self, builder): + envgv = self.declare_env_global( + builder.module, self.get_env_name(self.fndesc) + ) + envarg = builder.load(envgv) + pyapi = self.get_python_api(builder) + pyapi.emit_environment_sentry( + envarg, debug_msg=self.fndesc.env_name, + ) + env_body = self.get_env_body(builder, envarg) + return pyapi.get_env_manager(self.environment, env_body, envarg) + + def get_generator_state(self, builder, genptr, return_type): + """ + From the given *genptr* (a pointer to a _dynfunc.Generator object), + get a pointer to its state area. 
+ """ + return cgutils.pointer_add( + builder, + genptr, + _dynfunc._impl_info["offsetof_generator_state"], + return_type=return_type, + ) + + def post_lowering(self, mod, library): + if self.fastmath: + fastmathpass.rewrite_module(mod, self.fastmath) + + library.add_linking_library(rtsys.library) + + def create_cpython_wrapper( + self, library, fndesc, env, call_helper, release_gil=False + ): + wrapper_module = self.create_module("wrapper") + fnty = self.call_conv.get_function_type(fndesc.restype, fndesc.argtypes) + wrapper_callee = llir.Function( + wrapper_module, fnty, fndesc.llvm_func_name + ) + builder = PyCallWrapper( + self, + wrapper_module, + wrapper_callee, + fndesc, + env, + call_helper=call_helper, + release_gil=release_gil, + ) + builder.build() + library.add_ir_module(wrapper_module) + + def create_cfunc_wrapper(self, library, fndesc, env, call_helper): + # There's no cfunc wrapper on the dpu + pass + + def get_executable(self, library, fndesc, env): + """ + Returns + ------- + (cfunc, fnptr) + + - cfunc + callable function (Can be None) + - fnptr + callable function address + - env + an execution environment (from _dynfunc) + """ + # Code generation + fnptr = library.get_pointer_to_function( + fndesc.llvm_cpython_wrapper_name + ) + + # Note: we avoid reusing the original docstring to avoid encoding + # issues on Python 2, see issue #1908 + doc = "compiled wrapper for %r" % (fndesc.qualname,) + cfunc = _dynfunc.make_function( + fndesc.lookup_module(), + fndesc.qualname.split(".")[-1], + doc, + fnptr, + env, + # objects to keepalive with the function + (library,), + ) + library.codegen.set_env(self.get_env_name(fndesc), env) + return cfunc + + +# Implement a DPU TargetDescriptor, this one borrows bits from the CPU +class DPUTarget(TargetDescriptor): + options = cpu.CPUTargetOptions + + @cached_property + def _toplevel_target_context(self): + # Lazily-initialized top-level target context, for all threads + return DPUContext(self.typing_context, 
self._target_name) + + @cached_property + def _toplevel_typing_context(self): + # Lazily-initialized top-level typing context, for all threads + return typing.Context() + + @property + def target_context(self): + """ + The target context for DPU targets. + """ + return self._toplevel_target_context + + @property + def typing_context(self): + """ + The typing context for CPU targets. + """ + return self._toplevel_typing_context + + +# Create a DPU target instance +dpu_target = DPUTarget("dpu") + + +# Declare a dispatcher for the DPU target +class DPUDispatcher(Dispatcher): + targetdescr = dpu_target + + def compile(self, sig): + with target_override('dpu'): + return super().compile(sig) + + +# Register a dispatcher for the DPU target, a lot of the code uses this +# internally to work out what to do RE compilation +dispatcher_registry[target_registry["dpu"]] = DPUDispatcher + +# Implement a dispatcher for the DPU target + + +class djit(JitDecorator): + def __init__(self, *args, **kwargs): + self._args = args + self._kwargs = kwargs + + def __call__(self, *args): + assert len(args) < 2 + if args: + func = args[0] + else: + func = self._args[0] + self.py_func = func + # wrap in dispatcher + return self.dispatcher_wrapper() + + def get_dispatcher(self): + """ + Returns the dispatcher + """ + return dispatcher_registry[target_registry["dpu"]] + + def dispatcher_wrapper(self): + disp = self.get_dispatcher() + # Parse self._kwargs here + topt = {} + if "nopython" in self._kwargs: + topt["nopython"] = True + + # It would be easy to specialise the default compilation pipeline for + # this target here. + pipeline_class = compiler.Compiler + if "pipeline_class" in self._kwargs: + pipeline_class = self._kwargs["pipeline_class"] + return disp( + py_func=self.py_func, + targetoptions=topt, + pipeline_class=pipeline_class, + ) + + +# add it to the decorator registry, this is so e.g. @overload can look up a +# JIT function to do the compilation work. 
+jit_registry[target_registry["dpu"]] = djit + +# The DPU target "knows" nothing, add in some primitives for basic things... + +# need to register how to lower dummy for @intrinsic + + +@dpu_function_registry.lower_constant(types.Dummy) +def constant_dummy(context, builder, ty, pyval): + return context.get_dummy_value() + + +# and how to deal with IntegerLiteral to Integer casts +@dpu_function_registry.lower_cast(types.IntegerLiteral, types.Integer) +def literal_int_to_number(context, builder, fromty, toty, val): + lit = context.get_constant_generic( + builder, fromty.literal_type, fromty.literal_value, + ) + return context.cast(builder, lit, fromty.literal_type, toty) + + +# and how to lower an Int constant +@dpu_function_registry.lower_constant(types.Integer) +def const_int(context, builder, ty, pyval): + lty = context.get_value_type(ty) + return lty(pyval) + + +# and tell the DPU how to lower a float constant +@dpu_function_registry.lower_constant(types.Float) +def const_float(context, builder, ty, pyval): + lty = context.get_value_type(ty) + return lty(pyval) + + +# The DPU actually subtracts when it's asked to 'add'! 
+@intrinsic(target="dpu") +def intrin_add(tyctx, x, y): + sig = x(x, y) + + def codegen(cgctx, builder, tyargs, llargs): + return builder.sub(*llargs) + + return sig, codegen + + +# Use extending.overload API to register 'add', call the dpu specific intrinsic +@overload(operator.add, target="dpu") +def ol_add(x, y): + if isinstance(x, types.Integer) and isinstance(y, types.Integer): + + def impl(x, y): + return intrin_add(x, y) + + return impl + + +class TestTargetHierarchySelection(TestCase): + """This tests that the target hierarchy is scanned in the right order, + that appropriate functions are selected based on what's available and that + the DPU target is distinctly different to the CPU""" + + def test_0_dpu_registry(self): + """Checks that the DPU registry only contains the things added + + This test must be first to execute among all tests in this file to + ensure the no lazily loaded entries are added yet. + """ + self.assertFalse(dpu_function_registry.functions) + self.assertFalse(dpu_function_registry.getattrs) + # int literal -> int cast is registered + self.assertEqual(len(dpu_function_registry.casts), 1) + # int, float and dummy constants are registered + self.assertEqual(len(dpu_function_registry.constants), 3) + + def test_specialise_gpu(self): + def my_func(x): + pass + + # Can be used by both CPU and DPU + @overload(my_func, target="generic") + def ol_my_func1(x): + def impl(x): + return 1 + x + + return impl + + # Should be used by the DPU if there's no dpu specific one + @overload(my_func, target="gpu") + def ol_my_func2(x): + def impl(x): + return 10 + x + + return impl + + @djit() + def dpu_foo(): + return my_func(7) + + @njit() + def cpu_foo(): + return my_func(7) + + # DPU chooses the ol_my_func2 as it's most specific, and DPU subtracts + # for addition, so 10 + x -> 10 - 7 -> 3 + self.assertPreciseEqual(dpu_foo(), 3) + # CPU uses the generic one function ol_my_func1 and adds + self.assertPreciseEqual(cpu_foo(), 8) + + def 
test_specialise_dpu(self): + def my_func(x): + pass + + # Can be used by both CPU and DPU + @overload(my_func, target="generic") + def ol_my_func1(x): + def impl(x): + return 1 + x + + return impl + + # Should be used by the DPU if there's no dpu specific one + @overload(my_func, target="gpu") + def ol_my_func2(x): + def impl(x): + return 10 + x + + return impl + + # Should be used by the DPU only + @overload(my_func, target="dpu") + def ol_my_func3(x): + def impl(x): + return 100 + x + + return impl + + @djit() + def dpu_foo(): + return my_func(7) + + @njit() + def cpu_foo(): + return my_func(7) + + # DPU chooses the ol_my_func3 as it's most specific, and DPU subtracts + # for addition, so 100 + x -> 100 - 7 -> 93 + self.assertPreciseEqual(dpu_foo(), 93) + # CPU uses the generic one function ol_my_func1 and adds + self.assertPreciseEqual(cpu_foo(), 8) + + def test_no_specialisation_found(self): + + def my_func(x): + pass + + # only create a cuda specialisation + @overload(my_func, target='cuda') + def ol_my_func_cuda(x): + return lambda x: None + + @djit(nopython=True) + def dpu_foo(): + my_func(1) + + # new style errors raise UnsupportedError, old style ends up as + # TypingError + accept = (errors.UnsupportedError, errors.TypingError) + with self.assertRaises(accept) as raises: + dpu_foo() + + msgs = ["Function resolution cannot find any matches for function", + "test_no_specialisation_found..my_func", + "for the current target:", + "'numba.tests.test_target_extension.DPU'"] + + for msg in msgs: + self.assertIn(msg, str(raises.exception)) + + def test_invalid_target_jit(self): + + with self.assertRaises(errors.NonexistentTargetError) as raises: + @njit(_target='invalid_silicon') + def foo(): + pass + + foo() + + msg = "No target is registered against 'invalid_silicon'" + self.assertIn(msg, str(raises.exception)) + + def test_invalid_target_overload(self): + + def bar(): + pass + + # This is a typing error at present as it fails during typing when the + # 
overloads are walked. + with self.assertRaises(errors.NonexistentTargetError) as raises: + @overload(bar, target='invalid_silicon') + def ol_bar(): + return lambda : None + + @njit + def foo(): + bar() + + foo() + + msg = "No target is registered against 'invalid_silicon'" + self.assertIn(msg, str(raises.exception)) + + def test_intrinsic_selection(self): + """ + Test to make sure that targets can share generic implementations and + cannot reach implementations that are not in their target hierarchy. + """ + + # NOTE: The actual operation performed by these functions is irrelevant + @intrinsic(target="generic") + def intrin_math_generic(tyctx, x, y): + sig = x(x, y) + + def codegen(cgctx, builder, tyargs, llargs): + return builder.mul(*llargs) + + return sig, codegen + + @intrinsic(target="dpu") + def intrin_math_dpu(tyctx, x, y): + sig = x(x, y) + + def codegen(cgctx, builder, tyargs, llargs): + return builder.sub(*llargs) + + return sig, codegen + + @intrinsic(target="cpu") + def intrin_math_cpu(tyctx, x, y): + sig = x(x, y) + + def codegen(cgctx, builder, tyargs, llargs): + return builder.add(*llargs) + + return sig, codegen + + # CPU can use the CPU version + @njit + def cpu_foo_specific(): + return intrin_math_cpu(3, 4) + + self.assertEqual(cpu_foo_specific(), 7) + + # CPU can use the 'generic' version + @njit + def cpu_foo_generic(): + return intrin_math_generic(3, 4) + + self.assertEqual(cpu_foo_generic(), 12) + + # CPU cannot use the 'dpu' version + @njit + def cpu_foo_dpu(): + return intrin_math_dpu(3, 4) + + accept = (errors.UnsupportedError, errors.TypingError) + with self.assertRaises(accept) as raises: + cpu_foo_dpu() + + msgs = ["Function resolution cannot find any matches for function", + "intrinsic intrin_math_dpu", + "for the current target",] + for msg in msgs: + self.assertIn(msg, str(raises.exception)) + + # DPU can use the DPU version + @djit(nopython=True) + def dpu_foo_specific(): + return intrin_math_dpu(3, 4) + + 
self.assertEqual(dpu_foo_specific(), -1) + + # DPU can use the 'generic' version + @djit(nopython=True) + def dpu_foo_generic(): + return intrin_math_generic(3, 4) + + self.assertEqual(dpu_foo_generic(), 12) + + # DPU cannot use the 'cpu' version + @djit(nopython=True) + def dpu_foo_cpu(): + return intrin_math_cpu(3, 4) + + accept = (errors.UnsupportedError, errors.TypingError) + with self.assertRaises(accept) as raises: + dpu_foo_cpu() + + msgs = ["Function resolution cannot find any matches for function", + "intrinsic intrin_math_cpu", + "for the current target",] + for msg in msgs: + self.assertIn(msg, str(raises.exception)) + + def test_overload_allocation(self): + def cast_integer(context, builder, val, fromty, toty): + # XXX Shouldn't require this. + if toty.bitwidth == fromty.bitwidth: + # Just a change of signedness + return val + elif toty.bitwidth < fromty.bitwidth: + # Downcast + return builder.trunc(val, context.get_value_type(toty)) + elif fromty.signed: + # Signed upcast + return builder.sext(val, context.get_value_type(toty)) + else: + # Unsigned upcast + return builder.zext(val, context.get_value_type(toty)) + + @intrinsic(target='dpu') + def intrin_alloc(typingctx, allocsize, align): + """Intrinsic to call into the allocator for Array + """ + def codegen(context, builder, signature, args): + [allocsize, align] = args + + # XXX: error are being eaten. 
+ # example: replace the next line with `align_u32 = align` + align_u32 = cast_integer(context, builder, align, + signature.args[1], types.uint32) + meminfo = context.nrt.meminfo_alloc_aligned(builder, allocsize, + align_u32) + return meminfo + + from numba.core.typing import signature + mip = types.MemInfoPointer(types.voidptr) # return untyped pointer + sig = signature(mip, allocsize, align) + return sig, codegen + + @overload_classmethod(types.Array, '_allocate', target='dpu', + jit_options={'nopython':True}) + def _ol_arr_allocate_dpu(cls, allocsize, align): + def impl(cls, allocsize, align): + return intrin_alloc(allocsize, align) + return impl + + @overload(np.empty, target='dpu', jit_options={'nopython':True}) + def ol_empty_impl(n): + def impl(n): + return types.Array._allocate(n, 7) + return impl + + def buffer_func(): + pass + + @overload(buffer_func, target='dpu', jit_options={'nopython':True}) + def ol_buffer_func_impl(): + def impl(): + return np.empty(10) + return impl + + from numba.core.target_extension import target_override + + # XXX: this should probably go inside the dispatcher + with target_override('dpu'): + @djit(nopython=True) + def foo(): + return buffer_func() + r = foo() + from numba.core.runtime import nrt + self.assertIsInstance(r, nrt.MemInfo) + + def test_overload_attribute_target(self): + MyDummy, MyDummyType = self.make_dummy_type() + mydummy_type = typeof(MyDummy()) + + @overload_attribute(MyDummyType, 'dpu_only', target='dpu') + def ov_dummy_dpu_attr(obj): + def imp(obj): + return 42 + + return imp + + # Ensure that we cannot use the DPU target-specific attribute on the + # CPU, and that an appropriate typing error is raised + with self.assertRaisesRegex(errors.TypingError, + "Unknown attribute 'dpu_only'"): + @njit(types.int64(mydummy_type)) + def illegal_target_attr_use(x): + return x.dpu_only + + # Ensure that the DPU target-specific attribute is usable and works + # correctly when the target is DPU - note eager compilation via 
+ # signature + @djit(types.void(types.int64[::1], mydummy_type)) + def cuda_target_attr_use(res, dummy): + res[0] = dummy.dpu_only + + +class TestTargetOffload(TestCase): + """In this use case the CPU compilation pipeline is extended with a new + compilation pass that runs just prior to lowering. The pass looks for + function calls and when it finds one it sees if there's a DPU function + available that is a valid overload for the function call. If there is one + then it swaps the CPU implementation out for a DPU implementation. This + producing an "offload" effect. + """ + + def test_basic_offload(self): + + _DEBUG = False + + # This is the DPU function for sin, it'll return a pi-like constant + @overload(np.sin, target="dpu") + def ol_np_sin_DPU(x): + def dpu_sin_impl(x): + return 314159.0 + + return dpu_sin_impl + + # Check the DPU reports the correct overload value + @djit(nopython=True) + def foo(x): + return np.sin(x) + + self.assertPreciseEqual(foo(5), 314159.0) + + # Check the CPU call is correct + + @njit + def foo(x): + return np.sin(x) + + self.assertPreciseEqual(foo(5), np.sin(5)) + + @register_pass(mutates_CFG=False, analysis_only=False) + class DispatcherSwitcher(FunctionPass): + _name = "DispatcherSwitcher" + + def __init__(self): + FunctionPass.__init__(self) + + def run_pass(self, state): + func_ir = state.func_ir + mutated = False + for blk in func_ir.blocks.values(): + # find the assignment nodes in the block and walk them, if + # there's a DPU version then swap out for a call to that + for call in blk.find_exprs("call"): + function = state.typemap[call.func.name] + tname = "dpu" + + # Note: `target_override` context driven compilation can + # be done here, the DPU target is in use. 
+ with target_override(tname): + try: + sig = function.get_call_type( + state.typingctx, + state.calltypes[call].args, + {}, + ) + disp = resolve_dispatcher_from_str(tname) + # force compile check + hw_ctx = disp.targetdescr.target_context + hw_ctx.get_function(function, sig) + except Exception as e: + if _DEBUG: + msg = ( + f"Failed to find and compile an " + f"overload for {function} for {tname} " + f"due to {e}" + ) + print(msg) + continue + + # This is a necessary hack at present so as to + # generate code into the same library. I.e. the DPU + # target is going to do code gen into the CPUs lib. + hw_ctx._codelib_stack = ( + state.targetctx._codelib_stack + ) + + # All is good, so switch IR node for one targeting + # this target. Should generate this, but for now + # just mutate as: + # ir.Expr.call(call.func, call.args, call.kws, + # call.loc, target='dpu') + call.target = tname + mutated = True + # return True if the IR was mutated, False if not. + return mutated + + # DPU compiler pipeline, compiles with offload to the DPU target + class DPUOffloadCompiler(CompilerBase): + def define_pipelines(self): + pm = DefaultPassBuilder.define_nopython_pipeline(self.state) + pm.add_pass_after(DispatcherSwitcher, PreLowerStripPhis) + pm.finalize() + return [pm] + + # Now compile for CPU, but with the DispatcherSwitcher pass in place + # that switches CPU calls for DPU calls + @njit(pipeline_class=DPUOffloadCompiler) + def foo(x): + return np.sin(x), np.cos(x) # np.sin is DPU, np.cos is CPU + + self.assertPreciseEqual(foo(5), (314159.0, np.cos(5))) + + +if __name__ == "__main__": + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_target_overloadselector.py b/venv/lib/python3.10/site-packages/numba/tests/test_target_overloadselector.py new file mode 100644 index 0000000000000000000000000000000000000000..8fa54bccc65057b62b331c24c698d71a8d1caaac --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_target_overloadselector.py @@ 
-0,0 +1,148 @@ +from itertools import product, permutations +from collections import defaultdict + +import unittest +from numba.core.base import OverloadSelector +from numba.core.registry import cpu_target +from numba.core.imputils import builtin_registry, RegistryLoader +from numba.core import types +from numba.core.errors import NumbaNotImplementedError, NumbaTypeError + + +class TestOverloadSelector(unittest.TestCase): + def test_select_and_sort_1(self): + os = OverloadSelector() + os.append(1, (types.Any, types.Boolean)) + os.append(2, (types.Boolean, types.Integer)) + os.append(3, (types.Boolean, types.Any)) + os.append(4, (types.Boolean, types.Boolean)) + compats = os._select_compatible((types.boolean, types.boolean)) + self.assertEqual(len(compats), 3) + ordered, scoring = os._sort_signatures(compats) + self.assertEqual(len(ordered), 3) + self.assertEqual(len(scoring), 3) + self.assertEqual(ordered[0], (types.Boolean, types.Boolean)) + self.assertEqual(scoring[types.Boolean, types.Boolean], 0) + self.assertEqual(scoring[types.Boolean, types.Any], 1) + self.assertEqual(scoring[types.Any, types.Boolean], 1) + + def test_select_and_sort_2(self): + os = OverloadSelector() + os.append(1, (types.Container,)) + os.append(2, (types.Sequence,)) + os.append(3, (types.MutableSequence,)) + os.append(4, (types.List,)) + compats = os._select_compatible((types.List,)) + self.assertEqual(len(compats), 4) + ordered, scoring = os._sort_signatures(compats) + self.assertEqual(len(ordered), 4) + self.assertEqual(len(scoring), 4) + self.assertEqual(ordered[0], (types.List,)) + self.assertEqual(scoring[(types.List,)], 0) + self.assertEqual(scoring[(types.MutableSequence,)], 1) + self.assertEqual(scoring[(types.Sequence,)], 2) + self.assertEqual(scoring[(types.Container,)], 3) + + def test_match(self): + os = OverloadSelector() + self.assertTrue(os._match(formal=types.Boolean, actual=types.boolean)) + self.assertTrue(os._match(formal=types.Boolean, actual=types.Boolean)) + # test 
subclass + self.assertTrue(issubclass(types.Sequence, types.Container)) + self.assertTrue(os._match(formal=types.Container, + actual=types.Sequence)) + self.assertFalse(os._match(formal=types.Sequence, + actual=types.Container)) + # test any + self.assertTrue(os._match(formal=types.Any, actual=types.Any)) + self.assertTrue(os._match(formal=types.Any, actual=types.Container)) + self.assertFalse(os._match(formal=types.Container, actual=types.Any)) + + def test_ambiguous_detection(self): + os = OverloadSelector() + # unambiguous signatures + os.append(1, (types.Any, types.Boolean)) + os.append(2, (types.Integer, types.Boolean)) + self.assertEqual(os.find((types.boolean, types.boolean)), 1) + # not implemented + with self.assertRaises(NumbaNotImplementedError) as raises: + os.find((types.boolean, types.int32)) + # generic + os.append(3, (types.Any, types.Any)) + self.assertEqual(os.find((types.boolean, types.int32)), 3) + self.assertEqual(os.find((types.boolean, types.boolean)), 1) + # add ambiguous signature; can match (bool, any) and (any, bool) + os.append(4, (types.Boolean, types.Any)) + with self.assertRaises(NumbaTypeError) as raises: + os.find((types.boolean, types.boolean)) + self.assertIn('2 ambiguous signatures', str(raises.exception)) + # disambiguous + os.append(5, (types.boolean, types.boolean)) + self.assertEqual(os.find((types.boolean, types.boolean)), 5) + + def test_subclass_specialization(self): + os = OverloadSelector() + self.assertTrue(issubclass(types.Sequence, types.Container)) + os.append(1, (types.Container, types.Container,)) + lstty = types.List(types.boolean) + self.assertEqual(os.find((lstty, lstty)), 1) + os.append(2, (types.Container, types.Sequence,)) + self.assertEqual(os.find((lstty, lstty)), 2) + + def test_cache(self): + os = OverloadSelector() + self.assertEqual(len(os._cache), 0) + os.append(1, (types.Any,)) + self.assertEqual(os.find((types.int32,)), 1) + self.assertEqual(len(os._cache), 1) + os.append(2, (types.Integer,)) + 
self.assertEqual(len(os._cache), 0) + self.assertEqual(os.find((types.int32,)), 2) + self.assertEqual(len(os._cache), 1) + + +class TestAmbiguousOverloads(unittest.TestCase): + + @classmethod + def setUpClass(cls): + # ensure all impls are loaded + cpu_target.target_context.refresh() + + def create_overload_selector(self, kind): + os = OverloadSelector() + loader = RegistryLoader(builtin_registry) + for impl, sig in loader.new_registrations(kind): + os.append(impl, sig) + return os + + def test_ambiguous_casts(self): + os = self.create_overload_selector(kind='casts') + all_types = set(t for sig, impl in os.versions for t in sig) + # ensure there are no ambiguous cast overloads + # note: using permutations to avoid testing cast to the same type + for sig in permutations(all_types, r=2): + try: + os.find(sig) + except NumbaNotImplementedError: + pass # ignore not implemented cast + + def test_ambiguous_functions(self): + loader = RegistryLoader(builtin_registry) + selectors = defaultdict(OverloadSelector) + for impl, fn, sig in loader.new_registrations('functions'): + os = selectors[fn] + os.append(impl, sig) + + for fn, os in selectors.items(): + all_types = set(t for sig, impl in os.versions for t in sig) + # ensure there are no ambiguous overloads + for sig in product(all_types, all_types): + try: + os.find(sig) + except NumbaNotImplementedError: + pass # ignore not implemented cast + + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_threadsafety.py b/venv/lib/python3.10/site-packages/numba/tests/test_threadsafety.py new file mode 100644 index 0000000000000000000000000000000000000000..19a974e244fc0e17ee72887089c74e71dde7b575 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_threadsafety.py @@ -0,0 +1,97 @@ +""" +Test threadsafety for compiler. +These tests will cause segfault if fail. 
+""" +import threading +import random + +import numpy as np + +from numba import jit, vectorize, guvectorize + +from numba.tests.support import temp_directory, override_config +from numba.core import config +import unittest + + +def foo(n, v): + return np.ones(n) + + +def ufunc_foo(a, b): + return a + b + + +def gufunc_foo(a, b, out): + out[0] = a + b + + + +class TestThreadSafety(unittest.TestCase): + + def run_jit(self, **options): + def runner(): + cfunc = jit(**options)(foo) + + return cfunc(4, 10) + return runner + + def run_compile(self, fnlist): + self._cache_dir = temp_directory(self.__class__.__name__) + with override_config('CACHE_DIR', self._cache_dir): + def chooser(): + for _ in range(10): + fn = random.choice(fnlist) + fn() + + ths = [threading.Thread(target=chooser) + for i in range(4)] + for th in ths: + th.start() + for th in ths: + th.join() + + def test_concurrent_jit(self): + self.run_compile([self.run_jit(nopython=True)]) + + def test_concurrent_jit_cache(self): + self.run_compile([self.run_jit(nopython=True, cache=True)]) + + def run_vectorize(self, **options): + def runner(): + cfunc = vectorize(['(f4, f4)'], **options)(ufunc_foo) + a = b = np.random.random(10).astype(np.float32) + return cfunc(a, b) + return runner + + def test_concurrent_vectorize(self): + self.run_compile([self.run_vectorize(nopython=True)]) + + def test_concurrent_vectorize_cache(self): + self.run_compile([self.run_vectorize(nopython=True, cache=True)]) + + def run_guvectorize(self, **options): + def runner(): + sig = ['(f4, f4, f4[:])'] + cfunc = guvectorize(sig, '(),()->()', **options)(gufunc_foo) + a = b = np.random.random(10).astype(np.float32) + return cfunc(a, b) + return runner + + def test_concurrent_guvectorize(self): + self.run_compile([self.run_guvectorize(nopython=True)]) + + def test_concurrent_guvectorize_cache(self): + self.run_compile([self.run_guvectorize(nopython=True, cache=True)]) + + def test_concurrent_mix_use(self): + 
self.run_compile([self.run_jit(nopython=True, cache=True), + self.run_jit(nopython=True), + self.run_vectorize(nopython=True, cache=True), + self.run_vectorize(nopython=True), + self.run_guvectorize(nopython=True, cache=True), + self.run_guvectorize(nopython=True)]) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_tracing.py b/venv/lib/python3.10/site-packages/numba/tests/test_tracing.py new file mode 100644 index 0000000000000000000000000000000000000000..0ab895efa2c73baac757857e568825e25794de03 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_tracing.py @@ -0,0 +1,182 @@ +from io import StringIO +import logging + +import unittest +from numba.core import tracing + +logger = logging.getLogger('trace') +logger.setLevel(logging.INFO) + +# Make sure tracing is enabled +orig_trace = tracing.trace +tracing.trace = tracing.dotrace + +class CapturedTrace: + """Capture the trace temporarily for validation.""" + + def __init__(self): + self.buffer = StringIO() + self.handler = logging.StreamHandler(self.buffer) + def __enter__(self): + self._handlers = logger.handlers + self.buffer = StringIO() + logger.handlers = [logging.StreamHandler(self.buffer)] + def __exit__(self, type, value, traceback): + logger.handlers = self._handlers + def getvalue(self): + + # Depending on how the tests are run, object names may be + # qualified by their containing module. + # Remove that to make the trace output independent from the testing mode. 
+ log = self.buffer.getvalue() + log = log.replace(__name__ + '.','') + return log + +class Class(object): + + @tracing.trace + @classmethod + def class_method(cls): + pass + + @tracing.trace + @staticmethod + def static_method(): + pass + + __test = None + + def _test_get(self): + return self.__test + + def _test_set(self, value): + self.__test = value + + test = tracing.trace(property(_test_get, _test_set)) + + @tracing.trace + def method(self, some, other='value', *args, **kwds): + pass + + def __repr__(self): + """Generate a deterministic string for testing.""" + return '' + +class Class2(object): + @classmethod + def class_method(cls): + pass + + @staticmethod + def static_method(): + pass + + __test = None + @property + def test(self): + return self.__test + @test.setter + def test(self, value): + self.__test = value + + def method(self): + pass + + def __str__(self): + return 'Test(' + str(self.test) + ')' + + def __repr__(self): + """Generate a deterministic string for testing.""" + return '' + + +@tracing.trace +def test(x, y, z = True): + a = x + y + b = x * y + if z: return a + else: return b + +class TestTracing(unittest.TestCase): + + def __init__(self, *args): + super(TestTracing, self).__init__(*args) + + def setUp(self): + self.capture = CapturedTrace() + + def tearDown(self): + del self.capture + + def test_method(self): + + with self.capture: + Class().method('foo', bar='baz') + self.assertEqual(self.capture.getvalue(), + ">> Class.method(self=, some='foo', other='value', bar='baz')\n" + + "<< Class.method\n") + + def test_class_method(self): + + with self.capture: + Class.class_method() + self.assertEqual(self.capture.getvalue(), + ">> Class.class_method(cls=)\n" + + "<< Class.class_method\n") + + def test_static_method(self): + + with self.capture: + Class.static_method() + self.assertEqual(self.capture.getvalue(), + ">> static_method()\n" + + "<< static_method\n") + + + def test_property(self): + + with self.capture: + test = Class() + 
test.test = 1 + assert 1 == test.test + self.assertEqual(self.capture.getvalue(), + ">> Class._test_set(self=, value=1)\n" + + "<< Class._test_set\n" + + ">> Class._test_get(self=)\n" + + "<< Class._test_get -> 1\n") + + def test_function(self): + + with self.capture: + test(5, 5) + test(5, 5, False) + self.assertEqual(self.capture.getvalue(), + ">> test(x=5, y=5, z=True)\n" + + "<< test -> 10\n" + + ">> test(x=5, y=5, z=False)\n" + + "<< test -> 25\n") + + @unittest.skip("recursive decoration not yet implemented") + def test_injected(self): + + with self.capture: + tracing.trace(Class2, recursive=True) + Class2.class_method() + Class2.static_method() + test = Class2() + test.test = 1 + assert 1 == test.test + test.method() + + self.assertEqual(self.capture.getvalue(), + ">> Class2.class_method(cls=)\n" + + "<< Class2.class_method\n" + ">> static_method()\n" + "<< static_method\n") + + +# Reset tracing to its original value +tracing.trace = orig_trace + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_try_except.py b/venv/lib/python3.10/site-packages/numba/tests/test_try_except.py new file mode 100644 index 0000000000000000000000000000000000000000..8239a948a3b0ab52512760b4f73be8044ed9038b --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_try_except.py @@ -0,0 +1,849 @@ +import warnings +import dis +from itertools import product + +import numpy as np + +from numba import njit, typed, objmode, prange +from numba.core import ir_utils, ir +from numba.core.errors import ( + CompilerError, NumbaPerformanceWarning, TypingError, + UnsupportedBytecodeError, +) +from numba.tests.support import ( + TestCase, unittest, captured_stdout, MemoryLeakMixin, + skip_parfors_unsupported, skip_unless_scipy, expected_failure_py311, + expected_failure_py312, expected_failure_py313, +) + + +class MyError(Exception): + pass + + +class TestTryBareExcept(TestCase): + """Test the following pattern: + + try: + + 
except: + + """ + def test_try_inner_raise(self): + @njit + def inner(x): + if x: + raise MyError + + @njit + def udt(x): + try: + inner(x) + return "not raised" + except: # noqa: E722 + return "caught" + + self.assertEqual(udt(False), "not raised") + self.assertEqual(udt(True), "caught") + + def test_try_state_reset(self): + @njit + def inner(x): + if x == 1: + raise MyError("one") + elif x == 2: + raise MyError("two") + + @njit + def udt(x): + try: + inner(x) + res = "not raised" + except: # noqa: E722 + res = "caught" + if x == 0: + inner(2) + return res + + with self.assertRaises(MyError) as raises: + udt(0) + self.assertEqual(str(raises.exception), "two") + self.assertEqual(udt(1), "caught") + self.assertEqual(udt(-1), "not raised") + + def _multi_inner(self): + @njit + def inner(x): + if x == 1: + print("call_one") + raise MyError("one") + elif x == 2: + print("call_two") + raise MyError("two") + elif x == 3: + print("call_three") + raise MyError("three") + else: + print("call_other") + + return inner + + def test_nested_try(self): + inner = self._multi_inner() + + @njit + def udt(x, y, z): + try: + try: + print("A") + inner(x) + print("B") + except: # noqa: E722 + print("C") + inner(y) + print("D") + except: # noqa: E722 + print("E") + inner(z) + print("F") + + # case 1 + with self.assertRaises(MyError) as raises: + with captured_stdout() as stdout: + udt(1, 2, 3) + self.assertEqual( + stdout.getvalue().split(), + ["A", "call_one", "C", "call_two", "E", "call_three"], + ) + self.assertEqual(str(raises.exception), "three") + + # case 2 + with captured_stdout() as stdout: + udt(1, 0, 3) + self.assertEqual( + stdout.getvalue().split(), + ["A", "call_one", "C", "call_other", "D"], + ) + + # case 3 + with captured_stdout() as stdout: + udt(1, 2, 0) + self.assertEqual( + stdout.getvalue().split(), + ["A", "call_one", "C", "call_two", "E", "call_other", "F"], + ) + + def test_loop_in_try(self): + inner = self._multi_inner() + + @njit + def udt(x, n): + try: + 
print("A") + for i in range(n): + print(i) + if i == x: + inner(i) + except: # noqa: E722 + print("B") + return i + + # case 1 + with captured_stdout() as stdout: + res = udt(3, 5) + self.assertEqual( + stdout.getvalue().split(), + ["A", "0", "1", "2", "3", "call_three", "B"], + ) + self.assertEqual(res, 3) + + # case 2 + with captured_stdout() as stdout: + res = udt(1, 3) + self.assertEqual( + stdout.getvalue().split(), + ["A", "0", "1", "call_one", "B"], + ) + self.assertEqual(res, 1) + + # case 3 + with captured_stdout() as stdout: + res = udt(0, 3) + self.assertEqual( + stdout.getvalue().split(), + ["A", "0", "call_other", "1", "2"], + ) + self.assertEqual(res, 2) + + def test_raise_in_try(self): + @njit + def udt(x): + try: + print("A") + if x: + raise MyError("my_error") + print("B") + except: # noqa: E722 + print("C") + return 321 + return 123 + + # case 1 + with captured_stdout() as stdout: + res = udt(True) + + self.assertEqual( + stdout.getvalue().split(), + ["A", "C"], + ) + self.assertEqual(res, 321) + + # case 2 + with captured_stdout() as stdout: + res = udt(False) + + self.assertEqual( + stdout.getvalue().split(), + ["A", "B"], + ) + self.assertEqual(res, 123) + + def test_recursion(self): + @njit + def foo(x): + if x > 0: + try: + foo(x - 1) + except: # noqa: E722 + print("CAUGHT") + return 12 + if x == 1: + raise ValueError("exception") + + with captured_stdout() as stdout: + res = foo(10) + + self.assertIsNone(res) + self.assertEqual(stdout.getvalue().split(), ["CAUGHT",],) + + def test_yield(self): + @njit + def foo(x): + if x > 0: + try: + yield 7 + raise ValueError("exception") # never hit + except Exception: + print("CAUGHT") + + @njit + def bar(z): + return next(foo(z)) + + with captured_stdout() as stdout: + res = bar(10) + + self.assertEqual(res, 7) + self.assertEqual(stdout.getvalue().split(), []) + + def test_closure2(self): + @njit + def foo(x): + def bar(): + try: + raise ValueError("exception") + except: # noqa: E722 + print("CAUGHT") 
+ return 12 + bar() + + with captured_stdout() as stdout: + foo(10) + + self.assertEqual(stdout.getvalue().split(), ["CAUGHT",],) + + def test_closure3(self): + @njit + def foo(x): + def bar(z): + try: + raise ValueError("exception") + except: # noqa: E722 + print("CAUGHT") + return z + return [x for x in map(bar, [1, 2, 3])] + + with captured_stdout() as stdout: + res = foo(10) + + self.assertEqual(res, [1, 2, 3]) + self.assertEqual(stdout.getvalue().split(), ["CAUGHT",] * 3,) + + def test_closure4(self): + @njit + def foo(x): + def bar(z): + if z < 0: + raise ValueError("exception") + return z + + try: + return [x for x in map(bar, [1, 2, 3, x])] + except: # noqa: E722 + print("CAUGHT") + + with captured_stdout() as stdout: + res = foo(-1) + + self.assertEqual(stdout.getvalue().strip(), "CAUGHT") + self.assertIsNone(res) + + with captured_stdout() as stdout: + res = foo(4) + self.assertEqual(stdout.getvalue(), "") + self.assertEqual(res, [1, 2, 3, 4]) + + @skip_unless_scipy + def test_real_problem(self): + @njit + def foo(): + a = np.zeros((4, 4)) + try: + chol = np.linalg.cholesky(a) + except: # noqa: E722 + print("CAUGHT") + return chol + + with captured_stdout() as stdout: + foo() + + self.assertEqual(stdout.getvalue().split(), ["CAUGHT",]) + + def test_for_loop(self): + @njit + def foo(n): + + for i in range(n): + try: + if i > 5: + raise ValueError + except: # noqa: E722 + print("CAUGHT") + else: + try: + try: + try: + if i > 5: + raise ValueError + except: # noqa: E722 + print("CAUGHT1") + raise ValueError + except: # noqa: E722 + print("CAUGHT2") + raise ValueError + except: # noqa: E722 + print("CAUGHT3") + + with captured_stdout() as stdout: + foo(10) + + self.assertEqual( + stdout.getvalue().split(), + ["CAUGHT",] * 4 + ["CAUGHT%s" % i for i in range(1, 4)], + ) + + def test_try_pass(self): + @njit + def foo(x): + try: + pass + except: # noqa: E722 + pass + return x + + res = foo(123) + self.assertEqual(res, 123) + + def test_try_except_reraise(self): + 
@njit + def udt(): + try: + raise ValueError("ERROR") + except: # noqa: E722 + raise + + with self.assertRaises(UnsupportedBytecodeError) as raises: + udt() + self.assertIn( + "The re-raising of an exception is not yet supported.", + str(raises.exception), + ) + + +class TestTryExceptCaught(TestCase): + def test_catch_exception(self): + @njit + def udt(x): + try: + print("A") + if x: + raise ZeroDivisionError("321") + print("B") + except Exception: + print("C") + print("D") + + # case 1 + with captured_stdout() as stdout: + udt(True) + + self.assertEqual( + stdout.getvalue().split(), + ["A", "C", "D"], + ) + + # case 2 + with captured_stdout() as stdout: + udt(False) + + self.assertEqual( + stdout.getvalue().split(), + ["A", "B", "D"], + ) + + def test_return_in_catch(self): + @njit + def udt(x): + try: + print("A") + if x: + raise ZeroDivisionError + print("B") + r = 123 + except Exception: + print("C") + r = 321 + return r + print("D") + return r + + # case 1 + with captured_stdout() as stdout: + res = udt(True) + + self.assertEqual( + stdout.getvalue().split(), + ["A", "C"], + ) + self.assertEqual(res, 321) + + # case 2 + with captured_stdout() as stdout: + res = udt(False) + + self.assertEqual( + stdout.getvalue().split(), + ["A", "B", "D"], + ) + self.assertEqual(res, 123) + + def test_save_caught(self): + @njit + def udt(x): + try: + if x: + raise ZeroDivisionError + r = 123 + except Exception as e: # noqa: F841 + r = 321 + return r + return r + + with self.assertRaises(UnsupportedBytecodeError) as raises: + udt(True) + self.assertIn( + "Exception object cannot be stored into variable (e)", + str(raises.exception) + ) + + def test_try_except_reraise(self): + @njit + def udt(): + try: + raise ValueError("ERROR") + except Exception: + raise + + with self.assertRaises(UnsupportedBytecodeError) as raises: + udt() + self.assertIn( + "The re-raising of an exception is not yet supported.", + str(raises.exception), + ) + + def test_try_except_reraise_chain(self): + 
@njit + def udt(): + try: + raise ValueError("ERROR") + except Exception: + try: + raise + except Exception: + raise + + with self.assertRaises(UnsupportedBytecodeError) as raises: + udt() + self.assertIn( + "The re-raising of an exception is not yet supported.", + str(raises.exception), + ) + + def test_division_operator(self): + # This test that old-style implementation propagate exception + # to the exception handler properly. + @njit + def udt(y): + try: + 1 / y + except Exception: + return 0xdead + else: + return 1 / y + + self.assertEqual(udt(0), 0xdead) + self.assertEqual(udt(2), 0.5) + + +class TestTryExceptNested(TestCase): + "Tests for complicated nesting" + + def check_compare(self, cfunc, pyfunc, *args, **kwargs): + with captured_stdout() as stdout: + pyfunc(*args, **kwargs) + expect = stdout.getvalue() + + with captured_stdout() as stdout: + cfunc(*args, **kwargs) + got = stdout.getvalue() + self.assertEqual( + expect, got, + msg="args={} kwargs={}".format(args, kwargs) + ) + + def test_try_except_else(self): + @njit + def udt(x, y, z, p): + print('A') + if x: + print('B') + try: + print('C') + if y: + print('D') + raise MyError("y") + print('E') + except Exception: # noqa: F722 + print('F') + try: + print('H') + try: + print('I') + if z: + print('J') + raise MyError('z') + print('K') + except Exception: + print('L') + else: + print('M') + except Exception: + print('N') + else: + print('O') + print('P') + else: + print('G') + print('Q') + print('R') + + cases = list(product([True, False], repeat=4)) + self.assertTrue(cases) + for x, y, z, p in cases: + self.check_compare( + udt, udt.py_func, + x=x, y=y, z=z, p=p, + ) + + def test_try_except_finally(self): + @njit + def udt(p, q): + try: + print('A') + if p: + print('B') + raise MyError + print('C') + except: # noqa: E722 + print('D') + finally: + try: + print('E') + if q: + print('F') + raise MyError + except Exception: + print('G') + else: + print('H') + finally: + print('I') + + cases = 
list(product([True, False], repeat=2)) + self.assertTrue(cases) + for p, q in cases: + self.check_compare( + udt, udt.py_func, + p=p, q=q, + ) + + +class TestTryExceptRefct(MemoryLeakMixin, TestCase): + def test_list_direct_raise(self): + @njit + def udt(n, raise_at): + lst = typed.List() + try: + for i in range(n): + if i == raise_at: + raise IndexError + lst.append(i) + except Exception: + return lst + else: + return lst + + out = udt(10, raise_at=5) + self.assertEqual(list(out), list(range(5))) + out = udt(10, raise_at=10) + self.assertEqual(list(out), list(range(10))) + + def test_list_indirect_raise(self): + @njit + def appender(lst, n, raise_at): + for i in range(n): + if i == raise_at: + raise IndexError + lst.append(i) + return lst + + @njit + def udt(n, raise_at): + lst = typed.List() + lst.append(0xbe11) + try: + appender(lst, n, raise_at) + except Exception: + return lst + else: + return lst + + out = udt(10, raise_at=5) + self.assertEqual(list(out), [0xbe11] + list(range(5))) + out = udt(10, raise_at=10) + self.assertEqual(list(out), [0xbe11] + list(range(10))) + + def test_incompatible_refinement(self): + @njit + def udt(): + try: + lst = typed.List() + print("A") + lst.append(0) + print("B") + lst.append("fda") # invalid type will cause typing error + print("C") + return lst + except Exception: + print("D") + + with self.assertRaises(TypingError) as raises: + udt() + self.assertRegex( + str(raises.exception), + r"Cannot refine type|cannot safely cast unicode_type to int(32|64)" + ) + + +class TestTryExceptOtherControlFlow(TestCase): + def test_yield(self): + @njit + def udt(n, x): + for i in range(n): + try: + if i == x: + raise ValueError + yield i + except Exception: + return + + self.assertEqual(list(udt(10, 5)), list(range(5))) + self.assertEqual(list(udt(10, 10)), list(range(10))) + + @expected_failure_py311 + @expected_failure_py312 + @expected_failure_py313 + def test_objmode(self): + @njit + def udt(): + try: + with objmode(): + 
print(object()) + except Exception: + return + + with self.assertRaises(CompilerError) as raises: + udt() + msg = ("unsupported control flow: with-context contains branches " + "(i.e. break/return/raise) that can leave the block ") + self.assertIn( + msg, + str(raises.exception), + ) + + @expected_failure_py311 + @expected_failure_py312 + @expected_failure_py313 + def test_objmode_output_type(self): + def bar(x): + return np.asarray(list(reversed(x.tolist()))) + + @njit + def test_objmode(): + x = np.arange(5) + y = np.zeros_like(x) + try: + with objmode(y='intp[:]'): # annotate return type + # this region is executed by object-mode. + y += bar(x) + except Exception: + pass + return y + + with self.assertRaises(CompilerError) as raises: + test_objmode() + msg = ("unsupported control flow: with-context contains branches " + "(i.e. break/return/raise) that can leave the block ") + self.assertIn( + msg, + str(raises.exception), + ) + + def test_reraise_opcode_unreachable(self): + # The opcode RERAISE was added in python 3.9, there should be no + # supported way to actually reach it. This test just checks that an + # exception is present to deal with if it is reached in a case known + # to produce this opcode. 
+ def pyfunc(): + try: + raise Exception + except Exception: + raise ValueError("ERROR") + for inst in dis.get_instructions(pyfunc): + if inst.opname == 'RERAISE': + break + else: + self.fail("expected RERAISE opcode not found") + func_ir = ir_utils.get_ir_of_code({}, pyfunc.__code__) + found = False + for lbl, blk in func_ir.blocks.items(): + for stmt in blk.find_insts(ir.StaticRaise): + # don't worry about guarding this strongly, if the exec_args[0] + # is a string it'll either be "ERROR" or the guard message + # saying unreachable has been reached + msg = "Unreachable condition reached (op code RERAISE executed)" + if stmt.exc_args and msg in stmt.exc_args[0]: + found = True + if not found: + self.fail("expected RERAISE unreachable message not found") + + +@skip_parfors_unsupported +class TestTryExceptParfors(TestCase): + + def test_try_in_prange_reduction(self): + # The try-except is transformed basically into chains of if-else + def udt(n): + c = 0 + for i in prange(n): + try: + c += 1 + except Exception: + c += 1 + return c + + args = [10] + expect = udt(*args) + self.assertEqual(njit(parallel=False)(udt)(*args), expect) + self.assertEqual(njit(parallel=True)(udt)(*args), expect) + + def test_try_outside_prange_reduction(self): + # The try-except is transformed basically into chains of if-else + def udt(n): + c = 0 + try: + for i in prange(n): + c += 1 + except Exception: + return 0xdead + else: + return c + + args = [10] + expect = udt(*args) + self.assertEqual(njit(parallel=False)(udt)(*args), expect) + # Parfors transformation didn't happen + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always', NumbaPerformanceWarning) + self.assertEqual(njit(parallel=True)(udt)(*args), expect) + self.assertEqual(len(w), 1) + self.assertIn("no transformation for parallel execution was possible", + str(w[0])) + + def test_try_in_prange_map(self): + def udt(arr, x): + out = arr.copy() + for i in prange(arr.size): + try: + if i == x: + raise 
ValueError + out[i] = arr[i] + i + except Exception: + out[i] = -1 + return out + + args = [np.arange(10), 6] + expect = udt(*args) + self.assertPreciseEqual(njit(parallel=False)(udt)(*args), expect) + self.assertPreciseEqual(njit(parallel=True)(udt)(*args), expect) + + def test_try_outside_prange_map(self): + def udt(arr, x): + out = arr.copy() + try: + for i in prange(arr.size): + if i == x: + raise ValueError + out[i] = arr[i] + i + except Exception: + out[i] = -1 + return out + + args = [np.arange(10), 6] + expect = udt(*args) + self.assertPreciseEqual(njit(parallel=False)(udt)(*args), expect) + self.assertPreciseEqual(njit(parallel=True)(udt)(*args), expect) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_tuples.py b/venv/lib/python3.10/site-packages/numba/tests/test_tuples.py new file mode 100644 index 0000000000000000000000000000000000000000..44145197f16185bea8c661eeabc5813d1685a360 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_tuples.py @@ -0,0 +1,764 @@ +import collections +import itertools + +import numpy as np + +from numba import njit, jit, typeof, literally +from numba.core import types, errors, utils +from numba.tests.support import TestCase, MemoryLeakMixin, tag +import unittest + + +Rect = collections.namedtuple('Rect', ('width', 'height')) + +Point = collections.namedtuple('Point', ('x', 'y', 'z')) + +Point2 = collections.namedtuple('Point2', ('x', 'y', 'z')) + +Empty = collections.namedtuple('Empty', ()) + +def tuple_return_usecase(a, b): + return a, b + +def tuple_first(tup): + a, b = tup + return a + +def tuple_second(tup): + a, b = tup + return b + +def tuple_index(tup, idx): + return tup[idx] + +def tuple_index_static(tup): + # Note the negative index + return tup[-2] + +def tuple_slice2(tup): + return tup[1:-1] + +def tuple_slice3(tup): + return tup[1::2] + +def len_usecase(tup): + return len(tup) + +def add_usecase(a, b): + return a + b + +def 
eq_usecase(a, b): + return a == b + +def ne_usecase(a, b): + return a != b + +def gt_usecase(a, b): + return a > b + +def ge_usecase(a, b): + return a >= b + +def lt_usecase(a, b): + return a < b + +def le_usecase(a, b): + return a <= b + +def in_usecase(a, b): + return a in b + +def bool_usecase(tup): + return bool(tup), (3 if tup else 2) + +def getattr_usecase(tup): + return tup.z, tup.y, tup.x + +def make_point(a, b, c): + return Point(a, b, c) + +def make_point_kws(a, b, c): + return Point(z=c, y=b, x=a) + +def make_point_nrt(n): + r = Rect(list(range(n)), np.zeros(n + 1)) + # This also exercises attribute access + p = Point(r, len(r.width), len(r.height)) + return p + +def type_usecase(tup, *args): + return type(tup)(*args) + +def identity(tup): + return tup + +def index_method_usecase(tup, value): + return tup.index(value) + +def tuple_unpack_static_getitem_err(): + # see issue3895, `c` is imprecise + a, b, c, d = [], [], [], 0.0 + a.append(1) + b.append(1) + return + + +class TestTupleLengthError(unittest.TestCase): + + def test_tuple_length_error(self): + # issue 2195 + # raise an error on tuples greater than 1000 in length + @njit + def eattuple(tup): + return len(tup) + + with self.assertRaises(errors.UnsupportedError) as raises: + tup = tuple(range(1001)) + eattuple(tup) + + expected = "Tuple 'tup' length must be smaller than 1000" + self.assertIn(expected, str(raises.exception)) + +class TestTupleTypeNotIterable(unittest.TestCase): + ''' + issue 4369 + raise an error if 'type' is not iterable + ''' + def test_namedtuple_types_exception(self): + with self.assertRaises(errors.TypingError) as raises: + types.NamedTuple(types.uint32, 'p') + self.assertIn( + "Argument 'types' is not iterable", + str(raises.exception) + ) + + def test_tuple_types_exception(self): + with self.assertRaises(errors.TypingError) as raises: + types.Tuple((types.uint32)) + self.assertIn( + "Argument 'types' is not iterable", + str(raises.exception) + ) + + +class 
TestTupleReturn(TestCase): + + def test_array_tuple(self): + aryty = types.Array(types.float64, 1, 'C') + cfunc = njit((aryty, aryty))(tuple_return_usecase) + a = b = np.arange(5, dtype='float64') + ra, rb = cfunc(a, b) + self.assertPreciseEqual(ra, a) + self.assertPreciseEqual(rb, b) + del a, b + self.assertPreciseEqual(ra, rb) + + def test_scalar_tuple(self): + scalarty = types.float32 + cfunc = njit((scalarty, scalarty))(tuple_return_usecase) + a = b = 1 + ra, rb = cfunc(a, b) + self.assertEqual(ra, a) + self.assertEqual(rb, b) + + def test_hetero_tuple(self): + alltypes = [] + allvalues = [] + + alltypes.append((types.int32, types.int64)) + allvalues.append((1, 2)) + + alltypes.append((types.float32, types.float64)) + allvalues.append((1.125, .25)) + + alltypes.append((types.int32, types.float64)) + allvalues.append((1231, .5)) + + for (ta, tb), (a, b) in zip(alltypes, allvalues): + cfunc = njit((ta, tb))(tuple_return_usecase) + ra, rb = cfunc(a, b) + self.assertPreciseEqual((ra, rb), (a, b)) + + +class TestTuplePassing(TestCase): + + def test_unituple(self): + tuple_type = types.UniTuple(types.int32, 2) + cf_first = njit((tuple_type,))(tuple_first) + cf_second = njit((tuple_type,))(tuple_second) + self.assertPreciseEqual(cf_first((4, 5)), 4) + self.assertPreciseEqual(cf_second((4, 5)), 5) + + def test_hetero_tuple(self): + tuple_type = types.Tuple((types.int64, types.float32)) + cf_first = njit((tuple_type,))(tuple_first) + cf_second = njit((tuple_type,))(tuple_second) + self.assertPreciseEqual(cf_first((2**61, 1.5)), 2**61) + self.assertPreciseEqual(cf_second((2**61, 1.5)), 1.5) + + def test_size_mismatch(self): + # Issue #1638: tuple size should be checked when unboxing + tuple_type = types.UniTuple(types.int32, 2) + cfunc = njit((tuple_type,))(tuple_first) + entry_point = cfunc.overloads[cfunc.signatures[0]].entry_point + with self.assertRaises(ValueError) as raises: + entry_point((4, 5, 6)) + self.assertEqual(str(raises.exception), + ("size mismatch for 
tuple, " + "expected 2 element(s) but got 3")) + + +class TestOperations(TestCase): + + def test_len(self): + pyfunc = len_usecase + cfunc = njit((types.Tuple((types.int64, types.float32)),))(pyfunc) + self.assertPreciseEqual(cfunc((4, 5)), 2) + cfunc = njit((types.UniTuple(types.int64, 3),))(pyfunc) + self.assertPreciseEqual(cfunc((4, 5, 6)), 3) + + def test_index_literal(self): + # issue #6023, test non-static getitem with IntegerLiteral index + def pyfunc(tup, idx): + idx = literally(idx) + return tup[idx] + cfunc = njit(pyfunc) + + tup = (4, 3.1, 'sss') + for i in range(len(tup)): + self.assertPreciseEqual(cfunc(tup, i), tup[i]) + + def test_index(self): + pyfunc = tuple_index + cfunc = njit((types.UniTuple(types.int64, 3), types.int64),)(pyfunc) + tup = (4, 3, 6) + for i in range(len(tup)): + self.assertPreciseEqual(cfunc(tup, i), tup[i]) + + # test negative indexing + for i in range(len(tup) + 1): + self.assertPreciseEqual(cfunc(tup, -i), tup[-i]) + + # oob indexes, +ve then -ve + with self.assertRaises(IndexError) as raises: + cfunc(tup, len(tup)) + self.assertEqual("tuple index out of range", str(raises.exception)) + with self.assertRaises(IndexError) as raises: + cfunc(tup, -(len(tup) + 1)) + self.assertEqual("tuple index out of range", str(raises.exception)) + + # Test empty tuple, this is a bit unusual as `njit` will infer the empty + # tuple arg as a types.Tuple and not match the compiled signature, this + # is essentially because the test originally relied on + # `compile_isolated`. 
+ args = (types.UniTuple(types.int64, 0), types.int64,) + cr = njit(args)(pyfunc).overloads[args] + with self.assertRaises(IndexError) as raises: + cr.entry_point((), 0) + self.assertEqual("tuple index out of range", str(raises.exception)) + + # test uintp indexing (because, e.g., parfor generates unsigned prange) + cfunc = njit((types.UniTuple(types.int64, 3), types.uintp,),)(pyfunc) + for i in range(len(tup)): + self.assertPreciseEqual(cfunc(tup, types.uintp(i)), tup[i]) + + # With a compile-time static index (the code generation path is + # different) + pyfunc = tuple_index_static + for typ in (types.UniTuple(types.int64, 4), + types.Tuple((types.int64, types.int32, types.int64, types.int32))): + cfunc = njit((typ,))(pyfunc) + tup = (4, 3, 42, 6) + self.assertPreciseEqual(cfunc(tup), pyfunc(tup)) + + typ = types.UniTuple(types.int64, 1) + with self.assertTypingError(): + njit((typ,))(pyfunc) + + # test unpack, staticgetitem with imprecise type (issue #3895) + pyfunc = tuple_unpack_static_getitem_err + with self.assertTypingError() as raises: + njit((),)(pyfunc) + msg = ("Cannot infer the type of variable 'c', have imprecise type: " + "list(undefined).") + self.assertIn(msg, str(raises.exception)) + + def test_in(self): + pyfunc = in_usecase + cfunc = njit((types.int64, types.UniTuple(types.int64, 3),),)(pyfunc) + tup = (4, 1, 5) + for i in range(5): + self.assertPreciseEqual(cfunc(i, tup), pyfunc(i, tup)) + + # Test the empty case + cfunc = njit((types.int64, types.Tuple([]),),)(pyfunc) + self.assertPreciseEqual(cfunc(1, ()), pyfunc(1, ())) + + def check_slice(self, pyfunc): + tup = (4, 5, 6, 7) + cfunc = njit((types.UniTuple(types.int64, 4),),)(pyfunc) + self.assertPreciseEqual(cfunc(tup), pyfunc(tup)) + args = types.Tuple((types.int64, types.int32, types.int64, types.int32)) + cfunc = njit((args,))(pyfunc) + self.assertPreciseEqual(cfunc(tup), pyfunc(tup)) + + def test_slice2(self): + self.check_slice(tuple_slice2) + + def test_slice3(self): + 
self.check_slice(tuple_slice3) + + def test_bool(self): + pyfunc = bool_usecase + cfunc = njit((types.Tuple((types.int64, types.int32)),),)(pyfunc) + args = ((4, 5),) + self.assertPreciseEqual(cfunc(*args), pyfunc(*args)) + cfunc = njit((types.UniTuple(types.int64, 3),),)(pyfunc) + args = ((4, 5, 6),) + self.assertPreciseEqual(cfunc(*args), pyfunc(*args)) + cfunc = njit((types.Tuple(()),),)(pyfunc) + self.assertPreciseEqual(cfunc(()), pyfunc(())) + + def test_add(self): + pyfunc = add_usecase + samples = [(types.Tuple(()), ()), + (types.UniTuple(types.int32, 0), ()), + (types.UniTuple(types.int32, 1), (42,)), + (types.Tuple((types.int64, types.float32)), (3, 4.5)), + ] + for (ta, a), (tb, b) in itertools.product(samples, samples): + cfunc = njit((ta, tb),)(pyfunc) + expected = pyfunc(a, b) + got = cfunc(a, b) + self.assertPreciseEqual(got, expected, msg=(ta, tb)) + + def _test_compare(self, pyfunc): + def eq(pyfunc, cfunc, args): + self.assertIs(cfunc(*args), pyfunc(*args), + "mismatch for arguments %s" % (args,)) + + # Same-sized tuples + argtypes = [types.Tuple((types.int64, types.float32)), + types.UniTuple(types.int32, 2)] + for ta, tb in itertools.product(argtypes, argtypes): + cfunc = njit((ta, tb),)(pyfunc) + for args in [((4, 5), (4, 5)), + ((4, 5), (4, 6)), + ((4, 6), (4, 5)), + ((4, 5), (5, 4))]: + eq(pyfunc, cfunc, args) + # Different-sized tuples + argtypes = [types.Tuple((types.int64, types.float32)), + types.UniTuple(types.int32, 3)] + cfunc = njit(tuple(argtypes),)(pyfunc) + for args in [((4, 5), (4, 5, 6)), + ((4, 5), (4, 4, 6)), + ((4, 5), (4, 6, 7))]: + eq(pyfunc, cfunc, args) + + def test_eq(self): + self._test_compare(eq_usecase) + + def test_ne(self): + self._test_compare(ne_usecase) + + def test_gt(self): + self._test_compare(gt_usecase) + + def test_ge(self): + self._test_compare(ge_usecase) + + def test_lt(self): + self._test_compare(lt_usecase) + + def test_le(self): + self._test_compare(le_usecase) + + +class TestNamedTuple(TestCase, 
MemoryLeakMixin): + + def test_unpack(self): + def check(p): + for pyfunc in tuple_first, tuple_second: + cfunc = jit(nopython=True)(pyfunc) + self.assertPreciseEqual(cfunc(p), pyfunc(p)) + + # Homogeneous + check(Rect(4, 5)) + # Heterogeneous + check(Rect(4, 5.5)) + + def test_len(self): + def check(p): + pyfunc = len_usecase + cfunc = jit(nopython=True)(pyfunc) + self.assertPreciseEqual(cfunc(p), pyfunc(p)) + + # Homogeneous + check(Rect(4, 5)) + check(Point(4, 5, 6)) + # Heterogeneous + check(Rect(4, 5.5)) + check(Point(4, 5.5, 6j)) + + def test_index(self): + pyfunc = tuple_index + cfunc = jit(nopython=True)(pyfunc) + + p = Point(4, 5, 6) + for i in range(len(p)): + self.assertPreciseEqual(cfunc(p, i), pyfunc(p, i)) + + # test uintp indexing (because, e.g., parfor generates unsigned prange) + for i in range(len(p)): + self.assertPreciseEqual(cfunc(p, types.uintp(i)), pyfunc(p, i)) + + def test_bool(self): + def check(p): + pyfunc = bool_usecase + cfunc = jit(nopython=True)(pyfunc) + self.assertPreciseEqual(cfunc(p), pyfunc(p)) + + # Homogeneous + check(Rect(4, 5)) + # Heterogeneous + check(Rect(4, 5.5)) + check(Empty()) + + def _test_compare(self, pyfunc): + def eq(pyfunc, cfunc, args): + self.assertIs(cfunc(*args), pyfunc(*args), + "mismatch for arguments %s" % (args,)) + + cfunc = jit(nopython=True)(pyfunc) + + # Same-sized named tuples + for a, b in [((4, 5), (4, 5)), + ((4, 5), (4, 6)), + ((4, 6), (4, 5)), + ((4, 5), (5, 4))]: + eq(pyfunc, cfunc, (Rect(*a), Rect(*b))) + + # Different-sized named tuples + for a, b in [((4, 5), (4, 5, 6)), + ((4, 5), (4, 4, 6)), + ((4, 5), (4, 6, 7))]: + eq(pyfunc, cfunc, (Rect(*a), Point(*b))) + + def test_eq(self): + self._test_compare(eq_usecase) + + def test_ne(self): + self._test_compare(ne_usecase) + + def test_gt(self): + self._test_compare(gt_usecase) + + def test_ge(self): + self._test_compare(ge_usecase) + + def test_lt(self): + self._test_compare(lt_usecase) + + def test_le(self): + self._test_compare(le_usecase) + 
+ def test_getattr(self): + pyfunc = getattr_usecase + cfunc = jit(nopython=True)(pyfunc) + + for args in (4, 5, 6), (4, 5.5, 6j): + p = Point(*args) + self.assertPreciseEqual(cfunc(p), pyfunc(p)) + + def test_construct(self): + def check(pyfunc): + cfunc = jit(nopython=True)(pyfunc) + for args in (4, 5, 6), (4, 5.5, 6j): + expected = pyfunc(*args) + got = cfunc(*args) + self.assertIs(type(got), type(expected)) + self.assertPreciseEqual(got, expected) + + check(make_point) + check(make_point_kws) + + def test_type(self): + # Test the type() built-in on named tuples + pyfunc = type_usecase + cfunc = jit(nopython=True)(pyfunc) + + arg_tuples = [(4, 5, 6), (4, 5.5, 6j)] + for tup_args, args in itertools.product(arg_tuples, arg_tuples): + tup = Point(*tup_args) + expected = pyfunc(tup, *args) + got = cfunc(tup, *args) + self.assertIs(type(got), type(expected)) + self.assertPreciseEqual(got, expected) + + def test_literal_unification(self): + # Test for #3565. + @jit(nopython=True) + def Data1(value): + return Rect(value, -321) + + @jit(nopython=True) + def call(i, j): + if j == 0: + # In the error, `result` is typed to `Rect(int, LiteralInt)` + # because of the `-321` literal. This doesn't match the + # `result` type in the other branch. 
+ result = Data1(i) + else: + # `result` is typed to be `Rect(int, int)` + result = Rect(i, j) + return result + + r = call(123, 1321) + self.assertEqual(r, Rect(width=123, height=1321)) + r = call(123, 0) + self.assertEqual(r, Rect(width=123, height=-321)) + + def test_string_literal_in_ctor(self): + # Test for issue #3813 + + @jit(nopython=True) + def foo(): + return Rect(10, 'somestring') + + r = foo() + self.assertEqual(r, Rect(width=10, height='somestring')) + + def test_dispatcher_mistreat(self): + # Test for issue #5215 that mistreat namedtuple as tuples + @jit(nopython=True) + def foo(x): + return x + + in1 = (1, 2, 3) + out1 = foo(in1) + self.assertEqual(in1, out1) + + in2 = Point(1, 2, 3) + out2 = foo(in2) + self.assertEqual(in2, out2) + + # Check the signatures + self.assertEqual(len(foo.nopython_signatures), 2) + self.assertEqual(foo.nopython_signatures[0].args[0], typeof(in1)) + self.assertEqual(foo.nopython_signatures[1].args[0], typeof(in2)) + + # Differently named + in3 = Point2(1, 2, 3) + out3 = foo(in3) + self.assertEqual(in3, out3) + self.assertEqual(len(foo.nopython_signatures), 3) + self.assertEqual(foo.nopython_signatures[2].args[0], typeof(in3)) + + +class TestTupleNRT(TestCase, MemoryLeakMixin): + def test_tuple_add(self): + def pyfunc(x): + a = np.arange(3) + return (a,) + (x,) + + cfunc = jit(nopython=True)(pyfunc) + x = 123 + expect_a, expect_x = pyfunc(x) + got_a, got_x = cfunc(x) + np.testing.assert_equal(got_a, expect_a) + self.assertEqual(got_x, expect_x) + + +class TestNamedTupleNRT(TestCase, MemoryLeakMixin): + + def test_return(self): + # Check returning a namedtuple with a list inside it + pyfunc = make_point_nrt + cfunc = jit(nopython=True)(pyfunc) + + for arg in (3, 0): + expected = pyfunc(arg) + got = cfunc(arg) + self.assertIs(type(got), type(expected)) + self.assertPreciseEqual(got, expected) + + +class TestConversions(TestCase): + """ + Test implicit conversions between tuple types. 
+ """ + + def check_conversion(self, fromty, toty, val): + pyfunc = identity + cfunc = njit(toty(fromty))(pyfunc) + res = cfunc(val) + self.assertEqual(res, val) + + def test_conversions(self): + check = self.check_conversion + fromty = types.UniTuple(types.int32, 2) + check(fromty, types.UniTuple(types.float32, 2), (4, 5)) + check(fromty, types.Tuple((types.float32, types.int16)), (4, 5)) + aty = types.UniTuple(types.int32, 0) + bty = types.Tuple(()) + check(aty, bty, ()) + check(bty, aty, ()) + + with self.assertRaises(errors.TypingError) as raises: + check(fromty, types.Tuple((types.float32,)), (4, 5)) + msg = "No conversion from UniTuple(int32 x 2) to UniTuple(float32 x 1)" + self.assertIn(msg, str(raises.exception)) + + +class TestMethods(TestCase): + + def test_index(self): + pyfunc = index_method_usecase + cfunc = jit(nopython=True)(pyfunc) + self.assertEqual(cfunc((1, 2, 3), 2), 1) + + with self.assertRaises(ValueError) as raises: + cfunc((1, 2, 3), 4) + msg = 'tuple.index(x): x not in tuple' + self.assertEqual(msg, str(raises.exception)) + + +class TestTupleBuild(TestCase): + + def test_build_unpack(self): + def check(p): + pyfunc = lambda a: (1, *a) + cfunc = jit(nopython=True)(pyfunc) + self.assertPreciseEqual(cfunc(p), pyfunc(p)) + + # Homogeneous + check((4, 5)) + # Heterogeneous + check((4, 5.5)) + + def test_build_unpack_assign_like(self): + # see #6534 + def check(p): + pyfunc = lambda a: (*a,) + cfunc = jit(nopython=True)(pyfunc) + self.assertPreciseEqual(cfunc(p), pyfunc(p)) + + # Homogeneous + check((4, 5)) + # Heterogeneous + check((4, 5.5)) + + def test_build_unpack_fail_on_list_assign_like(self): + # see #6534 + def check(p): + pyfunc = lambda a: (*a,) + cfunc = jit(nopython=True)(pyfunc) + self.assertPreciseEqual(cfunc(p), pyfunc(p)) + + with self.assertRaises(errors.TypingError) as raises: + check([4, 5]) + + # Python 3.9 has a peephole rewrite due to large changes in tuple + # unpacking. 
It results in a tuple + list situation from the above + # so the error message reflects that. Catching this specific and + # seemingly rare sequence in the peephole rewrite is prohibitively + # hard. Should it be reported numerous times, revisit then. + msg1 = "No implementation of function" + self.assertIn(msg1, str(raises.exception)) + msg2 = "tuple(reflected list(" # ignore the rest of reflected list + # part, it's repr is quite volatile. + self.assertIn(msg2, str(raises.exception)) + + def test_build_unpack_more(self): + def check(p): + pyfunc = lambda a: (1, *a, (1, 2), *a) + cfunc = jit(nopython=True)(pyfunc) + self.assertPreciseEqual(cfunc(p), pyfunc(p)) + + # Homogeneous + check((4, 5)) + # Heterogeneous + check((4, 5.5)) + + def test_build_unpack_call(self): + def check(p): + @jit + def inner(*args): + return args + pyfunc = lambda a: inner(1, *a) + cfunc = jit(nopython=True)(pyfunc) + self.assertPreciseEqual(cfunc(p), pyfunc(p)) + + # Homogeneous + check((4, 5)) + # Heterogeneous + check((4, 5.5)) + + def test_build_unpack_call_more(self): + def check(p): + @jit + def inner(*args): + return args + pyfunc = lambda a: inner(1, *a, *(1, 2), *a) + cfunc = jit(nopython=True)(pyfunc) + self.assertPreciseEqual(cfunc(p), pyfunc(p)) + + # Homogeneous + check((4, 5)) + # Heterogeneous + check((4, 5.5)) + + def test_tuple_constructor(self): + def check(pyfunc, arg): + cfunc = jit(nopython=True)(pyfunc) + self.assertPreciseEqual(cfunc(arg), pyfunc(arg)) + + # empty + check(lambda _: tuple(), ()) + # Homogeneous + check(lambda a: tuple(a), (4, 5)) + # Heterogeneous + check(lambda a: tuple(a), (4, 5.5)) + + def test_unpack_with_predicate_fails(self): + # this fails as the list_to_tuple/list_extend peephole bytecode + # rewriting needed for Python 3.9+ cannot yet traverse the CFG. 
+ @njit + def foo(): + a = (1,) + b = (3,2, 4) + return (*(b if a[0] else (5, 6)),) + + with self.assertRaises(errors.UnsupportedBytecodeError) as raises: + foo() + msg = "op_LIST_EXTEND at the start of a block" + self.assertIn(msg, str(raises.exception)) + + def test_build_unpack_with_calls_in_unpack(self): + def check(p): + def pyfunc(a): + z = [1, 2] + return (*a, z.append(3), z.extend(a), np.ones(3)), z + + cfunc = jit(nopython=True)(pyfunc) + self.assertPreciseEqual(cfunc(p), pyfunc(p)) + + check((4, 5)) + + def test_build_unpack_complicated(self): + def check(p): + def pyfunc(a): + z = [1, 2] + return (*a, *(*a, a), *(a, (*(a, (1, 2), *(3,), *a), + (a, 1, (2, 3), *a, 1), (1,))), + *(z.append(4), z.extend(a))), z + + cfunc = jit(nopython=True)(pyfunc) + self.assertPreciseEqual(cfunc(p), pyfunc(p)) + + check((10, 20)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_typeconv.py b/venv/lib/python3.10/site-packages/numba/tests/test_typeconv.py new file mode 100644 index 0000000000000000000000000000000000000000..4042542b65b8e210f83134ee46959c650715a0da --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_typeconv.py @@ -0,0 +1,295 @@ +import itertools + +from numba.core import types +from numba.core.typeconv.typeconv import TypeManager, TypeCastingRules +from numba.core.typeconv import rules +from numba.core.typeconv import castgraph, Conversion +import unittest + + +class CompatibilityTestMixin(unittest.TestCase): + + def check_number_compatibility(self, check_compatible): + b = types.boolean + i8 = types.int8 + i16 = types.int16 + i32 = types.int32 + i64 = types.int64 + u8 = types.uint8 + u16 = types.uint16 + u32 = types.uint32 + u64 = types.uint64 + f16 = types.float16 + f32 = types.float32 + f64 = types.float64 + c64 = types.complex64 + c128 = types.complex128 + + self.assertEqual(check_compatible(i32, i32), Conversion.exact) + + self.assertEqual(check_compatible(b, i8), 
Conversion.safe) + self.assertEqual(check_compatible(b, u8), Conversion.safe) + self.assertEqual(check_compatible(i8, b), Conversion.unsafe) + self.assertEqual(check_compatible(u8, b), Conversion.unsafe) + + self.assertEqual(check_compatible(i32, i64), Conversion.promote) + self.assertEqual(check_compatible(i32, u32), Conversion.unsafe) + self.assertEqual(check_compatible(u32, i32), Conversion.unsafe) + self.assertEqual(check_compatible(u32, i64), Conversion.safe) + + self.assertEqual(check_compatible(i16, f16), Conversion.unsafe) + self.assertEqual(check_compatible(i32, f32), Conversion.unsafe) + self.assertEqual(check_compatible(u32, f32), Conversion.unsafe) + self.assertEqual(check_compatible(i32, f64), Conversion.safe) + self.assertEqual(check_compatible(u32, f64), Conversion.safe) + # Note this is inconsistent with i32 -> f32... + self.assertEqual(check_compatible(i64, f64), Conversion.safe) + self.assertEqual(check_compatible(u64, f64), Conversion.safe) + + self.assertEqual(check_compatible(f32, c64), Conversion.safe) + self.assertEqual(check_compatible(f64, c128), Conversion.safe) + self.assertEqual(check_compatible(f64, c64), Conversion.unsafe) + + # Propagated compatibility relationships + self.assertEqual(check_compatible(i16, f64), Conversion.safe) + self.assertEqual(check_compatible(i16, i64), Conversion.promote) + self.assertEqual(check_compatible(i32, c64), Conversion.unsafe) + self.assertEqual(check_compatible(i32, c128), Conversion.safe) + self.assertEqual(check_compatible(i32, u64), Conversion.unsafe) + + for ta, tb in itertools.product(types.number_domain, + types.number_domain): + if ta in types.complex_domain and tb not in types.complex_domain: + continue + self.assertTrue(check_compatible(ta, tb) is not None, + msg="No cast from %s to %s" % (ta, tb)) + + +class TestTypeConv(CompatibilityTestMixin, unittest.TestCase): + + def test_typeconv(self): + tm = TypeManager() + + i32 = types.int32 + i64 = types.int64 + f32 = types.float32 + + 
tm.set_promote(i32, i64) + tm.set_unsafe_convert(i32, f32) + + sig = (i32, f32) + ovs = [ + (i32, i32), + (f32, f32), + (i64, i64), + ] + + # allow_unsafe = True => a conversion from i32 to f32 is chosen + sel = tm.select_overload(sig, ovs, True, False) + self.assertEqual(sel, 1) + # allow_unsafe = False => no overload available + with self.assertRaises(TypeError): + sel = tm.select_overload(sig, ovs, False, False) + + def test_default_rules(self): + tm = rules.default_type_manager + self.check_number_compatibility(tm.check_compatible) + + def test_overload1(self): + tm = rules.default_type_manager + + i32 = types.int32 + i64 = types.int64 + + sig = (i64, i32, i32) + ovs = [ + (i32, i32, i32), + (i64, i64, i64), + ] + # The first overload is unsafe, the second is safe => the second + # is always chosen, regardless of allow_unsafe. + self.assertEqual(tm.select_overload(sig, ovs, True, False), 1) + self.assertEqual(tm.select_overload(sig, ovs, False, False), 1) + + def test_overload2(self): + tm = rules.default_type_manager + + i16 = types.int16 + i32 = types.int32 + i64 = types.int64 + + sig = (i32, i16, i32) + ovs = [ + # Three promotes + (i64, i64, i64), + # One promotes, two exact types + (i32, i32, i32), + # Two unsafe converts, one exact type + (i16, i16, i16), + ] + self.assertEqual(tm.select_overload(sig, ovs, allow_unsafe=False, + exact_match_required=False), 1) + self.assertEqual(tm.select_overload(sig, ovs, allow_unsafe=True, + exact_match_required=False), 1) + + # The same in reverse order + ovs.reverse() + self.assertEqual(tm.select_overload(sig, ovs, allow_unsafe=False, + exact_match_required=False), 1) + self.assertEqual(tm.select_overload(sig, ovs, allow_unsafe=True, + exact_match_required=False), 1) + + def test_overload3(self): + # Promotes should be preferred over safe converts + tm = rules.default_type_manager + + i32 = types.int32 + i64 = types.int64 + f64 = types.float64 + + sig = (i32, i32) + ovs = [ + # Two promotes + (i64, i64), + # Two safe 
converts + (f64, f64), + ] + self.assertEqual(tm.select_overload(sig, ovs, allow_unsafe=False, + exact_match_required=False), 0) + self.assertEqual(tm.select_overload(sig, ovs, allow_unsafe=True, + exact_match_required=False), 0) + + # The same in reverse order + ovs.reverse() + self.assertEqual(tm.select_overload(sig, ovs, allow_unsafe=False, + exact_match_required=False), 1) + self.assertEqual(tm.select_overload(sig, ovs, allow_unsafe=True, + exact_match_required=False), 1) + + def test_overload4(self): + tm = rules.default_type_manager + + i16 = types.int16 + i32 = types.int32 + i64 = types.int64 + f16 = types.float16 + f32 = types.float32 + + sig = (i16, f16, f16) + ovs = [ + # One unsafe, one promote, one exact + (f16, f32, f16), + # Two unsafe, one exact types + (f32, i32, f16), + ] + + self.assertEqual(tm.select_overload(sig, ovs, allow_unsafe=True, + exact_match_required=False), 0) + + def test_type_casting_rules(self): + tm = TypeManager() + tcr = TypeCastingRules(tm) + + i16 = types.int16 + i32 = types.int32 + i64 = types.int64 + f64 = types.float64 + f32 = types.float32 + f16 = types.float16 + made_up = types.Dummy("made_up") + + tcr.promote_unsafe(i32, i64) + tcr.safe_unsafe(i32, f64) + tcr.promote_unsafe(f32, f64) + tcr.promote_unsafe(f16, f32) + tcr.unsafe_unsafe(i16, f16) + + def base_test(): + # As declared + self.assertEqual(tm.check_compatible(i32, i64), Conversion.promote) + self.assertEqual(tm.check_compatible(i32, f64), Conversion.safe) + self.assertEqual(tm.check_compatible(f16, f32), Conversion.promote) + self.assertEqual(tm.check_compatible(f32, f64), Conversion.promote) + self.assertEqual(tm.check_compatible(i64, i32), Conversion.unsafe) + self.assertEqual(tm.check_compatible(f64, i32), Conversion.unsafe) + self.assertEqual(tm.check_compatible(f64, f32), Conversion.unsafe) + + # Propagated + self.assertEqual(tm.check_compatible(i64, f64), Conversion.unsafe) + self.assertEqual(tm.check_compatible(f64, i64), Conversion.unsafe) + 
self.assertEqual(tm.check_compatible(i64, f32), Conversion.unsafe) + self.assertEqual(tm.check_compatible(i32, f32), Conversion.unsafe) + self.assertEqual(tm.check_compatible(f32, i32), Conversion.unsafe) + self.assertEqual(tm.check_compatible(i16, f16), Conversion.unsafe) + self.assertEqual(tm.check_compatible(f16, i16), Conversion.unsafe) + + # Test base graph + base_test() + + self.assertIsNone(tm.check_compatible(i64, made_up)) + self.assertIsNone(tm.check_compatible(i32, made_up)) + self.assertIsNone(tm.check_compatible(f32, made_up)) + self.assertIsNone(tm.check_compatible(made_up, f64)) + self.assertIsNone(tm.check_compatible(made_up, i64)) + + # Add new test + tcr.promote(f64, made_up) + tcr.unsafe(made_up, i32) + + # Ensure the graph did not change by adding the new type + base_test() + + # To "made up" type + self.assertEqual(tm.check_compatible(i64, made_up), Conversion.unsafe) + self.assertEqual(tm.check_compatible(i32, made_up), Conversion.safe) + self.assertEqual(tm.check_compatible(f32, made_up), Conversion.promote) + self.assertEqual(tm.check_compatible(made_up, f64), Conversion.unsafe) + self.assertEqual(tm.check_compatible(made_up, i64), Conversion.unsafe) + + def test_castgraph_propagate(self): + saved = [] + + def callback(src, dst, rel): + saved.append((src, dst, rel)) + + tg = castgraph.TypeGraph(callback) + + i32 = types.int32 + i64 = types.int64 + f64 = types.float64 + f32 = types.float32 + + tg.insert_rule(i32, i64, Conversion.promote) + tg.insert_rule(i64, i32, Conversion.unsafe) + + saved.append(None) + + tg.insert_rule(i32, f64, Conversion.safe) + tg.insert_rule(f64, i32, Conversion.unsafe) + + saved.append(None) + + tg.insert_rule(f32, f64, Conversion.promote) + tg.insert_rule(f64, f32, Conversion.unsafe) + + self.assertIn((i32, i64, Conversion.promote), saved[0:2]) + self.assertIn((i64, i32, Conversion.unsafe), saved[0:2]) + self.assertIs(saved[2], None) + + self.assertIn((i32, f64, Conversion.safe), saved[3:7]) + self.assertIn((f64, 
i32, Conversion.unsafe), saved[3:7]) + self.assertIn((i64, f64, Conversion.unsafe), saved[3:7]) + self.assertIn((i64, f64, Conversion.unsafe), saved[3:7]) + self.assertIs(saved[7], None) + + self.assertIn((f32, f64, Conversion.promote), saved[8:14]) + self.assertIn((f64, f32, Conversion.unsafe), saved[8:14]) + self.assertIn((f32, i32, Conversion.unsafe), saved[8:14]) + self.assertIn((i32, f32, Conversion.unsafe), saved[8:14]) + self.assertIn((f32, i64, Conversion.unsafe), saved[8:14]) + self.assertIn((i64, f32, Conversion.unsafe), saved[8:14]) + self.assertEqual(len(saved[14:]), 0) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_typedlist.py b/venv/lib/python3.10/site-packages/numba/tests/test_typedlist.py new file mode 100644 index 0000000000000000000000000000000000000000..79098980158c59ae4930d8b7b885ee04c74aa2d6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_typedlist.py @@ -0,0 +1,1689 @@ +import sys +import subprocess +from itertools import product +from textwrap import dedent + +import numpy as np + +from numba import config +from numba import njit +from numba import int32, float32, prange, uint8 +from numba.core import types +from numba import typeof +from numba.typed import List, Dict +from numba.core.errors import TypingError +from numba.tests.support import (TestCase, MemoryLeakMixin, override_config, + forbid_codegen, skip_parfors_unsupported) +from numba.core.unsafe.refcount import get_refcount +from numba.experimental import jitclass + + +def to_tl(l): + """ Convert cpython list to typed-list. 
""" + tl = List.empty_list(int32) + for k in l: + tl.append(k) + return tl + + +class TestTypedList(MemoryLeakMixin, TestCase): + + def test_basic(self): + l = List.empty_list(int32) + # len + self.assertEqual(len(l), 0) + # append + l.append(0) + # len + self.assertEqual(len(l), 1) + # setitem + l.append(0) + l.append(0) + l[0] = 10 + l[1] = 11 + l[2] = 12 + # getitem + self.assertEqual(l[0], 10) + self.assertEqual(l[1], 11) + self.assertEqual(l[2], 12) + self.assertEqual(l[-3], 10) + self.assertEqual(l[-2], 11) + self.assertEqual(l[-1], 12) + # __iter__ + # the default __iter__ from MutableSequence will raise an IndexError + # via __getitem__ and thus leak an exception, so this shouldn't + for i in l: + pass + # contains + self.assertTrue(10 in l) + self.assertFalse(0 in l) + # count + l.append(12) + self.assertEqual(l.count(0), 0) + self.assertEqual(l.count(10), 1) + self.assertEqual(l.count(12), 2) + # pop + self.assertEqual(len(l), 4) + self.assertEqual(l.pop(), 12) + self.assertEqual(len(l), 3) + self.assertEqual(l.pop(1), 11) + self.assertEqual(len(l), 2) + # extend + l.extend((100, 200, 300)) + self.assertEqual(len(l), 5) + self.assertEqual(list(l), [10, 12, 100, 200, 300]) + # insert + l.insert(0, 0) + self.assertEqual(list(l), [0, 10, 12, 100, 200, 300]) + l.insert(3, 13) + self.assertEqual(list(l), [0, 10, 12, 13, 100, 200, 300]) + l.insert(100, 400) + self.assertEqual(list(l), [0, 10, 12, 13, 100, 200, 300, 400]) + # remove + l.remove(0) + l.remove(400) + l.remove(13) + self.assertEqual(list(l), [10, 12, 100, 200, 300]) + # clear + l.clear() + self.assertEqual(len(l), 0) + self.assertEqual(list(l), []) + # reverse + l.extend(tuple(range(10, 20))) + l.reverse() + self.assertEqual(list(l), list(range(10, 20))[::-1]) + # copy + new = l.copy() + self.assertEqual(list(new), list(range(10, 20))[::-1]) + # equal + self.assertEqual(l, new) + # not equal + new[-1] = 42 + self.assertNotEqual(l, new) + # index + self.assertEqual(l.index(15), 4) + + def 
test_list_extend_refines_on_unicode_type(self): + @njit + def foo(string): + l = List() + l.extend(string) + return l + + for func in (foo, foo.py_func): + for string in ("a", "abc", "\nabc\t"): + self.assertEqual(list(func(string)), list(string)) + + def test_unsigned_access(self): + L = List.empty_list(int32) + ui32_0 = types.uint32(0) + ui32_1 = types.uint32(1) + ui32_2 = types.uint32(2) + + # insert + L.append(types.uint32(10)) + L.append(types.uint32(11)) + L.append(types.uint32(12)) + self.assertEqual(len(L), 3) + + # getitem + self.assertEqual(L[ui32_0], 10) + self.assertEqual(L[ui32_1], 11) + self.assertEqual(L[ui32_2], 12) + + # setitem + L[ui32_0] = 123 + L[ui32_1] = 456 + L[ui32_2] = 789 + self.assertEqual(L[ui32_0], 123) + self.assertEqual(L[ui32_1], 456) + self.assertEqual(L[ui32_2], 789) + + # index + ui32_123 = types.uint32(123) + ui32_456 = types.uint32(456) + ui32_789 = types.uint32(789) + self.assertEqual(L.index(ui32_123), 0) + self.assertEqual(L.index(ui32_456), 1) + self.assertEqual(L.index(ui32_789), 2) + + # delitem + L.__delitem__(ui32_2) + del L[ui32_1] + self.assertEqual(len(L), 1) + self.assertEqual(L[ui32_0], 123) + + # pop + L.append(2) + L.append(3) + L.append(4) + self.assertEqual(len(L), 4) + self.assertEqual(L.pop(), 4) + self.assertEqual(L.pop(ui32_2), 3) + self.assertEqual(L.pop(ui32_1), 2) + self.assertEqual(L.pop(ui32_0), 123) + + def test_dtype(self): + + L = List.empty_list(int32) + self.assertEqual(L._dtype, int32) + + L = List.empty_list(float32) + self.assertEqual(L._dtype, float32) + + @njit + def foo(): + li, lf = List(), List() + li.append(int32(1)) + lf.append(float32(1.0)) + return li._dtype, lf._dtype + + self.assertEqual(foo(), (np.dtype('int32'), np.dtype('float32'))) + self.assertEqual(foo.py_func(), (int32, float32)) + + def test_dtype_raises_exception_on_untyped_list(self): + + with self.assertRaises(RuntimeError) as raises: + L = List() + L._dtype + self.assertIn( + "invalid operation on untyped list", + 
str(raises.exception), + ) + + @skip_parfors_unsupported + def test_unsigned_prange(self): + @njit(parallel=True) + def foo(a): + r = types.uint64(3) + s = types.uint64(0) + for i in prange(r): + s = s + a[i] + return s + + a = List.empty_list(types.uint64) + a.append(types.uint64(12)) + a.append(types.uint64(1)) + a.append(types.uint64(7)) + self.assertEqual(foo(a), 20) + + def test_compiled(self): + @njit + def producer(): + l = List.empty_list(int32) + l.append(23) + return l + + @njit + def consumer(l): + return l[0] + + l = producer() + val = consumer(l) + self.assertEqual(val, 23) + + def test_getitem_slice(self): + """ Test getitem using a slice. + + This tests suffers from combinatorial explosion, so we parametrize it + and compare results against the regular list in a quasi fuzzing + approach. + + """ + # initialize regular list + rl = list(range(10, 20)) + # initialize typed list + tl = List.empty_list(int32) + for i in range(10, 20): + tl.append(i) + # define the ranges + start_range = list(range(-20, 30)) + stop_range = list(range(-20, 30)) + step_range = [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5] + + # check that they are the same initially + self.assertEqual(rl, list(tl)) + # check that copy by slice works, no start, no stop, no step + self.assertEqual(rl[:], list(tl[:])) + + # start only + for sa in start_range: + self.assertEqual(rl[sa:], list(tl[sa:])) + # stop only + for so in stop_range: + self.assertEqual(rl[:so], list(tl[:so])) + # step only + for se in step_range: + self.assertEqual(rl[::se], list(tl[::se])) + + # start and stop + for sa, so in product(start_range, stop_range): + self.assertEqual(rl[sa:so], list(tl[sa:so])) + # start and step + for sa, se in product(start_range, step_range): + self.assertEqual(rl[sa::se], list(tl[sa::se])) + # stop and step + for so, se in product(stop_range, step_range): + self.assertEqual(rl[:so:se], list(tl[:so:se])) + + # start, stop and step + for sa, so, se in product(start_range, stop_range, step_range): + 
self.assertEqual(rl[sa:so:se], list(tl[sa:so:se])) + + def test_setitem_slice(self): + """ Test setitem using a slice. + + This tests suffers from combinatorial explosion, so we parametrize it + and compare results against the regular list in a quasi fuzzing + approach. + + """ + + def setup(start=10, stop=20): + # initialize regular list + rl_ = list(range(start, stop)) + # initialize typed list + tl_ = List.empty_list(int32) + # populate typed list + for i in range(start, stop): + tl_.append(i) + # check they are the same + self.assertEqual(rl_, list(tl_)) + return rl_, tl_ + + ### Simple slicing ### + + # assign to itself + rl, tl = setup() + rl[:], tl[:] = rl, tl + self.assertEqual(rl, list(tl)) + + # extend self + rl, tl = setup() + rl[len(rl):], tl[len(tl):] = rl, tl + self.assertEqual(rl, list(tl)) + # prepend self + rl, tl = setup() + rl[:0], tl[:0] = rl, tl + self.assertEqual(rl, list(tl)) + # partial assign to self, with equal length + rl, tl = setup() + rl[3:5], tl[3:5] = rl[6:8], tl[6:8] + self.assertEqual(rl, list(tl)) + # partial assign to self, with larger slice + rl, tl = setup() + rl[3:5], tl[3:5] = rl[6:9], tl[6:9] + self.assertEqual(rl, list(tl)) + # partial assign to self, with smaller slice + rl, tl = setup() + rl[3:5], tl[3:5] = rl[6:7], tl[6:7] + self.assertEqual(rl, list(tl)) + + # extend + rl, tl = setup() + rl[len(rl):] = list(range(110, 120)) + tl[len(tl):] = to_tl(range(110,120)) + self.assertEqual(rl, list(tl)) + # extend empty + rl, tl = setup(0, 0) + rl[len(rl):] = list(range(110, 120)) + tl[len(tl):] = to_tl(range(110,120)) + self.assertEqual(rl, list(tl)) + # extend singleton + rl, tl = setup(0, 1) + rl[len(rl):] = list(range(110, 120)) + tl[len(tl):] = to_tl(range(110,120)) + self.assertEqual(rl, list(tl)) + + # prepend + rl, tl = setup() + rl[:0], tl[:0] = list(range(110, 120)), to_tl(range(110,120)) + self.assertEqual(rl, list(tl)) + # prepend empty + rl, tl = setup(0,0) + rl[:0], tl[:0] = list(range(110, 120)), 
to_tl(range(110,120)) + self.assertEqual(rl, list(tl)) + # prepend singleton + rl, tl = setup(0,1) + rl[:0], tl[:0] = list(range(110, 120)), to_tl(range(110,120)) + self.assertEqual(rl, list(tl)) + + # simple equal length assignment, just replace + rl, tl = setup() + rl[1:3], tl[1:3] = [100, 200], to_tl([100, 200]) + self.assertEqual(rl, list(tl)) + + # slice for assignment is larger, need to replace and insert + rl, tl = setup() + rl[1:3], tl[1:3] = [100, 200, 300, 400], to_tl([100, 200, 300, 400]) + self.assertEqual(rl, list(tl)) + + # slice for assignment is smaller, need to replace and delete + rl, tl = setup() + rl[1:3], tl[1:3] = [100], to_tl([100]) + self.assertEqual(rl, list(tl)) + + # slice for assignment is smaller and item is empty, need to delete + rl, tl = setup() + rl[1:3], tl[1:3] = [], to_tl([]) + self.assertEqual(rl, list(tl)) + + # Synonym for clear + rl, tl = setup() + rl[:], tl[:] = [], to_tl([]) + self.assertEqual(rl, list(tl)) + + ### Extended slicing ### + + # replace every second element + rl, tl = setup() + rl[::2], tl[::2] = [100,200,300,400,500], to_tl([100,200,300,400,500]) + self.assertEqual(rl, list(tl)) + # replace every second element, backwards + rl, tl = setup() + rl[::-2], tl[::-2] = [100,200,300,400,500], to_tl([100,200,300,400,500]) + self.assertEqual(rl, list(tl)) + + # reverse assign to itself + rl, tl = setup() + rl[::-1], tl[::-1] = rl, tl + self.assertEqual(rl, list(tl)) + + def test_setitem_slice_value_error(self): + self.disable_leak_check() + + tl = List.empty_list(int32) + for i in range(10,20): + tl.append(i) + + assignment = List.empty_list(int32) + for i in range(1, 4): + assignment.append(i) + + with self.assertRaises(ValueError) as raises: + tl[8:3:-1] = assignment + self.assertIn( + "length mismatch for extended slice and sequence", + str(raises.exception), + ) + + def test_delitem_slice(self): + """ Test delitem using a slice. 
+ + This tests suffers from combinatorial explosion, so we parametrize it + and compare results against the regular list in a quasi fuzzing + approach. + + """ + + def setup(start=10, stop=20): + # initialize regular list + rl_ = list(range(start, stop)) + # initialize typed list + tl_ = List.empty_list(int32) + # populate typed list + for i in range(start, stop): + tl_.append(i) + # check they are the same + self.assertEqual(rl_, list(tl_)) + return rl_, tl_ + + # define the ranges + start_range = list(range(-20, 30)) + stop_range = list(range(-20, 30)) + step_range = [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5] + + rl, tl = setup() + # check that they are the same initially + self.assertEqual(rl, list(tl)) + # check that deletion of the whole list by slice works + del rl[:] + del tl[:] + self.assertEqual(rl, list(tl)) + + # start only + for sa in start_range: + rl, tl = setup() + del rl[sa:] + del tl[sa:] + self.assertEqual(rl, list(tl)) + # stop only + for so in stop_range: + rl, tl = setup() + del rl[:so] + del tl[:so] + self.assertEqual(rl, list(tl)) + # step only + for se in step_range: + rl, tl = setup() + del rl[::se] + del tl[::se] + self.assertEqual(rl, list(tl)) + + # start and stop + for sa, so in product(start_range, stop_range): + rl, tl = setup() + del rl[sa:so] + del tl[sa:so] + self.assertEqual(rl, list(tl)) + # start and step + for sa, se in product(start_range, step_range): + rl, tl = setup() + del rl[sa::se] + del tl[sa::se] + self.assertEqual(rl, list(tl)) + # stop and step + for so, se in product(stop_range, step_range): + rl, tl = setup() + del rl[:so:se] + del tl[:so:se] + self.assertEqual(rl, list(tl)) + + # start, stop and step + for sa, so, se in product(start_range, stop_range, step_range): + rl, tl = setup() + del rl[sa:so:se] + del tl[sa:so:se] + self.assertEqual(rl, list(tl)) + + def test_list_create_no_jit_using_empty_list(self): + with override_config('DISABLE_JIT', True): + with forbid_codegen(): + l = List.empty_list(types.int32) + 
self.assertEqual(type(l), list) + + def test_list_create_no_jit_using_List(self): + with override_config('DISABLE_JIT', True): + with forbid_codegen(): + l = List() + self.assertEqual(type(l), list) + + def test_catch_global_typed_list(self): + from numba.tests.typedlist_usecases import catch_global + + expected_message = ("The use of a ListType[int32] type, assigned to " + "variable 'global_typed_list' in globals, is not " + "supported as globals are considered compile-time " + "constants and there is no known way to compile " + "a ListType[int32] type as a constant.") + with self.assertRaises(TypingError) as raises: + njit(catch_global)() + self.assertIn( + expected_message, + str(raises.exception), + ) + self.disable_leak_check() + + def test_repr(self): + l = List() + expected = "ListType[Undefined]([])" + self.assertEqual(expected, repr(l)) + + l = List([int32(i) for i in (1, 2, 3)]) + expected = "ListType[int32]([1, 2, 3])" + self.assertEqual(expected, repr(l)) + + def test_repr_long_list(self): + l = List(range(1005)) + expected = f"{typeof(l)}([{', '.join(map(str, l))}])" + self.assertEqual(expected, repr(l)) + + def test_repr_long_list_ipython(self): + + # Test repr of long typed Lists in an IPython session + args = ["-m", "IPython", "--quiet", "--quick", "--no-banner", + "--colors=NoColor", "-c"] + base_cmd = [sys.executable] + args + try: + subprocess.check_output(base_cmd + ["--version"]) + except subprocess.CalledProcessError as e: + self.skipTest("ipython not found: return code %d" % e.returncode) + + def run_repr_cmd(repr_cmd_str): + cmd = base_cmd + [repr_cmd_str] + p = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + ) + out, err = p.communicate() + return err + + l = List(range(1000)) + expected = f"{typeof(l)}([{', '.join(map(str, l))}])" + actual = run_repr_cmd(" ".join( + [ + "import sys;", + "from numba.typed import List;", + "res = repr(List(range(1000)));", + "sys.stderr.write(res);" + 
] + )) + self.assertEqual(expected, actual) + + l = List(range(1005)) + # Assert that the long list is truncated + expected = f"{typeof(l)}([{', '.join(map(str, l[:1000]))}, ...])" + actual = run_repr_cmd(" ".join( + [ + "import sys;", + "from numba.typed import List;", + "res = repr(List(range(1005)));", + "sys.stderr.write(res);" + ] + )) + self.assertEqual(expected, actual) + + def test_iter_mutates_self(self): + self.disable_leak_check() + + @njit + def foo(x): + count = 0 + for i in x: + if count > 1: + x.append(2.) + count += 1 + + l = List() + l.append(1.) + l.append(1.) + l.append(1.) + with self.assertRaises(RuntimeError) as raises: + foo(l) + + msg = "list was mutated during iteration" + self.assertIn(msg, str(raises.exception)) + + +class TestNoneType(MemoryLeakMixin, TestCase): + + def test_append_none(self): + @njit + def impl(): + l = List() + l.append(None) + return l + + self.assertEqual(impl.py_func(), impl()) + + def test_len_none(self): + @njit + def impl(): + l = List() + l.append(None) + return len(l) + + self.assertEqual(impl.py_func(), impl()) + + def test_getitem_none(self): + @njit + def impl(): + l = List() + l.append(None) + return l[0] + + self.assertEqual(impl.py_func(), impl()) + + def test_setitem_none(self): + @njit + def impl(): + l = List() + l.append(None) + l[0] = None + return l + + self.assertEqual(impl.py_func(), impl()) + + def test_equals_none(self): + @njit + def impl(): + l = List() + l.append(None) + m = List() + m.append(None) + return l == m, l != m, l < m, l <= m, l > m, l >= m + + self.assertEqual(impl.py_func(), impl()) + + def test_not_equals_none(self): + @njit + def impl(): + l = List() + l.append(None) + m = List() + m.append(1) + return l == m, l != m, l < m, l <= m, l > m, l >= m + + self.assertEqual(impl.py_func(), impl()) + + def test_iter_none(self): + @njit + def impl(): + l = List() + l.append(None) + l.append(None) + l.append(None) + count = 0 + for i in l: + count += 1 + return count + + 
self.assertEqual(impl.py_func(), impl()) + + def test_none_typed_method_fails(self): + """ Test that unsupported operations on List[None] raise. """ + def generate_function(line1, line2): + context = {} + exec(dedent(""" + from numba.typed import List + def bar(): + lst = List() + {} + {} + """.format(line1, line2)), context) + return njit(context["bar"]) + for line1, line2 in ( + ("lst.append(None)", "lst.pop()"), + ("lst.append(None)", "del lst[0]"), + ("lst.append(None)", "lst.count(None)"), + ("lst.append(None)", "lst.index(None)"), + ("lst.append(None)", "lst.insert(0, None)"), + ("" , "lst.insert(0, None)"), + ("lst.append(None)", "lst.clear()"), + ("lst.append(None)", "lst.copy()"), + ("lst.append(None)", "lst.extend([None])"), + ("", "lst.extend([None])"), + ("lst.append(None)", "lst.remove(None)"), + ("lst.append(None)", "lst.reverse()"), + ("lst.append(None)", "None in lst"), + ): + with self.assertRaises(TypingError) as raises: + foo = generate_function(line1, line2) + foo() + self.assertIn( + "method support for List[None] is limited", + str(raises.exception), + ) + + +class TestAllocation(MemoryLeakMixin, TestCase): + + def test_allocation(self): + # kwarg version + for i in range(16): + tl = List.empty_list(types.int32, allocated=i) + self.assertEqual(tl._allocated(), i) + + # posarg version + for i in range(16): + tl = List.empty_list(types.int32, i) + self.assertEqual(tl._allocated(), i) + + def test_allocation_njit(self): + # kwarg version + @njit + def foo(i): + tl = List.empty_list(types.int32, allocated=i) + return tl._allocated() + + for j in range(16): + self.assertEqual(foo(j), j) + + # posarg version + @njit + def foo(i): + tl = List.empty_list(types.int32, i) + return tl._allocated() + + for j in range(16): + self.assertEqual(foo(j), j) + + def test_growth_and_shrinkage(self): + tl = List.empty_list(types.int32) + growth_before = {0: 0, 4:4, 8:8, 16:16} + growth_after = {0: 4, 4:8, 8:16, 16:25} + for i in range(17): + if i in growth_before: 
+ self.assertEqual(growth_before[i], tl._allocated()) + tl.append(i) + if i in growth_after: + self.assertEqual(growth_after[i], tl._allocated()) + + shrink_before = {17: 25, 12:25, 9:18, 6:12, 4:8, 3:6, 2:5, 1:4} + shrink_after = {17: 25, 12:18, 9:12, 6:8, 4:6, 3:5, 2:4, 1:0} + for i in range(17, 0, -1): + if i in shrink_before: + self.assertEqual(shrink_before[i], tl._allocated()) + tl.pop() + if i in shrink_after: + self.assertEqual(shrink_after[i], tl._allocated()) + + +class TestExtend(MemoryLeakMixin, TestCase): + + def test_extend_other(self): + @njit + def impl(other): + l = List.empty_list(types.int32) + for x in range(10): + l.append(x) + l.extend(other) + return l + + other = List.empty_list(types.int32) + for x in range(10): + other.append(x) + + expected = impl.py_func(other) + got = impl(other) + self.assertEqual(expected, got) + + def test_extend_self(self): + @njit + def impl(): + l = List.empty_list(types.int32) + for x in range(10): + l.append(x) + l.extend(l) + return l + + expected = impl.py_func() + got = impl() + self.assertEqual(expected, got) + + def test_extend_tuple(self): + @njit + def impl(): + l = List.empty_list(types.int32) + for x in range(10): + l.append(x) + l.extend((100,200,300)) + return l + + expected = impl.py_func() + got = impl() + self.assertEqual(expected, got) + + def test_extend_single_value_container(self): + @njit + def impl(): + l = List() + l.extend((100,)) + return l + + expected = impl.py_func() + got = impl() + self.assertEqual(expected, got) + + def test_extend_empty_unrefined(self): + # Extending an unrefined list with an empty iterable doesn't work in a + # jit compiled function as the list remains untyped. 
+ l = List() + ret = l.extend(tuple()) + self.assertIsNone(ret) + self.assertEqual(len(l), 0) + self.assertFalse(l._typed) + + def test_extend_empty_refiend(self): + # Extending a refined list with an empty iterable doesn't work in a + # jit compiled function as the (empty) argument can't be typed + l = List((1,)) + l.extend(tuple()) + self.assertEqual(len(l), 1) + self.assertTrue(l._typed) + + +@njit +def cmp(a, b): + return a < b, a <= b, a == b, a != b, a >= b, a > b + + +class TestComparisons(MemoryLeakMixin, TestCase): + + def _cmp_dance(self, expected, pa, pb, na, nb): + # interpreter with regular list + self.assertEqual(cmp.py_func(pa, pb), expected) + + # interpreter with typed-list + py_got = cmp.py_func(na, nb) + self.assertEqual(py_got, expected) + + # compiled with typed-list + jit_got = cmp(na, nb) + self.assertEqual(jit_got, expected) + + def test_empty_vs_empty(self): + pa, pb = [], [] + na, nb = to_tl(pa), to_tl(pb) + expected = False, True, True, False, True, False + self._cmp_dance(expected, pa, pb, na, nb) + + def test_empty_vs_singleton(self): + pa, pb = [], [0] + na, nb = to_tl(pa), to_tl(pb) + expected = True, True, False, True, False, False + self._cmp_dance(expected, pa, pb, na, nb) + + def test_singleton_vs_empty(self): + pa, pb = [0], [] + na, nb = to_tl(pa), to_tl(pb) + expected = False, False, False, True, True, True + self._cmp_dance(expected, pa, pb, na, nb) + + def test_singleton_vs_singleton_equal(self): + pa, pb = [0], [0] + na, nb = to_tl(pa), to_tl(pb) + expected = False, True, True, False, True, False + self._cmp_dance(expected, pa, pb, na, nb) + + def test_singleton_vs_singleton_less_than(self): + pa, pb = [0], [1] + na, nb = to_tl(pa), to_tl(pb) + expected = True, True, False, True, False, False + self._cmp_dance(expected, pa, pb, na, nb) + + def test_singleton_vs_singleton_greater_than(self): + pa, pb = [1], [0] + na, nb = to_tl(pa), to_tl(pb) + expected = False, False, False, True, True, True + self._cmp_dance(expected, pa, 
pb, na, nb) + + def test_equal(self): + pa, pb = [1, 2, 3], [1, 2, 3] + na, nb = to_tl(pa), to_tl(pb) + expected = False, True, True, False, True, False + self._cmp_dance(expected, pa, pb, na, nb) + + def test_first_shorter(self): + pa, pb = [1, 2], [1, 2, 3] + na, nb = to_tl(pa), to_tl(pb) + expected = True, True, False, True, False, False + self._cmp_dance(expected, pa, pb, na, nb) + + def test_second_shorter(self): + pa, pb = [1, 2, 3], [1, 2] + na, nb = to_tl(pa), to_tl(pb) + expected = False, False, False, True, True, True + self._cmp_dance(expected, pa, pb, na, nb) + + def test_first_less_than(self): + pa, pb = [1, 2, 2], [1, 2, 3] + na, nb = to_tl(pa), to_tl(pb) + expected = True, True, False, True, False, False + self._cmp_dance(expected, pa, pb, na, nb) + + def test_first_greater_than(self): + pa, pb = [1, 2, 3], [1, 2, 2] + na, nb = to_tl(pa), to_tl(pb) + expected = False, False, False, True, True, True + self._cmp_dance(expected, pa, pb, na, nb) + + def test_equals_non_list(self): + l = to_tl([1, 2, 3]) + self.assertFalse(any(cmp.py_func(l, 1))) + self.assertFalse(any(cmp(l, 1))) + + +class TestListInferred(TestCase): + + def test_simple_refine_append(self): + @njit + def foo(): + l = List() + l.append(1) + return l + + expected = foo.py_func() + got = foo() + self.assertEqual(expected, got) + self.assertEqual(list(got), [1]) + self.assertEqual(typeof(got).item_type, typeof(1)) + + def test_simple_refine_insert(self): + @njit + def foo(): + l = List() + l.insert(0, 1) + return l + + expected = foo.py_func() + got = foo() + self.assertEqual(expected, got) + self.assertEqual(list(got), [1]) + self.assertEqual(typeof(got).item_type, typeof(1)) + + def test_refine_extend_list(self): + @njit + def foo(): + a = List() + b = List() + for i in range(3): + b.append(i) + a.extend(b) + return a + + expected = foo.py_func() + got = foo() + self.assertEqual(expected, got) + self.assertEqual(list(got), [0, 1, 2]) + self.assertEqual(typeof(got).item_type, typeof(1)) + 
+ def test_refine_extend_set(self): + @njit + def foo(): + l = List() + l.extend((0, 1, 2)) + return l + + expected = foo.py_func() + got = foo() + self.assertEqual(expected, got) + self.assertEqual(list(got), [0, 1, 2]) + self.assertEqual(typeof(got).item_type, typeof(1)) + + def test_refine_list_extend_iter(self): + @njit + def foo(): + l = List() + d = Dict() + d[0] = 0 + # d.keys() provides a DictKeysIterableType + l.extend(d.keys()) + return l + + got = foo() + self.assertEqual(0, got[0]) + + +class TestListRefctTypes(MemoryLeakMixin, TestCase): + + def test_str_item(self): + @njit + def foo(): + l = List.empty_list(types.unicode_type) + for s in ("a", "ab", "abc", "abcd"): + l.append(s) + return l + + l = foo() + expected = ["a", "ab", "abc", "abcd"] + for i, s in enumerate(expected): + self.assertEqual(l[i], s) + self.assertEqual(list(l), expected) + # Test insert replacement + l[3] = 'uxyz' + self.assertEqual(l[3], 'uxyz') + # Test list growth + nelem = 100 + for i in range(4, nelem): + l.append(str(i)) + self.assertEqual(l[i], str(i)) + + def test_str_item_refcount_replace(self): + @njit + def foo(): + # use some tricks to make ref-counted unicode + i, j = 'ab', 'c' + a = i + j + m, n = 'zy', 'x' + z = m + n + l = List.empty_list(types.unicode_type) + l.append(a) + # This *should* dec' a and inc' z thus tests that items that are + # replaced are also dec'ed. 
+ l[0] = z + ra, rz = get_refcount(a), get_refcount(z) + return l, ra, rz + + l, ra, rz = foo() + self.assertEqual(l[0], "zyx") + self.assertEqual(ra, 1) + self.assertEqual(rz, 2) + + def test_dict_as_item_in_list(self): + @njit + def foo(): + l = List.empty_list(Dict.empty(int32, int32)) + d = Dict.empty(int32, int32) + d[0] = 1 + # This increments the refcount for d + l.append(d) + return get_refcount(d) + + c = foo() + if config.LLVM_REFPRUNE_PASS: + # Because the pruner cleared all other increfs + self.assertEqual(1, c) + else: + self.assertEqual(2, c) + + def test_dict_as_item_in_list_multi_refcount(self): + @njit + def foo(): + l = List.empty_list(Dict.empty(int32, int32)) + d = Dict.empty(int32, int32) + d[0] = 1 + # This increments the refcount for d, twice + l.append(d) + l.append(d) + return get_refcount(d) + + c = foo() + if config.LLVM_REFPRUNE_PASS: + # Because the pruner cleared all other increfs + self.assertEqual(1, c) + else: + self.assertEqual(3, c) + + def test_list_as_value_in_dict(self): + @njit + def foo(): + d = Dict.empty(int32, List.empty_list(int32)) + l = List.empty_list(int32) + l.append(0) + # This increments the refcount for l + d[0] = l + return get_refcount(l) + + c = foo() + if config.LLVM_REFPRUNE_PASS: + # Because the pruner cleared all other increfs + self.assertEqual(1, c) + else: + self.assertEqual(2, c) + + def test_list_as_item_in_list(self): + nested_type = types.ListType(types.int32) + + @njit + def foo(): + la = List.empty_list(nested_type) + lb = List.empty_list(types.int32) + lb.append(1) + la.append(lb) + return la + + expected = foo.py_func() + got = foo() + self.assertEqual(expected, got) + + def test_array_as_item_in_list(self): + nested_type = types.Array(types.float64, 1, 'C') + + @njit + def foo(): + l = List.empty_list(nested_type) + a = np.zeros((1,)) + l.append(a) + return l + + expected = foo.py_func() + got = foo() + # Need to compare the nested arrays + self.assertTrue(np.all(expected[0] == got[0])) + + def 
test_array_pop_from_single_value_list(self): + @njit + def foo(): + l = List((np.zeros((1,)),)) + l.pop() + return l + + expected, got = foo.py_func(), foo() + # Need to compare the nested arrays + self.assertEqual(len(expected), 0) + self.assertEqual(len(got), 0) + # FIXME comparison of empty array-typed lists fails + # self.assertEqual(expected, got) + + def test_5264(self): + # Test the reproducer from #5264 and make sure it doesn't segfault + float_array = types.float64[:] + l = List.empty_list(float_array) + l.append(np.ones(3,dtype=np.float64)) + l.pop() + self.assertEqual(0, len(l)) + + def test_jitclass_as_item_in_list(self): + + spec = [ + ('value', int32), # a simple scalar field + ('array', float32[:]), # an array field + ] + + @jitclass(spec) + class Bag(object): + def __init__(self, value): + self.value = value + self.array = np.zeros(value, dtype=np.float32) + + @property + def size(self): + return self.array.size + + def increment(self, val): + for i in range(self.size): + self.array[i] += val + return self.array + + @njit + def foo(): + l = List() + l.append(Bag(21)) + l.append(Bag(22)) + l.append(Bag(23)) + return l + + expected = foo.py_func() + got = foo() + + def bag_equal(one, two): + # jitclasses couldn't override __eq__ at time of writing + self.assertEqual(one.value, two.value) + np.testing.assert_allclose(one.array, two.array) + + [bag_equal(a, b) for a, b in zip(expected, got)] + + def test_4960(self): + # Test the reproducer from #4960 and make sure it doesn't segfault + @jitclass([('value', int32)]) + class Simple(object): + def __init__(self, value): + self.value = value + + @njit + def foo(): + l = List((Simple(23),Simple(24))) + l.pop() + return l + + l = foo() + self.assertEqual(1, len(l)) + + def test_storage_model_mismatch(self): + # https://github.com/numba/numba/issues/4520 + # check for storage model mismatch in refcount ops generation + lst = List() + ref = [ + ("a", True, "a"), + ("b", False, "b"), + ("c", False, "c"), + ] + # 
populate + for x in ref: + lst.append(x) + # test + for i, x in enumerate(ref): + self.assertEqual(lst[i], ref[i]) + + def test_equals_on_list_with_dict_for_equal_lists(self): + # https://github.com/numba/numba/issues/4879 + a, b = List(), Dict() + b["a"] = 1 + a.append(b) + + c, d = List(), Dict() + d["a"] = 1 + c.append(d) + + self.assertEqual(a, c) + + def test_equals_on_list_with_dict_for_unequal_dicts(self): + # https://github.com/numba/numba/issues/4879 + a, b = List(), Dict() + b["a"] = 1 + a.append(b) + + c, d = List(), Dict() + d["a"] = 2 + c.append(d) + + self.assertNotEqual(a, c) + + def test_equals_on_list_with_dict_for_unequal_lists(self): + # https://github.com/numba/numba/issues/4879 + a, b = List(), Dict() + b["a"] = 1 + a.append(b) + + c, d, e = List(), Dict(), Dict() + d["a"] = 1 + e["b"] = 2 + c.append(d) + c.append(e) + + self.assertNotEqual(a, c) + + +class TestListSort(MemoryLeakMixin, TestCase): + def setUp(self): + super(TestListSort, self).setUp() + np.random.seed(0) + + def make(self, ctor, data): + lst = ctor() + lst.extend(data) + return lst + + def make_both(self, data): + return { + 'py': self.make(list, data), + 'nb': self.make(List, data), + } + + def test_sort_no_args(self): + def udt(lst): + lst.sort() + return lst + + for nelem in [13, 29, 127]: + my_lists = self.make_both(np.random.randint(0, nelem, nelem)) + self.assertEqual(list(udt(my_lists['nb'])), udt(my_lists['py'])) + + def test_sort_all_args(self): + def udt(lst, key, reverse): + lst.sort(key=key, reverse=reverse) + return lst + + possible_keys = [ + lambda x: -x, # negative + lambda x: 1 / (1 + x), # make float + lambda x: (x, -x), # tuple + lambda x: x, # identity + ] + possible_reverse = [True, False] + for key, reverse in product(possible_keys, possible_reverse): + my_lists = self.make_both(np.random.randint(0, 100, 23)) + msg = "case for key={} reverse={}".format(key, reverse) + self.assertEqual( + list(udt(my_lists['nb'], key=key, reverse=reverse)), + 
udt(my_lists['py'], key=key, reverse=reverse), + msg=msg, + ) + + def test_sort_dispatcher_key(self): + def udt(lst, key): + lst.sort(key=key) + return lst + + my_lists = self.make_both(np.random.randint(0, 100, 31)) + py_key = lambda x: x + 1 + nb_key = njit(lambda x: x + 1) + # test typedlist with jitted function + self.assertEqual( + list(udt(my_lists['nb'], key=nb_key)), + udt(my_lists['py'], key=py_key), + ) + # test typedlist with and without jitted function + self.assertEqual( + list(udt(my_lists['nb'], key=nb_key)), + list(udt(my_lists['nb'], key=py_key)), + ) + + def test_sort_in_jit_w_lambda_key(self): + @njit + def udt(lst): + lst.sort(key=lambda x: -x) + return lst + + lst = self.make(List, np.random.randint(0, 100, 31)) + self.assertEqual(udt(lst), udt.py_func(lst)) + + def test_sort_in_jit_w_global_key(self): + @njit + def keyfn(x): + return -x + + @njit + def udt(lst): + lst.sort(key=keyfn) + return lst + + lst = self.make(List, np.random.randint(0, 100, 31)) + self.assertEqual(udt(lst), udt.py_func(lst)) + + def test_sort_on_arrays(self): + @njit + def foo(lst): + lst.sort(key=lambda arr: np.sum(arr)) + return lst + + arrays = [np.random.random(3) for _ in range(10)] + my_lists = self.make_both(arrays) + self.assertEqual( + list(foo(my_lists['nb'])), + foo.py_func(my_lists['py']), + ) + + +class TestImmutable(MemoryLeakMixin, TestCase): + + def test_is_immutable(self): + @njit + def foo(): + l = List() + l.append(1) + return l._is_mutable() + self.assertTrue(foo()) + self.assertTrue(foo.py_func()) + + def test_make_immutable_is_immutable(self): + @njit + def foo(): + l = List() + l.append(1) + l._make_immutable() + return l._is_mutable() + self.assertFalse(foo()) + self.assertFalse(foo.py_func()) + + def test_length_still_works_when_immutable(self): + @njit + def foo(): + l = List() + l.append(1) + l._make_immutable() + return len(l),l._is_mutable() + length, mutable = foo() + self.assertEqual(length, 1) + self.assertFalse(mutable) + + def 
test_getitem_still_works_when_immutable(self): + @njit + def foo(): + l = List() + l.append(1) + l._make_immutable() + return l[0], l._is_mutable() + test_item, mutable = foo() + self.assertEqual(test_item, 1) + self.assertFalse(mutable) + + def test_append_fails(self): + self.disable_leak_check() + + @njit + def foo(): + l = List() + l.append(1) + l._make_immutable() + l.append(1) + + for func in (foo, foo.py_func): + with self.assertRaises(ValueError) as raises: + func() + self.assertIn( + 'list is immutable', + str(raises.exception), + ) + + def test_mutation_fails(self): + """ Test that any attempt to mutate an immutable typed list fails. """ + self.disable_leak_check() + + def generate_function(line): + context = {} + exec(dedent(""" + from numba.typed import List + def bar(): + lst = List() + lst.append(1) + lst._make_immutable() + {} + """.format(line)), context) + return njit(context["bar"]) + for line in ("lst.append(0)", + "lst[0] = 0", + "lst.pop()", + "del lst[0]", + "lst.extend((0,))", + "lst.insert(0, 0)", + "lst.clear()", + "lst.reverse()", + "lst.sort()", + ): + foo = generate_function(line) + for func in (foo, foo.py_func): + with self.assertRaises(ValueError) as raises: + func() + self.assertIn( + "list is immutable", + str(raises.exception), + ) + + +class TestGetItemIndexType(MemoryLeakMixin, TestCase): + + def test_indexing_with_uint8(self): + """ Test for reproducer at https://github.com/numba/numba/issues/7250 + """ + @njit + def foo(): + l = List.empty_list(uint8) + for i in range(129): + l.append(uint8(i)) + a = uint8(128) + return l[a] + + self.assertEqual(foo(), 128) + + +class TestListFromIter(MemoryLeakMixin, TestCase): + + def test_simple_iterable_types(self): + """Test all simple iterables that a List can be constructed from.""" + + def generate_function(line): + context = {} + code = dedent(""" + from numba.typed import List + def bar(): + {} + return l + """).format(line) + exec(code, context) + return njit(context["bar"]) + for 
line in ("l = List([0, 1, 2])", + "l = List(range(3))", + "l = List(List([0, 1, 2]))", + "l = List((0, 1, 2))", + "l = List(set([0, 1, 2]))", + ): + foo = generate_function(line) + cf_received, py_received = foo(), foo.py_func() + for result in (cf_received, py_received): + for i in range(3): + self.assertEqual(i, result[i]) + + def test_unicode(self): + """Test that a List can be created from a unicode string.""" + @njit + def foo(): + l = List("abc") + return l + expected = List() + for i in ("a", "b", "c"): + expected.append(i) + self.assertEqual(foo.py_func(), expected) + self.assertEqual(foo(), expected) + + def test_dict_iters(self): + """Test that a List can be created from Dict iterators.""" + + def generate_function(line): + context = {} + code = dedent(""" + from numba.typed import List, Dict + def bar(): + d = Dict() + d[0], d[1], d[2] = "a", "b", "c" + {} + return l + """).format(line) + exec(code, context) + return njit(context["bar"]) + + def generate_expected(values): + expected = List() + for i in values: + expected.append(i) + return expected + + for line, values in ( + ("l = List(d)", (0, 1, 2)), + ("l = List(d.keys())", (0, 1, 2)), + ("l = List(d.values())", ("a", "b", "c")), + ("l = List(d.items())", ((0, "a"), (1, "b"), (2, "c"))), + ): + foo, expected = generate_function(line), generate_expected(values) + for func in (foo, foo.py_func): + self.assertEqual(func(), expected) + + def test_ndarray_scalar(self): + + @njit + def foo(): + return List(np.ones(3)) + + expected = List() + for i in range(3): + expected.append(1) + + self.assertEqual(expected, foo()) + self.assertEqual(expected, foo.py_func()) + + def test_ndarray_oned(self): + + @njit + def foo(): + return List(np.array(1)) + + expected = List() + expected.append(1) + + self.assertEqual(expected, foo()) + self.assertEqual(expected, foo.py_func()) + + def test_ndarray_twod(self): + + @njit + def foo(x): + return List(x) + + carr = np.array([[1, 2], [3, 4]]) + farr = 
np.asfortranarray(carr) + aarr = np.arange(8).reshape((2, 4))[:, ::2] + + for layout, arr in zip('CFA', (carr, farr, aarr)): + self.assertEqual(typeof(arr).layout, layout) + expected = List() + expected.append(arr[0, :]) + expected.append(arr[1, :]) + received = foo(arr) + + np.testing.assert_equal(expected[0], received[0]) + np.testing.assert_equal(expected[1], received[1]) + + pyreceived = foo.py_func(arr) + + np.testing.assert_equal(expected[0], pyreceived[0]) + np.testing.assert_equal(expected[1], pyreceived[1]) + + def test_exception_on_plain_int(self): + @njit + def foo(): + l = List(23) + return l + + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + "List() argument must be iterable", + str(raises.exception), + ) + + with self.assertRaises(TypeError) as raises: + List(23) + self.assertIn( + "List() argument must be iterable", + str(raises.exception), + ) + + def test_exception_on_inhomogeneous_tuple(self): + @njit + def foo(): + l = List((1, 1.0)) + return l + + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + "List() argument must be iterable", + str(raises.exception), + ) + + with self.assertRaises(TypingError) as raises: + List((1, 1.0)) + # FIXME this bails with a length casting error when we attempt to + # append 1.0 to an int typed list. 
+ + def test_exception_on_too_many_args(self): + @njit + def foo(): + l = List((0, 1, 2), (3, 4, 5)) + return l + + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + "List() expected at most 1 argument, got 2", + str(raises.exception), + ) + + with self.assertRaises(TypeError) as raises: + List((0, 1, 2), (3, 4, 5)) + self.assertIn( + "List() expected at most 1 argument, got 2", + str(raises.exception), + ) + + @njit + def foo(): + l = List((0, 1, 2), (3, 4, 5), (6, 7, 8)) + return l + + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + "List() expected at most 1 argument, got 3", + str(raises.exception), + ) + + with self.assertRaises(TypeError) as raises: + List((0, 1, 2), (3, 4, 5), (6, 7, 8)) + self.assertIn( + "List() expected at most 1 argument, got 3", + str(raises.exception), + ) + + def test_exception_on_kwargs(self): + @njit + def foo(): + l = List(iterable=(0, 1, 2)) + return l + + with self.assertRaises(TypingError) as raises: + foo() + self.assertIn( + "List() takes no keyword arguments", + str(raises.exception), + ) + + with self.assertRaises(TypeError) as raises: + List(iterable=(0, 1, 2)) + self.assertIn( + "List() takes no keyword arguments", + str(raises.exception), + ) diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_typedobjectutils.py b/venv/lib/python3.10/site-packages/numba/tests/test_typedobjectutils.py new file mode 100644 index 0000000000000000000000000000000000000000..f93d6252db85d83595cbf1ce814b7167a32426d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_typedobjectutils.py @@ -0,0 +1,68 @@ +""" +Unit-tests for `typedobjectutils.py` +""" + +import warnings + + +from numba.core import types +from numba.tests.support import TestCase +from numba.typed.typedobjectutils import _sentry_safe_cast + + +class TestTypedObjectUtils(TestCase): + def test_sentry_safe_cast_warnings(self): + warn_cases = [] + warn_cases += [ + # integer cases + (types.int32, 
types.int16), + (types.int32, types.uint32), + (types.int64, types.uint32), + # float cases + (types.float64, types.float32), + # complex cases + (types.complex128, types.complex64), + # int to float cases + (types.int32, types.float32), + (types.int64, types.float32), + # tuple-of-ints to tuple-of-floats, + (types.Tuple([types.int32]), types.Tuple([types.float32])), + ] + + for fromty, toty in warn_cases: + with self.subTest(fromty=fromty, toty=toty): + with warnings.catch_warnings(record=True) as w: + _sentry_safe_cast(fromty, toty) + self.assertEqual(len(w), 1) + # Make sure the warning is about unsafe cast + self.assertIn( + "unsafe cast from {} to {}".format(fromty, toty), + str(w[0]), + ) + + def test_sentry_safe_cast_no_warn(self): + ok_cases = [] + ok_cases += [ + # integer cases + (types.int32, types.int64), + (types.uint8, types.int32), + # float cases + (types.float32, types.float64), + # complex cases + (types.complex64, types.complex128), + # int to float cases + (types.int32, types.float64), + (types.uint8, types.float32), + # float to complex cases + (types.float32, types.complex128), + (types.float64, types.complex128), + # tuple-of-ints to tuple-of-ints, + (types.Tuple([types.int32]), types.Tuple([types.int64])), + ] + + for fromty, toty in ok_cases: + with self.subTest(fromty=fromty, toty=toty): + with warnings.catch_warnings(record=True) as w: + _sentry_safe_cast(fromty, toty) + # Expect no warnings + self.assertEqual(len(w), 0) diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_typeguard.py b/venv/lib/python3.10/site-packages/numba/tests/test_typeguard.py new file mode 100644 index 0000000000000000000000000000000000000000..37ab82dd8fc321fe560b87ed10f62fcfba36b277 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_typeguard.py @@ -0,0 +1,44 @@ +""" +Tests to ensure that typeguard is working as expected. +This mostly contains negative tests as proof that typeguard can catch errors. 
+""" +import unittest +from numba.tests.support import TestCase, skip_unless_typeguard + + +def guard_args(val: int): + return + + +def guard_ret(val) -> int: + return val + + +@skip_unless_typeguard +class TestTypeGuard(TestCase): + + def setUp(self): + super().setUp() + import typeguard + # This is a test class invariant but the Numba multiprocesses test + # runner doesn't respect `setUpClass` so just use `setUp`. + # typeguard 3+ uses typeguard.TypeCheckError, 2.x uses TypeError + self._exception_type = getattr(typeguard, 'TypeCheckError', TypeError) + + def test_check_args(self): + with self.assertRaises(self._exception_type): + guard_args(float(1.2)) + + def test_check_ret(self): + with self.assertRaises(self._exception_type): + guard_ret(float(1.2)) + + def test_check_does_not_work_with_inner_func(self): + def guard(val: int) -> int: + return + + guard(float(1.2)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_typeinfer.py b/venv/lib/python3.10/site-packages/numba/tests/test_typeinfer.py new file mode 100644 index 0000000000000000000000000000000000000000..067b803201cc8e8c72658e2f6f7fdf12d45867f0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_typeinfer.py @@ -0,0 +1,936 @@ +import os, sys, subprocess +import dis +import itertools + +import numpy as np + +import numba +from numba import jit, njit +from numba.core import errors, ir, types, typing, typeinfer, utils +from numba.core.typeconv import Conversion +from numba.extending import overload_method + +from numba.tests.support import TestCase, tag +from numba.tests.test_typeconv import CompatibilityTestMixin +from numba.core.untyped_passes import TranslateByteCode, IRProcessing +from numba.core.typed_passes import PartialTypeInference +from numba.core.compiler_machinery import FunctionPass, register_pass +import unittest + + +i8 = types.int8 +i16 = types.int16 +i32 = types.int32 +i64 = types.int64 +u8 = types.uint8 +u16 
= types.uint16 +u32 = types.uint32 +u64 = types.uint64 +f32 = types.float32 +f64 = types.float64 +c64 = types.complex64 +c128 = types.complex128 + +skip_unless_load_fast_and_clear = unittest.skipUnless( + "LOAD_FAST_AND_CLEAR" in dis.opmap, + "Requires LOAD_FAST_AND_CLEAR opcode", +) + + +class TestArgRetCasting(unittest.TestCase): + def test_arg_ret_casting(self): + def foo(x): + return x + + args = (i32,) + return_type = f32 + cfunc = njit(return_type(*args))(foo) + cres = cfunc.overloads[args] + self.assertTrue(isinstance(cfunc(123), float)) + self.assertEqual(cres.signature.args, args) + self.assertEqual(cres.signature.return_type, return_type) + + def test_arg_ret_mismatch(self): + def foo(x): + return x + + args = (types.Array(i32, 1, 'C'),) + return_type = f32 + try: + njit(return_type(*args))(foo) + except errors.TypingError as e: + pass + else: + self.fail("Should complain about array casting to float32") + + def test_invalid_arg_type_forcing(self): + def foo(iters): + a = range(iters) + return iters + + args = (u32,) + return_type = u8 + cfunc = njit(return_type(*args))(foo) + cres = cfunc.overloads[args] + typemap = cres.type_annotation.typemap + # Argument "iters" must be uint32 + self.assertEqual(typemap['iters'], u32) + + +class TestUnify(unittest.TestCase): + """ + Tests for type unification with a typing context. 
+ """ + + int_unify = { + ('uint8', 'uint8'): 'uint8', + ('int8', 'int8'): 'int8', + ('uint16', 'uint16'): 'uint16', + ('int16', 'int16'): 'int16', + ('uint32', 'uint32'): 'uint32', + ('int32', 'int32'): 'int32', + ('uint64', 'uint64'): 'uint64', + ('int64', 'int64'): 'int64', + + ('int8', 'uint8'): 'int16', + ('int8', 'uint16'): 'int32', + ('int8', 'uint32'): 'int64', + + ('uint8', 'int32'): 'int32', + ('uint8', 'uint64'): 'uint64', + + ('int16', 'int8'): 'int16', + ('int16', 'uint8'): 'int16', + ('int16', 'uint16'): 'int32', + ('int16', 'uint32'): 'int64', + ('int16', 'int64'): 'int64', + ('int16', 'uint64'): 'float64', + + ('uint16', 'uint8'): 'uint16', + ('uint16', 'uint32'): 'uint32', + ('uint16', 'int32'): 'int32', + ('uint16', 'uint64'): 'uint64', + + ('int32', 'int8'): 'int32', + ('int32', 'int16'): 'int32', + ('int32', 'uint32'): 'int64', + ('int32', 'int64'): 'int64', + + ('uint32', 'uint8'): 'uint32', + ('uint32', 'int64'): 'int64', + ('uint32', 'uint64'): 'uint64', + + ('int64', 'int8'): 'int64', + ('int64', 'uint8'): 'int64', + ('int64', 'uint16'): 'int64', + + ('uint64', 'int8'): 'float64', + ('uint64', 'int32'): 'float64', + ('uint64', 'int64'): 'float64', + } + + def assert_unify(self, aty, bty, expected): + ctx = typing.Context() + template = "{0}, {1} -> {2} != {3}" + for unify_func in ctx.unify_types, ctx.unify_pairs: + unified = unify_func(aty, bty) + self.assertEqual(unified, expected, + msg=template.format(aty, bty, unified, expected)) + unified = unify_func(bty, aty) + self.assertEqual(unified, expected, + msg=template.format(bty, aty, unified, expected)) + + def assert_unify_failure(self, aty, bty): + self.assert_unify(aty, bty, None) + + def test_integer(self): + ctx = typing.Context() + for aty, bty in itertools.product(types.integer_domain, + types.integer_domain): + key = (str(aty), str(bty)) + try: + expected = self.int_unify[key] + except KeyError: + expected = self.int_unify[key[::-1]] + self.assert_unify(aty, bty, getattr(types, 
expected)) + + def test_bool(self): + aty = types.boolean + for bty in types.integer_domain: + self.assert_unify(aty, bty, bty) + # Not sure about this one, but it respects transitivity + for cty in types.real_domain: + self.assert_unify(aty, cty, cty) + + def unify_number_pair_test(self, n): + """ + Test all permutations of N-combinations of numeric types and ensure + that the order of types in the sequence is irrelevant. + """ + ctx = typing.Context() + for tys in itertools.combinations(types.number_domain, n): + res = [ctx.unify_types(*comb) + for comb in itertools.permutations(tys)] + first_result = res[0] + # Sanity check + self.assertIsInstance(first_result, types.Number) + # All results must be equal + for other in res[1:]: + self.assertEqual(first_result, other) + + def test_unify_number_pair(self): + self.unify_number_pair_test(2) + self.unify_number_pair_test(3) + + def test_none_to_optional(self): + """ + Test unification of `none` and multiple number types to optional type + """ + ctx = typing.Context() + for tys in itertools.combinations(types.number_domain, 2): + # First unify without none, to provide the control value + tys = list(tys) + expected = types.Optional(ctx.unify_types(*tys)) + results = [ctx.unify_types(*comb) + for comb in itertools.permutations(tys + [types.none])] + # All results must be equal + for res in results: + self.assertEqual(res, expected) + + def test_none(self): + aty = types.none + bty = types.none + self.assert_unify(aty, bty, types.none) + + def test_optional(self): + aty = types.Optional(i32) + bty = types.none + self.assert_unify(aty, bty, aty) + aty = types.Optional(i32) + bty = types.Optional(i64) + self.assert_unify(aty, bty, bty) + aty = types.Optional(i32) + bty = i64 + self.assert_unify(aty, bty, types.Optional(i64)) + # Failure + aty = types.Optional(i32) + bty = types.Optional(types.slice3_type) + self.assert_unify_failure(aty, bty) + + def test_tuple(self): + aty = types.UniTuple(i32, 3) + bty = 
types.UniTuple(i64, 3) + self.assert_unify(aty, bty, types.UniTuple(i64, 3)) + # (Tuple, UniTuple) -> Tuple + aty = types.UniTuple(i32, 2) + bty = types.Tuple((i16, i64)) + self.assert_unify(aty, bty, types.Tuple((i32, i64))) + aty = types.UniTuple(i64, 0) + bty = types.Tuple(()) + self.assert_unify(aty, bty, bty) + # (Tuple, Tuple) -> Tuple + aty = types.Tuple((i8, i16, i32)) + bty = types.Tuple((i32, i16, i8)) + self.assert_unify(aty, bty, types.Tuple((i32, i16, i32))) + aty = types.Tuple((i8, i32)) + bty = types.Tuple((i32, i8)) + self.assert_unify(aty, bty, types.Tuple((i32, i32))) + aty = types.Tuple((i8, i16)) + bty = types.Tuple((i16, i8)) + self.assert_unify(aty, bty, types.Tuple((i16, i16))) + # Different number kinds + aty = types.UniTuple(f64, 3) + bty = types.UniTuple(c64, 3) + self.assert_unify(aty, bty, types.UniTuple(c128, 3)) + # Tuples of tuples + aty = types.UniTuple(types.Tuple((u32, f32)), 2) + bty = types.UniTuple(types.Tuple((i16, f32)), 2) + self.assert_unify(aty, bty, + types.UniTuple(types.Tuple((i64, f32)), 2)) + # Failures + aty = types.UniTuple(i32, 1) + bty = types.UniTuple(types.slice3_type, 1) + self.assert_unify_failure(aty, bty) + aty = types.UniTuple(i32, 1) + bty = types.UniTuple(i32, 2) + self.assert_unify_failure(aty, bty) + aty = types.Tuple((i8, types.slice3_type)) + bty = types.Tuple((i32, i8)) + self.assert_unify_failure(aty, bty) + + def test_optional_tuple(self): + # Unify to optional tuple + aty = types.none + bty = types.UniTuple(i32, 2) + self.assert_unify(aty, bty, types.Optional(types.UniTuple(i32, 2))) + aty = types.Optional(types.UniTuple(i16, 2)) + bty = types.UniTuple(i32, 2) + self.assert_unify(aty, bty, types.Optional(types.UniTuple(i32, 2))) + # Unify to tuple of optionals + aty = types.Tuple((types.none, i32)) + bty = types.Tuple((i16, types.none)) + self.assert_unify(aty, bty, types.Tuple((types.Optional(i16), + types.Optional(i32)))) + aty = types.Tuple((types.Optional(i32), i64)) + bty = types.Tuple((i16, 
types.Optional(i8))) + self.assert_unify(aty, bty, types.Tuple((types.Optional(i32), + types.Optional(i64)))) + + def test_arrays(self): + aty = types.Array(i32, 3, "C") + bty = types.Array(i32, 3, "A") + self.assert_unify(aty, bty, bty) + aty = types.Array(i32, 3, "C") + bty = types.Array(i32, 3, "F") + self.assert_unify(aty, bty, types.Array(i32, 3, "A")) + aty = types.Array(i32, 3, "C") + bty = types.Array(i32, 3, "C", readonly=True) + self.assert_unify(aty, bty, bty) + aty = types.Array(i32, 3, "A") + bty = types.Array(i32, 3, "C", readonly=True) + self.assert_unify(aty, bty, + types.Array(i32, 3, "A", readonly=True)) + # Failures + aty = types.Array(i32, 2, "C") + bty = types.Array(i32, 3, "C") + self.assert_unify_failure(aty, bty) + aty = types.Array(i32, 2, "C") + bty = types.Array(u32, 2, "C") + self.assert_unify_failure(aty, bty) + + def test_list(self): + aty = types.List(types.undefined) + bty = types.List(i32) + self.assert_unify(aty, bty, bty) + aty = types.List(i16) + bty = types.List(i32) + self.assert_unify(aty, bty, bty) + aty = types.List(types.Tuple([i32, i16])) + bty = types.List(types.Tuple([i16, i64])) + cty = types.List(types.Tuple([i32, i64])) + self.assert_unify(aty, bty, cty) + # Different reflections + aty = types.List(i16, reflected=True) + bty = types.List(i32) + cty = types.List(i32, reflected=True) + self.assert_unify(aty, bty, cty) + # Incompatible dtypes + aty = types.List(i16) + bty = types.List(types.Tuple([i16])) + self.assert_unify_failure(aty, bty) + + def test_set(self): + # Different reflections + aty = types.Set(i16, reflected=True) + bty = types.Set(i32) + cty = types.Set(i32, reflected=True) + self.assert_unify(aty, bty, cty) + # Incompatible dtypes + aty = types.Set(i16) + bty = types.Set(types.Tuple([i16])) + self.assert_unify_failure(aty, bty) + + def test_range(self): + aty = types.range_state32_type + bty = types.range_state64_type + self.assert_unify(aty, bty, bty) + + +class 
TestTypeConversion(CompatibilityTestMixin, unittest.TestCase): + """ + Test for conversion between types with a typing context. + """ + + def assert_can_convert(self, aty, bty, expected): + ctx = typing.Context() + got = ctx.can_convert(aty, bty) + self.assertEqual(got, expected) + + def assert_cannot_convert(self, aty, bty): + ctx = typing.Context() + got = ctx.can_convert(aty, bty) + self.assertIsNone(got) + + def test_convert_number_types(self): + # Check that Context.can_convert() is compatible with the default + # number conversion rules registered in the typeconv module + # (which is used internally by the C _Dispatcher object). + ctx = typing.Context() + self.check_number_compatibility(ctx.can_convert) + + def test_tuple(self): + # UniTuple -> UniTuple + aty = types.UniTuple(i32, 3) + bty = types.UniTuple(i64, 3) + self.assert_can_convert(aty, aty, Conversion.exact) + self.assert_can_convert(aty, bty, Conversion.promote) + aty = types.UniTuple(i32, 3) + bty = types.UniTuple(f64, 3) + self.assert_can_convert(aty, bty, Conversion.safe) + # Tuple -> Tuple + aty = types.Tuple((i32, i32)) + bty = types.Tuple((i32, i64)) + self.assert_can_convert(aty, bty, Conversion.promote) + # UniTuple <-> Tuple + aty = types.UniTuple(i32, 2) + bty = types.Tuple((i32, i64)) + self.assert_can_convert(aty, bty, Conversion.promote) + self.assert_can_convert(bty, aty, Conversion.unsafe) + # Empty tuples + aty = types.UniTuple(i64, 0) + bty = types.UniTuple(i32, 0) + cty = types.Tuple(()) + self.assert_can_convert(aty, bty, Conversion.safe) + self.assert_can_convert(bty, aty, Conversion.safe) + self.assert_can_convert(aty, cty, Conversion.safe) + self.assert_can_convert(cty, aty, Conversion.safe) + # Failures + aty = types.UniTuple(i64, 3) + bty = types.UniTuple(types.none, 3) + self.assert_cannot_convert(aty, bty) + aty = types.UniTuple(i64, 2) + bty = types.UniTuple(i64, 3) + + def test_arrays(self): + # Different layouts + aty = types.Array(i32, 3, "C") + bty = types.Array(i32, 
3, "A") + self.assert_can_convert(aty, bty, Conversion.safe) + aty = types.Array(i32, 2, "C") + bty = types.Array(i32, 2, "F") + self.assert_cannot_convert(aty, bty) + # Different mutabilities + aty = types.Array(i32, 3, "C") + bty = types.Array(i32, 3, "C", readonly=True) + self.assert_can_convert(aty, aty, Conversion.exact) + self.assert_can_convert(bty, bty, Conversion.exact) + self.assert_can_convert(aty, bty, Conversion.safe) + self.assert_cannot_convert(bty, aty) + # Various failures + aty = types.Array(i32, 2, "C") + bty = types.Array(i32, 3, "C") + self.assert_cannot_convert(aty, bty) + aty = types.Array(i32, 2, "C") + bty = types.Array(i64, 2, "C") + self.assert_cannot_convert(aty, bty) + + def test_optional(self): + aty = types.int32 + bty = types.Optional(i32) + self.assert_can_convert(types.none, bty, Conversion.promote) + self.assert_can_convert(aty, bty, Conversion.promote) + self.assert_cannot_convert(bty, types.none) + self.assert_can_convert(bty, aty, Conversion.safe) # XXX ??? + # Optional array + aty = types.Array(i32, 2, "C") + bty = types.Optional(aty) + self.assert_can_convert(types.none, bty, Conversion.promote) + self.assert_can_convert(aty, bty, Conversion.promote) + self.assert_can_convert(bty, aty, Conversion.safe) + aty = types.Array(i32, 2, "C") + bty = types.Optional(aty.copy(layout="A")) + self.assert_can_convert(aty, bty, Conversion.safe) # C -> A + self.assert_cannot_convert(bty, aty) # A -> C + aty = types.Array(i32, 2, "C") + bty = types.Optional(aty.copy(layout="F")) + self.assert_cannot_convert(aty, bty) + self.assert_cannot_convert(bty, aty) + + +class TestResolveOverload(unittest.TestCase): + """ + Tests for typing.Context.resolve_overload(). 
+ """ + + def assert_resolve_overload(self, cases, args, expected): + ctx = typing.Context() + got = ctx.resolve_overload("foo", cases, args, {}) + self.assertEqual(got, expected) + + def test_non_ambiguous_match(self): + def check(args, expected): + self.assert_resolve_overload(cases, args, expected) + # Order shouldn't matter here + self.assert_resolve_overload(cases[::-1], args, expected) + + cases = [i8(i8, i8), i32(i32, i32), f64(f64, f64)] + # Exact match + check((i8, i8), cases[0]) + check((i32, i32), cases[1]) + check((f64, f64), cases[2]) + # "Promote" conversion + check((i8, i16), cases[1]) + check((i32, i8), cases[1]) + check((i32, i8), cases[1]) + check((f32, f32), cases[2]) + # "Safe" conversion + check((u32, u32), cases[2]) + # "Unsafe" conversion + check((i64, i64), cases[2]) + + def test_ambiguous_match(self): + # When the best match is ambiguous (there is a tie), the first + # best case in original sequence order should be returned. + def check(args, expected, expected_reverse): + self.assert_resolve_overload(cases, args, expected) + self.assert_resolve_overload(cases[::-1], args, expected_reverse) + + cases = [i16(i16, i16), i32(i32, i32), f64(f64, f64)] + # Two "promote" conversions + check((i8, i8), cases[0], cases[1]) + # Two "safe" conversions + check((u16, u16), cases[1], cases[2]) + + cases = [i32(i32, i32), f32(f32, f32)] + # Two "unsafe" conversions + check((u32, u32), cases[0], cases[1]) + + def test_ambiguous_error(self): + ctx = typing.Context() + cases = [i16(i16, i16), i32(i32, i32)] + with self.assertRaises(TypeError) as raises: + ctx.resolve_overload("foo", cases, (i8, i8), {}, + allow_ambiguous=False) + self.assertEqual(str(raises.exception).splitlines(), + ["Ambiguous overloading for foo (int8, int8):", + "(int16, int16) -> int16", + "(int32, int32) -> int32", + ]) + + +class TestUnifyUseCases(unittest.TestCase): + """ + Concrete cases where unification would fail. 
+ """ + + @staticmethod + def _actually_test_complex_unify(): + def pyfunc(a): + res = 0.0 + for i in range(len(a)): + res += a[i] + return res + + argtys = (types.Array(c128, 1, 'C'),) + cfunc = njit(argtys)(pyfunc) + return (pyfunc, cfunc) + + def test_complex_unify_issue599(self): + pyfunc, cfunc = self._actually_test_complex_unify() + arg = np.array([1.0j]) + self.assertEqual(cfunc(arg), pyfunc(arg)) + + def test_complex_unify_issue599_multihash(self): + """ + Test issue #599 for multiple values of PYTHONHASHSEED. + """ + env = os.environ.copy() + for seedval in (1, 2, 1024): + env['PYTHONHASHSEED'] = str(seedval) + subproc = subprocess.Popen( + [sys.executable, '-c', + 'import numba.tests.test_typeinfer as test_mod\n' + + 'test_mod.TestUnifyUseCases._actually_test_complex_unify()'], + env=env) + subproc.wait() + self.assertEqual(subproc.returncode, 0, 'Child process failed.') + + def test_int_tuple_unify(self): + """ + Test issue #493 + """ + def foo(an_int32, an_int64): + a = an_int32, an_int32 + while True: # infinite loop + a = an_int32, an_int64 + return a + + args = (i32, i64) + # Check if compilation is successful + njit(args)(foo) + + +def issue_797(x0, y0, x1, y1, grid): + nrows, ncols = grid.shape + + dx = abs(x1 - x0) + dy = abs(y1 - y0) + + sx = 0 + if x0 < x1: + sx = 1 + else: + sx = -1 + sy = 0 + if y0 < y1: + sy = 1 + else: + sy = -1 + + err = dx - dy + + while True: + if x0 == x1 and y0 == y1: + break + + if 0 <= x0 < nrows and 0 <= y0 < ncols: + grid[x0, y0] += 1 + + e2 = 2 * err + if e2 > -dy: + err -= dy + x0 += sx + if e2 < dx: + err += dx + y0 += sy + + +def issue_1080(a, b): + if not a: + return True + return b + + +def list_unify_usecase1(n): + res = 0 + x = [] + if n < 10: + x.append(np.int32(n)) + else: + for i in range(n): + x.append(np.int64(i)) + x.append(5.0) + + # Note `i` and `j` may have different types (int64 vs. 
int32) + for j in range(len(x)): + res += j * x[j] + for val in x: + res += int(val) & len(x) + while len(x) > 0: + res += x.pop() + return res + +def list_unify_usecase2(n): + res = [] + for i in range(n): + if i & 1: + res.append((i, 1.0)) + else: + res.append((2.0, i)) + res.append((123j, 42)) + return res + +def range_unify_usecase(v): + if v: + r = range(np.int32(3)) + else: + r = range(np.int64(5)) + for x in r: + return x + +def issue_1394(a): + if a: + for i in range(a): + a += i + i = 1.2 + else: + i = 3 + return a, i + + +class TestMiscIssues(TestCase): + + def test_issue_797(self): + """https://github.com/numba/numba/issues/797#issuecomment-58592401 + + Undeterministic triggering of tuple coercion error + """ + foo = jit(nopython=True)(issue_797) + g = np.zeros(shape=(10, 10), dtype=np.int32) + foo(np.int32(0), np.int32(0), np.int32(1), np.int32(1), g) + + def test_issue_1080(self): + """https://github.com/numba/numba/issues/1080 + + Erroneous promotion of boolean args to int64 + """ + foo = jit(nopython=True)(issue_1080) + foo(True, False) + + def test_list_unify1(self): + """ + Exercise back-propagation of refined list type. + """ + pyfunc = list_unify_usecase1 + cfunc = jit(nopython=True)(pyfunc) + for n in [5, 100]: + res = cfunc(n) + self.assertPreciseEqual(res, pyfunc(n)) + + def test_list_unify2(self): + pyfunc = list_unify_usecase2 + cfunc = jit(nopython=True)(pyfunc) + res = cfunc(3) + # NOTE: the types will differ (Numba returns a homogeneous list with + # converted values). 
+ self.assertEqual(res, pyfunc(3)) + + def test_range_unify(self): + pyfunc = range_unify_usecase + cfunc = jit(nopython=True)(pyfunc) + for v in (0, 1): + res = cfunc(v) + self.assertPreciseEqual(res, pyfunc(v)) + + def test_issue_1394(self): + pyfunc = issue_1394 + cfunc = jit(nopython=True)(pyfunc) + for v in (0, 1, 2): + res = cfunc(v) + self.assertEqual(res, pyfunc(v)) + + def test_issue_6293(self): + """https://github.com/numba/numba/issues/6293 + + Typer does not propagate return type to all return variables + """ + @jit(nopython=True) + def confuse_typer(x): + if x == x: + return int(x) + else: + return x + + confuse_typer.compile((types.float64,)) + cres = confuse_typer.overloads[(types.float64,)] + typemap = cres.type_annotation.typemap + return_vars = {} + + for block in cres.type_annotation.blocks.values(): + for inst in block.body: + if isinstance(inst, ir.Return): + varname = inst.value.name + return_vars[varname] = typemap[varname] + + self.assertTrue(all(vt == types.float64 for vt in return_vars.values())) + + def test_issue_9162(self): + @overload_method(types.Array, "aabbcc") + def ol_aabbcc(self): + + def impl(self): + return self.sum() + + return impl + + @jit + def foo(ar): + return ar.aabbcc() + + ar = np.ones(2) + ret = foo(ar) + + overload = [value for value in foo.overloads.values()][0] + typemap = overload.type_annotation.typemap + calltypes = overload.type_annotation.calltypes + for call_op in calltypes: + name = call_op.list_vars()[0].name + fc_ty = typemap[name] + self.assertIsInstance(fc_ty, types.BoundFunction) + tmplt = fc_ty.template + info = tmplt.get_template_info(tmplt) + py_file = info["filename"] + self.assertIn("test_typeinfer.py", py_file) + + @skip_unless_load_fast_and_clear + def test_load_fast_and_clear(self): + @njit + def foo(a): + [x for x in (0,)] + if a: + # the test code cannot use a constant here due to constant + # propagation issues. 
+ x = 3 + a + x += 10 + return x + + self.assertEqual(foo(True), foo.py_func(True)) + # Interpreted version should raise an exception + with self.assertRaises(UnboundLocalError): + foo.py_func(False) + # Compiled version returns 10 as x is zero initialized. + self.assertEqual(foo(False), 10) + + @skip_unless_load_fast_and_clear + def test_load_fast_and_clear_variant_2(self): + @njit + def foo(): + # The use of a literal False triggers different bytecode generation + # necessary for this test. See test_load_fast_and_clear_variant_4. + if False: + x = 1 + [x for x in (1,)] + # This return uses undefined variable + return x + + with self.assertRaises(errors.TypingError) as raises: + foo() + self.assertIn("return value is undefined", str(raises.exception)) + + @skip_unless_load_fast_and_clear + def test_load_fast_and_clear_variant_3(self): + @njit + def foo(): + # The use of a literal False triggers different bytecode generation + # necessary for this test. See test_load_fast_and_clear_variant_4. + if False: + x = 1 + [x for x in (1,)] + # This print uses undefined variable + print(1, 2, 3, x) + + with self.assertRaises(errors.TypingError) as raises: + foo() + self.assertIn("undefined variable used in call argument #4", str(raises.exception)) + + @skip_unless_load_fast_and_clear + def test_load_fast_and_clear_variant_4(self): + @njit + def foo(a): + # This test variant is to show that non-literal boolean value here + # produces a different behavior. 
+ if a: + x = a + [x for x in (1,)] + return x + self.assertEqual(foo(123), 123) + self.assertEqual(foo(0), 0) + + +class TestFoldArguments(unittest.TestCase): + def check_fold_arguments_list_inputs(self, func, args, kws): + def make_tuple(*args): + return args + + unused_handler = None + + pysig = utils.pysignature(func) + names = list(pysig.parameters) + + with self.subTest(kind='dict'): + folded_dict = typing.fold_arguments( + pysig, args, kws, make_tuple, unused_handler, unused_handler, + ) + # correct ordering + for i, (j, k) in enumerate(zip(folded_dict, names)): + (got_index, got_param, got_name) = j + self.assertEqual(got_index, i) + self.assertEqual(got_name, f'arg.{k}') + + kws = list(kws.items()) + with self.subTest(kind='list'): + folded_list = typing.fold_arguments( + pysig, args, kws, make_tuple, unused_handler, unused_handler, + ) + self.assertEqual(folded_list, folded_dict) + + def test_fold_arguments_list_inputs(self): + cases = [ + dict( + func=lambda a, b, c, d: None, + args=['arg.a', 'arg.b'], + kws=dict(c='arg.c', d='arg.d') + ), + dict( + func=lambda: None, + args=[], + kws=dict(), + ), + dict( + func=lambda a: None, + args=['arg.a'], + kws={}, + ), + dict( + func=lambda a: None, + args=[], + kws=dict(a='arg.a'), + ), + ] + for case in cases: + with self.subTest(**case): + self.check_fold_arguments_list_inputs(**case) + + +@register_pass(mutates_CFG=False, analysis_only=True) +class DummyCR(FunctionPass): + """Dummy pass to add "cr" to compiler state to avoid errors in TyperCompiler since + it doesn't have lowering. + """ + + _name = "dummy_cr" + + def __init__(self): + FunctionPass.__init__(self) + + def run_pass(self, state): + state.cr = 1 # arbitrary non-None value + return True + + +class TyperCompiler(numba.core.compiler.CompilerBase): + """A compiler pipeline that skips passes after typing (provides partial typing info + but not lowering). 
+ """ + + def define_pipelines(self): + pm = numba.core.compiler_machinery.PassManager("custom_pipeline") + pm.add_pass(TranslateByteCode, "analyzing bytecode") + pm.add_pass(IRProcessing, "processing IR") + pm.add_pass(PartialTypeInference, "do partial typing") + pm.add_pass_after(DummyCR, PartialTypeInference) + pm.finalize() + return [pm] + + +def get_func_typing_errs(func, arg_types): + """ + Get typing errors for function 'func'. It creates a pipeline that runs untyped + passes as well as type inference. + """ + typingctx = numba.core.registry.cpu_target.typing_context + targetctx = numba.core.registry.cpu_target.target_context + library = None + return_type = None + _locals = {} + flags = numba.core.compiler.Flags() + flags.nrt = True + + pipeline = TyperCompiler( + typingctx, targetctx, library, arg_types, return_type, flags, _locals + ) + pipeline.compile_extra(func) + return pipeline.state.typing_errors + + +class TestPartialTypingErrors(unittest.TestCase): + """ + Make sure partial typing stores type errors in compiler state properly + """ + def test_partial_typing_error(self): + # example with type unification error + def impl(flag): + if flag: + a = 1 + else: + a = str(1) + return a + + typing_errs = get_func_typing_errs(impl, (types.bool_,)) + self.assertTrue(isinstance(typing_errs, list) and len(typing_errs) == 1) + self.assertTrue(isinstance(typing_errs[0], errors.TypingError) and + "Cannot unify" in typing_errs[0].msg) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_typenames.py b/venv/lib/python3.10/site-packages/numba/tests/test_typenames.py new file mode 100644 index 0000000000000000000000000000000000000000..531a8929747d8d565d71d7594904fc8229d01e0a --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_typenames.py @@ -0,0 +1,17 @@ +import numpy as np + +from numba.core import types +import unittest + + +class TestTypeNames(unittest.TestCase): + def 
test_numpy_integers(self): + expect = getattr(types, "int%d" % (np.dtype("int").itemsize * 8)) + self.assertEqual(types.int_, expect) + + expect = getattr(types, "uint%d" % (np.dtype("uint").itemsize * 8)) + self.assertEqual(types.uint, expect) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_typeof.py b/venv/lib/python3.10/site-packages/numba/tests/test_typeof.py new file mode 100644 index 0000000000000000000000000000000000000000..077af16966037a3edb15f29ad91753669e32e6b9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_typeof.py @@ -0,0 +1,601 @@ +""" +Tests for the typeof() machinery. +""" +import array +from collections import namedtuple +import enum +import mmap +import typing as py_typing +import random +import string + +import numpy as np + +import unittest +from numba import jit, _dispatcher +from numba.core import types +from numba.core.errors import NumbaValueError, NumbaTypeError +from numba.misc.special import typeof +from numba.core.dispatcher import OmittedArg +from numba._dispatcher import compute_fingerprint + +from numba.tests.support import TestCase, skip_unless_cffi, tag +from numba.tests.test_numpy_support import ValueTypingTestBase +from numba.tests.ctypes_usecases import * +from numba.tests.enum_usecases import * +from numba.np import numpy_support + + +recordtype = np.dtype([('a', np.float64), + ('b', np.int32), + ('c', np.complex64), + ('d', (np.str_, 5))]) + +recordtype2 = np.dtype([('e', np.int8), + ('f', np.float64)]) + +recordtype3 = np.dtype([('e', np.int8), + ('f', np.float64)], align=True) + +Point = namedtuple('Point', ('x', 'y')) + +Rect = namedtuple('Rect', ('width', 'height')) + + +class Custom(object): + + @property + def _numba_type_(self): + """ + Magic attribute expected by Numba to get the numba type that + represents this object. 
+ """ + return types.UniTuple(types.boolean, 42) + + +class TestTypeof(ValueTypingTestBase, TestCase): + """ + Test typeof() and, implicitly, typing.Context.get_argument_type(). + """ + + def test_number_values(self): + """ + Test special.typeof() with scalar number values. + """ + self.check_number_values(typeof) + # These values mirror Dispatcher semantics + self.assertEqual(typeof(1), types.intp) + self.assertEqual(typeof(-1), types.intp) + self.assertEqual(typeof(2**40), types.int64) + self.assertEqual(typeof(2**63), types.uint64) + self.assertEqual(typeof(2**63 - 1), types.int64) + self.assertEqual(typeof(-2**63), types.int64) + + def test_datetime_values(self): + """ + Test special.typeof() with np.timedelta64 values. + """ + self.check_datetime_values(typeof) + + def test_timedelta_values(self): + """ + Test special.typeof() with np.timedelta64 values. + """ + self.check_timedelta_values(typeof) + + def test_array_values(self): + """ + Test special.typeof() with ndarray values. + """ + def check(arr, ndim, layout, mutable, aligned): + ty = typeof(arr) + self.assertIsInstance(ty, types.Array) + self.assertEqual(ty.ndim, ndim) + self.assertEqual(ty.layout, layout) + self.assertEqual(ty.mutable, mutable) + self.assertEqual(ty.aligned, aligned) + + a1 = np.arange(10) + check(a1, 1, 'C', True, True) + a2 = np.arange(10).reshape(2, 5) + check(a2, 2, 'C', True, True) + check(a2.T, 2, 'F', True, True) + a3 = (np.arange(60))[::2].reshape((2, 5, 3)) + check(a3, 3, 'A', True, True) + a4 = np.arange(1).reshape(()) + check(a4, 0, 'C', True, True) + a4.flags.writeable = False + check(a4, 0, 'C', False, True) + + # Unsupported dtype + a5 = a1.astype(a1.dtype.newbyteorder()) + with self.assertRaises(NumbaValueError) as raises: + typeof(a5) + self.assertIn("Unsupported array dtype: %s" % (a5.dtype,), + str(raises.exception)) + + # Unsupported array type (masked array) + with self.assertRaises(NumbaTypeError) as raises: + masked_arr = np.ma.MaskedArray([1]) + 
typeof(masked_arr) + self.assertIn(f"Unsupported array type: numpy.ma.MaskedArray", + str(raises.exception)) + + def test_structured_arrays(self): + def check(arr, dtype, ndim, layout, aligned): + ty = typeof(arr) + self.assertIsInstance(ty, types.Array) + self.assertEqual(ty.dtype, dtype) + self.assertEqual(ty.ndim, ndim) + self.assertEqual(ty.layout, layout) + self.assertEqual(ty.aligned, aligned) + + dtype = np.dtype([('m', np.int32), ('n', 'S5')]) + rec_ty = numpy_support.from_struct_dtype(dtype) + + arr = np.empty(4, dtype=dtype) + check(arr, rec_ty, 1, "C", False) + arr = np.recarray(4, dtype=dtype) + check(arr, rec_ty, 1, "C", False) + + dtype = np.dtype([('m', np.int32), ('n', 'S5')], align=True) + rec_ty = numpy_support.from_struct_dtype(dtype) + + arr = np.empty(4, dtype=dtype) + check(arr, rec_ty, 1, "C", True) + arr = np.recarray(4, dtype=dtype) + check(arr, rec_ty, 1, "C", True) + + def test_buffers(self): + b = b"xx" + ty = typeof(b) + self.assertEqual(ty, types.Bytes(types.uint8, 1, "C")) + self.assertFalse(ty.mutable) + ty = typeof(memoryview(b)) + self.assertEqual(ty, types.MemoryView(types.uint8, 1, "C", + readonly=True)) + self.assertFalse(ty.mutable) + ty = typeof(array.array('i', [0, 1, 2])) + self.assertEqual(ty, types.PyArray(types.int32, 1, "C")) + self.assertTrue(ty.mutable) + + b = bytearray(10) + ty = typeof(b) + self.assertEqual(ty, types.ByteArray(types.uint8, 1, "C")) + self.assertTrue(ty.mutable) + + def test_none(self): + ty = typeof(None) + self.assertEqual(ty, types.none) + + def test_ellipsis(self): + ty = typeof(Ellipsis) + self.assertEqual(ty, types.ellipsis) + + def test_str(self): + ty = typeof("abc") + self.assertEqual(ty, types.string) + + def test_slices(self): + for args in [(1,), (1, 2), (1, 2, 1), (1, 2, None)]: + v = slice(*args) + self.assertIs(typeof(v), types.slice2_type) + for args in [(1, 2, 2), (1, 2, -1), (None, None, -2)]: + v = slice(*args) + self.assertIs(typeof(v), types.slice3_type) + + def 
test_tuples(self): + v = (1, 2) + self.assertEqual(typeof(v), types.UniTuple(types.intp, 2)) + v = (1, (2.0, 3)) + self.assertEqual(typeof(v), + types.Tuple((types.intp, + types.Tuple((types.float64, types.intp)))) + ) + + def test_lists(self): + v = [1.0] * 100 + self.assertEqual(typeof(v), types.List(types.float64, reflected=True)) + + bad_v = [{1: 3}] + with self.assertRaises(ValueError) as raises: + typeof(bad_v) + self.assertIn("Cannot type list element type", str(raises.exception)) + + def test_sets(self): + v = set([1.0, 2.0, 3.0]) + self.assertEqual(typeof(v), types.Set(types.float64, reflected=True)) + v = frozenset(v) + with self.assertRaises(ValueError) as raises: + typeof(v) + self.assertIn("Cannot determine Numba type of", str(raises.exception)) + + def test_namedtuple(self): + v = Point(1, 2) + tp_point = typeof(v) + self.assertEqual(tp_point, + types.NamedUniTuple(types.intp, 2, Point)) + v = Point(1, 2.0) + self.assertEqual(typeof(v), + types.NamedTuple([types.intp, types.float64], Point)) + w = Rect(3, 4) + tp_rect = typeof(w) + self.assertEqual(tp_rect, + types.NamedUniTuple(types.intp, 2, Rect)) + self.assertNotEqual(tp_rect, tp_point) + self.assertNotEqual(tp_rect, types.UniTuple(tp_rect.dtype, tp_rect.count)) + + def test_enum(self): + tp_red = typeof(Color.red) + self.assertEqual(tp_red, types.EnumMember(Color, types.intp)) + self.assertEqual(tp_red, typeof(Color.blue)) + tp_choc = typeof(Shake.chocolate) + self.assertEqual(tp_choc, types.EnumMember(Shake, types.intp)) + self.assertEqual(tp_choc, typeof(Shake.mint)) + self.assertNotEqual(tp_choc, tp_red) + tp_404 = typeof(RequestError.not_found) + self.assertEqual(tp_404, types.IntEnumMember(RequestError, types.intp)) + self.assertEqual(tp_404, typeof(RequestError.internal_error)) + + with self.assertRaises(ValueError) as raises: + typeof(HeterogeneousEnum.red) + self.assertEqual(str(raises.exception), + "Cannot type heterogeneous enum: got value types complex128, float64") + + def 
test_enum_class(self): + tp_color = typeof(Color) + self.assertEqual(tp_color, types.EnumClass(Color, types.intp)) + tp_shake = typeof(Shake) + self.assertEqual(tp_shake, types.EnumClass(Shake, types.intp)) + self.assertNotEqual(tp_shake, tp_color) + tp_shape = typeof(Shape) + self.assertEqual(tp_shape, types.IntEnumClass(Shape, types.intp)) + tp_error = typeof(RequestError) + self.assertEqual(tp_error, + types.IntEnumClass(RequestError, types.intp)) + self.assertNotEqual(tp_error, tp_shape) + + with self.assertRaises(ValueError) as raises: + typeof(HeterogeneousEnum) + self.assertEqual(str(raises.exception), + "Cannot type heterogeneous enum: got value types complex128, float64") + + def test_dtype(self): + dtype = np.dtype('int64') + self.assertEqual(typeof(dtype), types.DType(types.int64)) + + dtype = np.dtype([('m', np.int32), ('n', 'S5')]) + rec_ty = numpy_support.from_struct_dtype(dtype) + self.assertEqual(typeof(dtype), types.DType(rec_ty)) + + def test_dtype_values(self): + self.assertEqual(typeof(np.int64), types.NumberClass(types.int64)) + self.assertEqual(typeof(np.float64), types.NumberClass(types.float64)) + self.assertEqual(typeof(np.int32), types.NumberClass(types.int32)) + self.assertEqual(typeof(np.int8), types.NumberClass(types.int8)) + + def test_ctypes(self): + ty_cos = typeof(c_cos) + ty_sin = typeof(c_sin) + self.assertIsInstance(ty_cos, types.ExternalFunctionPointer) + self.assertEqual(ty_cos.sig.args, (types.float64,)) + self.assertEqual(ty_cos.sig.return_type, types.float64) + self.assertEqual(ty_cos, ty_sin) + self.assertNotEqual(ty_cos.get_pointer(c_cos), + ty_sin.get_pointer(c_sin)) + + @skip_unless_cffi + def test_cffi(self): + from numba.tests import cffi_usecases as mod + mod.init() + ty_cffi_cos = typeof(mod.cffi_cos) + ty_cffi_sin = typeof(mod.cffi_sin) + ty_cffi_boolean = typeof(mod.cffi_bool) + self.assertIsInstance(ty_cffi_cos, types.ExternalFunctionPointer) + self.assertEqual(ty_cffi_boolean.sig.return_type, types.boolean) + 
self.assertEqual(ty_cffi_cos.sig.args, (types.float64,)) + self.assertEqual(ty_cffi_cos.sig.return_type, types.float64) + self.assertEqual(ty_cffi_cos, ty_cffi_sin) + ty_ctypes_cos = typeof(c_cos) + self.assertNotEqual(ty_cffi_cos, ty_ctypes_cos) + self.assertNotEqual(ty_cffi_cos.get_pointer(mod.cffi_cos), + ty_cffi_sin.get_pointer(mod.cffi_sin)) + self.assertEqual(ty_cffi_cos.get_pointer(mod.cffi_cos), + ty_ctypes_cos.get_pointer(c_cos)) + + def test_custom(self): + ty = typeof(Custom()) + self.assertEqual(ty, types.UniTuple(types.boolean, 42)) + + def test_omitted_args(self): + ty0 = typeof(OmittedArg(0.0)) + ty1 = typeof(OmittedArg(1)) + ty2 = typeof(OmittedArg(1.0)) + ty3 = typeof(OmittedArg(1.0)) + self.assertEqual(ty0, types.Omitted(0.0)) + self.assertEqual(ty1, types.Omitted(1)) + self.assertEqual(ty2, types.Omitted(1.0)) + self.assertEqual(len({ty0, ty1, ty2}), 3) + self.assertEqual(ty3, ty2) + + def test_np_random(self): + rng = np.random.default_rng() + ty_rng = typeof(rng) + ty_bitgen = typeof(rng.bit_generator) + + self.assertEqual(ty_rng, types.npy_rng) + self.assertEqual(ty_bitgen, types.npy_bitgen) + + +class DistinctChecker(object): + + def __init__(self): + self._distinct = set() + + def add(self, obj): + if obj in self._distinct: + raise AssertionError("%r already in %r" % (obj, self._distinct)) + self._distinct.add(obj) + + +class TestFingerprint(TestCase): + """ + Tests for _dispatcher.compute_fingerprint() + + Each fingerprint must denote values of only one Numba type (this is + the condition for correctness), but values of a Numba type may be + denoted by several distinct fingerprints (it only makes the cache + less efficient). 
+ """ + + def test_floats(self): + s = compute_fingerprint(1.0) + self.assertEqual(compute_fingerprint(2.0), s) + s = compute_fingerprint(np.float32()) + self.assertEqual(compute_fingerprint(np.float32(2.0)), s) + self.assertNotEqual(compute_fingerprint(np.float64()), s) + + def test_ints(self): + s = compute_fingerprint(1) + for v in (-1, 2**60): + self.assertEqual(compute_fingerprint(v), s) + # Different int widths resolve to different fingerprints + distinct = set() + for tp in ('int8', 'int16', 'int32', 'int64', + 'uint8', 'uint16', 'uint32', 'uint64'): + tp = getattr(np, tp) + distinct.add(compute_fingerprint(tp())) + self.assertEqual(len(distinct), 8, distinct) + + def test_bool(self): + s = compute_fingerprint(True) + self.assertEqual(compute_fingerprint(False), s) + self.assertNotEqual(compute_fingerprint(1), s) + + def test_complex(self): + s = compute_fingerprint(1j) + self.assertEqual(s, compute_fingerprint(1+0j)) + s = compute_fingerprint(np.complex64()) + self.assertEqual(compute_fingerprint(np.complex64(2.0)), s) + self.assertNotEqual(compute_fingerprint(np.complex128()), s) + + def test_none(self): + compute_fingerprint(None) + + def test_enums(self): + # Enums should fail fingerprinting, even IntEnums + with self.assertRaises(NotImplementedError): + compute_fingerprint(Color.red) + with self.assertRaises(NotImplementedError): + compute_fingerprint(RequestError.not_found) + + def test_records(self): + d1 = np.dtype([('m', np.int32), ('n', np.int64)]) + d2 = np.dtype([('m', np.int32), ('n', np.int16)]) + v1 = np.empty(1, dtype=d1)[0] + v2 = np.empty(1, dtype=d2)[0] + self.assertNotEqual(compute_fingerprint(v1), + compute_fingerprint(v2)) + + def test_datetime(self): + a = np.datetime64(1, 'Y') + b = np.datetime64(2, 'Y') + c = np.datetime64(2, 's') + d = np.timedelta64(2, 's') + self.assertEqual(compute_fingerprint(a), + compute_fingerprint(b)) + distinct = set(compute_fingerprint(x) for x in (a, c, d)) + self.assertEqual(len(distinct), 3, distinct) + 
+ def test_arrays(self): + distinct = DistinctChecker() + + # 1D + arr = np.empty(4, dtype=np.float64) + s = compute_fingerprint(arr) + distinct.add(s) + self.assertEqual(compute_fingerprint(arr[:1]), s) + # Non-contiguous + distinct.add(compute_fingerprint(arr[::2])) + # Other type + distinct.add(compute_fingerprint(arr.astype(np.complex64))) + # Readonly + arr.setflags(write=False) + distinct.add(compute_fingerprint(arr)) + + # 2D + arr = np.empty((4, 4), dtype=np.float64) + distinct.add(compute_fingerprint(arr)) + # F-contiguous + distinct.add(compute_fingerprint(arr.T)) + # Non-contiguous + distinct.add(compute_fingerprint(arr[::2])) + + # 0D + arr = np.empty((), dtype=np.float64) + distinct.add(compute_fingerprint(arr)) + + # Structured arrays + arr = np.empty(5, dtype=recordtype) + s = compute_fingerprint(arr) + distinct.add(s) + self.assertEqual(compute_fingerprint(arr[:1]), s) + arr = np.empty(5, dtype=recordtype2) + distinct.add(compute_fingerprint(arr)) + arr = np.empty(5, dtype=recordtype3) + distinct.add(compute_fingerprint(arr)) + + # np.recarray() is peculiar: it creates a new dtype instance in + # its constructor; check that the fingerprint remains efficient + a = np.recarray(1, dtype=recordtype) + b = np.recarray(1, dtype=recordtype) + self.assertEqual(compute_fingerprint(a), + compute_fingerprint(b)) + + def test_buffers(self): + distinct = DistinctChecker() + + s = compute_fingerprint(b'') + self.assertEqual(compute_fingerprint(b'xx'), s) + distinct.add(s) + distinct.add(compute_fingerprint(bytearray())) + distinct.add(compute_fingerprint(memoryview(b''))) + m_uint8_1d = compute_fingerprint(memoryview(bytearray())) + distinct.add(m_uint8_1d) + + arr = array.array('B', [42]) + distinct.add(compute_fingerprint(arr)) + self.assertEqual(compute_fingerprint(memoryview(arr)), m_uint8_1d) + for array_code in 'bi': + arr = array.array(array_code, [0, 1, 2]) + distinct.add(compute_fingerprint(arr)) + distinct.add(compute_fingerprint(memoryview(arr))) + + 
arr = np.empty(16, dtype=np.uint8) + distinct.add(compute_fingerprint(arr)) + self.assertEqual(compute_fingerprint(memoryview(arr)), m_uint8_1d) + arr = arr.reshape((4, 4)) + distinct.add(compute_fingerprint(arr)) + distinct.add(compute_fingerprint(memoryview(arr))) + arr = arr.T + distinct.add(compute_fingerprint(arr)) + distinct.add(compute_fingerprint(memoryview(arr))) + arr = arr[::2] + distinct.add(compute_fingerprint(arr)) + distinct.add(compute_fingerprint(memoryview(arr))) + + m = mmap.mmap(-1, 16384) + distinct.add(compute_fingerprint(m)) + self.assertEqual(compute_fingerprint(memoryview(m)), m_uint8_1d) + + def test_dtype(self): + distinct = DistinctChecker() + + s = compute_fingerprint(np.dtype('int64')) + self.assertEqual(compute_fingerprint(np.dtype('int64')), s) + distinct.add(s) + + for descr in ('int32', 'm8[s]', 'm8[W]', 'M8[s]'): + distinct.add(np.dtype(descr)) + + distinct.add(recordtype) + distinct.add(recordtype2) + + # np.recarray() is peculiar: it creates a new dtype instance in + # its constructor; check that the fingerprint remains efficient + a = np.recarray(1, dtype=recordtype) + b = np.recarray(1, dtype=recordtype) + self.assertEqual(compute_fingerprint(a.dtype), + compute_fingerprint(b.dtype)) + + def test_tuples(self): + distinct = DistinctChecker() + + s = compute_fingerprint((1,)) + self.assertEqual(compute_fingerprint((2,)), s) + distinct.add(s) + + distinct.add(compute_fingerprint(())) + distinct.add(compute_fingerprint((1, 2, 3))) + distinct.add(compute_fingerprint((1j, 2, 3))) + distinct.add(compute_fingerprint((1, (), np.empty(5)))) + distinct.add(compute_fingerprint((1, (), np.empty((5, 1))))) + + def test_lists(self): + distinct = DistinctChecker() + + s = compute_fingerprint([1]) + self.assertEqual(compute_fingerprint([2, 3]), s) + distinct.add(s) + + distinct.add(compute_fingerprint([1j])) + distinct.add(compute_fingerprint([4.5, 6.7])) + distinct.add(compute_fingerprint([(1,)])) + + with self.assertRaises(ValueError): + 
compute_fingerprint([]) + + def test_sets(self): + distinct = DistinctChecker() + + s = compute_fingerprint(set([1])) + self.assertEqual(compute_fingerprint(set([2, 3])), s) + distinct.add(s) + + distinct.add(compute_fingerprint([1])) + distinct.add(compute_fingerprint(set([1j]))) + distinct.add(compute_fingerprint(set([4.5, 6.7]))) + distinct.add(compute_fingerprint(set([(1,)]))) + + with self.assertRaises(ValueError): + compute_fingerprint(set()) + with self.assertRaises(NotImplementedError): + compute_fingerprint(frozenset([2, 3])) + + def test_omitted_args(self): + distinct = DistinctChecker() + + v0 = OmittedArg(0.0) + v1 = OmittedArg(1.0) + v2 = OmittedArg(1) + + s = compute_fingerprint(v0) + self.assertEqual(compute_fingerprint(v1), s) + distinct.add(s) + distinct.add(compute_fingerprint(v2)) + distinct.add(compute_fingerprint(0.0)) + distinct.add(compute_fingerprint(1)) + + def test_complicated_type(self): + # Generating a large fingerprint + t = None + for i in range(1000): + t = (t,) + s = compute_fingerprint(t) + + +class TestTypeOfMemCpy(TestCase): + + def test_memcpy_typeof_buffer(self): + # https://github.com/numba/numba/issues/9097 + # bug is fixed if the code below compiles + random.seed(0) + chars = string.ascii_letters + n = 256 + field = "".join([chars[random.randint(0, len(chars) - 1)] for x in range(n)]) + for i in range(1, n): + lfield = field[:i] + nt_ty = namedtuple("tuplename", lfield) + nt = nt_ty(1) + fp = _dispatcher.compute_fingerprint(nt) + nt_name = nt.__class__.__name__ + expected = f"{nt_name}({lfield}i)" + self.assertEqual( + expected, + fp.decode(), + f"iteration {i} failed, {expected} != {fp.decode()}" + ) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_types.py b/venv/lib/python3.10/site-packages/numba/tests/test_types.py new file mode 100644 index 0000000000000000000000000000000000000000..ce05fab65a4992a87cfd4b39a6e360b04a150a2c --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/numba/tests/test_types.py @@ -0,0 +1,879 @@ +""" +Tests for numba.types. +""" + + +from collections import namedtuple +import gc +import os +import operator +import sys +import weakref + +import numpy as np + +from numba.core import types, typing, errors, sigutils +from numba.core.types.abstract import _typecache +from numba.core.types.functions import _header_lead +from numba.core.typing.templates import make_overload_template +from numba import jit, njit, typeof +from numba.core.extending import (overload, register_model, models, unbox, + NativeValue, typeof_impl) +from numba.tests.support import TestCase, create_temp_module +from numba.tests.enum_usecases import Color, Shake, Shape +import unittest +from numba.np import numpy_support + + +try: + import cPickle as pickle +except ImportError: + import pickle + + +Point = namedtuple('Point', ('x', 'y')) + +Rect = namedtuple('Rect', ('width', 'height')) + + +def gen(x): + yield x + 1 + + +class Dummy(object): + pass + + +class TestTypes(TestCase): + + def test_equality(self): + self.assertEqual(types.int32, types.int32) + self.assertEqual(types.uint32, types.uint32) + self.assertEqual(types.complex64, types.complex64) + self.assertEqual(types.float32, types.float32) + # Different signedness + self.assertNotEqual(types.int32, types.uint32) + # Different width + self.assertNotEqual(types.int64, types.int32) + self.assertNotEqual(types.float64, types.float32) + self.assertNotEqual(types.complex64, types.complex128) + # Different domain + self.assertNotEqual(types.int64, types.float64) + self.assertNotEqual(types.uint64, types.float64) + self.assertNotEqual(types.complex64, types.float64) + # Same arguments but different return types + get_pointer = None + sig_a = typing.signature(types.intp, types.intp) + sig_b = typing.signature(types.voidptr, types.intp) + a = types.ExternalFunctionPointer(sig=sig_a, get_pointer=get_pointer) + b = types.ExternalFunctionPointer(sig=sig_b, 
get_pointer=get_pointer) + self.assertNotEqual(a, b) + # Different call convention + a = types.ExternalFunctionPointer(sig=sig_a, get_pointer=get_pointer) + b = types.ExternalFunctionPointer(sig=sig_a, get_pointer=get_pointer, + cconv='stdcall') + self.assertNotEqual(a, b) + # Different get_pointer + a = types.ExternalFunctionPointer(sig=sig_a, get_pointer=get_pointer) + b = types.ExternalFunctionPointer(sig=sig_a, get_pointer=object()) + self.assertNotEqual(a, b) + + # Different template classes bearing the same name + class DummyTemplate(object): + key = "foo" + a = types.BoundFunction(DummyTemplate, types.int32) + + class DummyTemplate(object): + key = "bar" + b = types.BoundFunction(DummyTemplate, types.int32) + self.assertNotEqual(a, b) + + # Different dtypes + self.assertNotEqual(types.DType(types.int32), types.DType(types.int64)) + + # CPointer with same addrspace + self.assertEqual(types.CPointer(types.float32), + types.CPointer(types.float32)) + + # CPointer with different addrspace + self.assertNotEqual(types.CPointer(types.float32, 0), + types.CPointer(types.float32, 1)) + + def test_weaktype(self): + d = Dummy() + e = Dummy() + a = types.Dispatcher(d) + b = types.Dispatcher(d) + c = types.Dispatcher(e) + self.assertIs(a.dispatcher, d) + self.assertIs(b.dispatcher, d) + self.assertIs(c.dispatcher, e) + # Equality of alive references + self.assertTrue(a == b) + self.assertFalse(a != b) + self.assertTrue(a != c) + self.assertFalse(a == c) + z = types.int8 + self.assertFalse(a == z) + self.assertFalse(b == z) + self.assertFalse(c == z) + self.assertTrue(a != z) + self.assertTrue(b != z) + self.assertTrue(c != z) + # Hashing and mappings + s = set([a, b, c]) + self.assertEqual(len(s), 2) + self.assertIn(a, s) + self.assertIn(b, s) + self.assertIn(c, s) + # Kill the references + d = e = None + gc.collect() + with self.assertRaises(ReferenceError): + a.dispatcher + with self.assertRaises(ReferenceError): + b.dispatcher + with self.assertRaises(ReferenceError): 
+ c.dispatcher + # Dead references are always unequal + self.assertFalse(a == b) + self.assertFalse(a == c) + self.assertFalse(b == c) + self.assertFalse(a == z) + self.assertTrue(a != b) + self.assertTrue(a != c) + self.assertTrue(b != c) + self.assertTrue(a != z) + + def test_interning(self): + # Test interning and lifetime of dynamic types. + a = types.Dummy('xyzzyx') + code = a._code + b = types.Dummy('xyzzyx') + self.assertIs(b, a) + wr = weakref.ref(a) + del a + gc.collect() + c = types.Dummy('xyzzyx') + self.assertIs(c, b) + # The code is always the same + self.assertEqual(c._code, code) + del b, c + gc.collect() + self.assertIs(wr(), None) + d = types.Dummy('xyzzyx') + # The original code wasn't reused. + self.assertNotEqual(d._code, code) + + def test_cache_trimming(self): + # Test that the cache doesn't grow in size when types are + # created and disposed of. + cache = _typecache + gc.collect() + # Keep strong references to existing types, to avoid spurious failures + existing_types = [wr() for wr in cache] # noqa: F841 + cache_len = len(cache) + a = types.Dummy('xyzzyx') + b = types.Dummy('foox') + self.assertEqual(len(cache), cache_len + 2) + del a, b + gc.collect() + self.assertEqual(len(cache), cache_len) + + def test_array_notation(self): + def check(arrty, scalar, ndim, layout): + self.assertIs(arrty.dtype, scalar) + self.assertEqual(arrty.ndim, ndim) + self.assertEqual(arrty.layout, layout) + + def check_index_error(callable): + with self.assertRaises(KeyError) as raises: + callable() + self.assertIn( + "Can only index numba types with slices with no start or " + "stop, got", str(raises.exception)) + + scalar = types.int32 + check(scalar[:], scalar, 1, 'A') + check(scalar[::1], scalar, 1, 'C') + check(scalar[:, :], scalar, 2, 'A') + check(scalar[:, ::1], scalar, 2, 'C') + check(scalar[::1, :], scalar, 2, 'F') + + check_index_error(lambda: scalar[0]) + check_index_error(lambda: scalar[:, 4]) + check_index_error(lambda: scalar[::1, 1:]) + 
check_index_error(lambda: scalar[:2]) + check_index_error(lambda: list(scalar)) + + def test_array_notation_for_dtype(self): + def check(arrty, scalar, ndim, layout): + self.assertIs(arrty.dtype, scalar) + self.assertEqual(arrty.ndim, ndim) + self.assertEqual(arrty.layout, layout) + scalar = types.int32 + dtyped = types.DType(scalar) + check(dtyped[:], scalar, 1, 'A') + check(dtyped[::1], scalar, 1, 'C') + check(dtyped[:, :], scalar, 2, 'A') + check(dtyped[:, ::1], scalar, 2, 'C') + check(dtyped[::1, :], scalar, 2, 'F') + + def test_call_notation(self): + # Function call signature + i = types.int32 + d = types.double + self.assertEqual(i(), typing.signature(i)) + self.assertEqual(i(d), typing.signature(i, d)) + self.assertEqual(i(d, d), typing.signature(i, d, d)) + # Value cast + self.assertPreciseEqual(i(42.5), 42) + self.assertPreciseEqual(d(-5), -5.0) + ty = types.NPDatetime('Y') + self.assertPreciseEqual(ty('1900'), np.datetime64('1900', 'Y')) + self.assertPreciseEqual(ty('NaT'), np.datetime64('NaT', 'Y')) + ty = types.NPTimedelta('s') + self.assertPreciseEqual(ty(5), np.timedelta64(5, 's')) + self.assertPreciseEqual(ty('NaT'), np.timedelta64('NaT', 's')) + ty = types.NPTimedelta('') + self.assertPreciseEqual(ty(5), np.timedelta64(5)) + self.assertPreciseEqual(ty('NaT'), np.timedelta64('NaT')) + + def test_list_type_getitem(self): + for listty in (types.int64, types.Array(types.float64, 1, 'C')): + l_int = types.List(listty) + self.assertTrue(isinstance(l_int, types.List)) + self.assertTrue(isinstance(l_int[0], type(listty))) + + def test_function_incompatible_templates(self): + # issue 4345 + def func_stub(): + pass + + def func_stub2(): + pass + + def ol(): + pass + + template1 = make_overload_template(func_stub, ol, {}, True, 'never') + template2 = make_overload_template(func_stub2, ol, {}, True, 'never') + + with self.assertRaises(ValueError) as raises: + types.Function((template1, template2)) + self.assertIn("incompatible templates:", 
str(raises.exception)) + + +class TestNumbers(TestCase): + """ + Tests for number types. + """ + + def test_bitwidth(self): + """ + All numeric types have bitwidth attribute + """ + for ty in types.number_domain: + self.assertTrue(hasattr(ty, "bitwidth")) + + def test_minval_maxval(self): + self.assertEqual(types.int8.maxval, 127) + self.assertEqual(types.int8.minval, -128) + self.assertEqual(types.uint8.maxval, 255) + self.assertEqual(types.uint8.minval, 0) + self.assertEqual(types.int64.maxval, (1 << 63) - 1) + self.assertEqual(types.int64.minval, -(1 << 63)) + self.assertEqual(types.uint64.maxval, (1 << 64) - 1) + self.assertEqual(types.uint64.minval, 0) + + def test_from_bidwidth(self): + f = types.Integer.from_bitwidth + self.assertIs(f(32), types.int32) + self.assertIs(f(8, signed=False), types.uint8) + + def test_ordering(self): + def check_order(values): + for i in range(len(values)): + self.assertLessEqual(values[i], values[i]) + self.assertGreaterEqual(values[i], values[i]) + self.assertFalse(values[i] < values[i]) + self.assertFalse(values[i] > values[i]) + for j in range(i): + self.assertLess(values[j], values[i]) + self.assertLessEqual(values[j], values[i]) + self.assertGreater(values[i], values[j]) + self.assertGreaterEqual(values[i], values[j]) + self.assertFalse(values[i] < values[j]) + self.assertFalse(values[i] <= values[j]) + self.assertFalse(values[j] > values[i]) + self.assertFalse(values[j] >= values[i]) + + check_order([types.int8, types.int16, types.int32, types.int64]) + check_order([types.uint8, types.uint16, types.uint32, types.uint64]) + check_order([types.float32, types.float64]) + check_order([types.complex64, types.complex128]) + + with self.assertRaises(TypeError): + types.int8 <= types.uint32 + with self.assertRaises(TypeError): + types.int8 <= types.float32 + with self.assertRaises(TypeError): + types.float64 <= types.complex128 + + +class TestNdIter(TestCase): + + def test_properties(self): + def check(ty, dtypes, ndim, layout, 
indexers=None): + self.assertEqual(ty.ndim, ndim) + self.assertEqual(ty.layout, layout) + self.assertEqual(ty.dtypes, dtypes) + views = [types.Array(dtype, 0, "C") for dtype in dtypes] + if len(views) > 1: + self.assertEqual( + ty.yield_type, + types.BaseTuple.from_types(views)) + else: + self.assertEqual(ty.yield_type, views[0]) + if indexers is not None: + self.assertEqual(ty.indexers, indexers) + + f32 = types.float32 + c64 = types.complex64 + i16 = types.int16 + a = types.Array(f32, 1, "C") + b = types.Array(f32, 2, "C") + c = types.Array(c64, 2, "F") + d = types.Array(i16, 2, "A") + e = types.Array(i16, 0, "C") + f = types.Array(f32, 1, "A") + g = types.Array(f32, 0, "C") + + # 0-dim iterator + ty = types.NumpyNdIterType((e,)) + check(ty, (i16,), 0, "C", [('0d', 0, 0, [0])]) + self.assertFalse(ty.need_shaped_indexing) + ty = types.NumpyNdIterType((e, g)) + check(ty, (i16, f32), 0, "C", [('0d', 0, 0, [0, 1])]) + self.assertFalse(ty.need_shaped_indexing) + ty = types.NumpyNdIterType((e, c64)) + check(ty, (i16, c64), 0, "C", + [('0d', 0, 0, [0]), ('scalar', 0, 0, [1])]) + self.assertFalse(ty.need_shaped_indexing) + + # 1-dim iterator + ty = types.NumpyNdIterType((a,)) + check(ty, (f32,), 1, "C", + [('flat', 0, 1, [0])]) + self.assertFalse(ty.need_shaped_indexing) + ty = types.NumpyNdIterType((a, a)) + check(ty, (f32, f32), 1, "C", + [('flat', 0, 1, [0, 1])]) + self.assertFalse(ty.need_shaped_indexing) + ty = types.NumpyNdIterType((a, e, e, c64)) + check(ty, (f32, i16, i16, c64), 1, "C", + [('flat', 0, 1, [0]), # a + ('0d', 0, 0, [1, 2]), # e, e + ('scalar', 0, 0, [3]), # c64 + ]) + self.assertFalse(ty.need_shaped_indexing) + ty = types.NumpyNdIterType((a, f)) + check(ty, (f32, f32), 1, "C", + [('flat', 0, 1, [0]), ('indexed', 0, 1, [1])]) + self.assertTrue(ty.need_shaped_indexing) + ty = types.NumpyNdIterType((f,)) + check(ty, (f32,), 1, "C", [('indexed', 0, 1, [0])]) + self.assertTrue(ty.need_shaped_indexing) + + # 2-dim C-order iterator + ty = 
types.NumpyNdIterType((b,)) + check(ty, (f32,), 2, "C", [('flat', 0, 2, [0])]) + self.assertFalse(ty.need_shaped_indexing) + ty = types.NumpyNdIterType((b, c)) + check( + ty, (f32, c64), 2, "C", [ + ('flat', 0, 2, [0]), ('indexed', 0, 2, [1])]) + self.assertTrue(ty.need_shaped_indexing) + ty = types.NumpyNdIterType((d,)) + check(ty, (i16,), 2, "C", [('indexed', 0, 2, [0])]) + self.assertTrue(ty.need_shaped_indexing) + ty = types.NumpyNdIterType((b, c, d, d, e)) + check(ty, (f32, c64, i16, i16, i16), 2, "C", + [('flat', 0, 2, [0]), # b + ('indexed', 0, 2, [1, 2, 3]), # c, d, d + ('0d', 0, 0, [4]), # e + ]) + self.assertTrue(ty.need_shaped_indexing) + ty = types.NumpyNdIterType((a, b, c, d, d, f)) + check(ty, (f32, f32, c64, i16, i16, f32), 2, "C", + [('flat', 1, 2, [0]), # a + ('flat', 0, 2, [1]), # b + ('indexed', 0, 2, [2, 3, 4]), # c, d, d + ('indexed', 1, 2, [5]), # f + ]) + self.assertTrue(ty.need_shaped_indexing) + + # 2-dim F-order iterator + ty = types.NumpyNdIterType((c,)) + check(ty, (c64,), 2, "F", [('flat', 0, 2, [0])]) + self.assertFalse(ty.need_shaped_indexing) + ty = types.NumpyNdIterType((c, b, c, f)) + check(ty, (c64, f32, c64, f32), 2, "F", + [('flat', 0, 2, [0, 2]), # c, c + ('indexed', 0, 2, [1]), # b + ('indexed', 0, 1, [3]), # f + ]) + self.assertTrue(ty.need_shaped_indexing) + ty = types.NumpyNdIterType((b, c, c, d, d, a, e)) + check(ty, (f32, c64, c64, i16, i16, f32, i16), 2, "F", + [('indexed', 0, 2, [0, 3, 4]), # b, d, d + ('flat', 0, 2, [1, 2]), # c, c + ('flat', 0, 1, [5]), # a + ('0d', 0, 0, [6]), # e + ]) + self.assertTrue(ty.need_shaped_indexing) + + +class TestPickling(TestCase): + """ + Pickling and unpickling should preserve type identity (singleton-ness) + and the _code attribute. This is only a requirement for types that + can be part of function signatures. 
+ """ + + def predefined_types(self): + """ + Yield all predefined type instances + """ + for ty in types.__dict__.values(): + if isinstance(ty, types.Type): + yield ty + + def check_pickling(self, orig): + pickled = pickle.dumps(orig, protocol=-1) + ty = pickle.loads(pickled) + self.assertIs(ty, orig) + self.assertGreaterEqual(ty._code, 0) + + def test_predefined_types(self): + tys = list(self.predefined_types()) + self.assertIn(types.int16, tys) + for ty in tys: + self.check_pickling(ty) + + def test_atomic_types(self): + for unit in ('M', 'ms'): + ty = types.NPDatetime(unit) + self.check_pickling(ty) + ty = types.NPTimedelta(unit) + self.check_pickling(ty) + + def test_arrays(self): + for ndim in (0, 1, 2): + for layout in ('A', 'C', 'F'): + ty = types.Array(types.int16, ndim, layout) + self.check_pickling(ty) + + def test_records(self): + recordtype = np.dtype([('a', np.float64), + ('b', np.int32), + ('c', np.complex64), + ('d', (np.str_, 5))]) + ty = numpy_support.from_dtype(recordtype) + self.check_pickling(ty) + self.check_pickling(types.Array(ty, 1, 'A')) + + def test_optional(self): + ty = types.Optional(types.int32) + self.check_pickling(ty) + + def test_tuples(self): + ty1 = types.UniTuple(types.int32, 3) + self.check_pickling(ty1) + ty2 = types.Tuple((types.int32, ty1)) + self.check_pickling(ty2) + + def test_namedtuples(self): + ty1 = types.NamedUniTuple(types.intp, 2, Point) + self.check_pickling(ty1) + ty2 = types.NamedTuple((types.intp, types.float64), Point) + self.check_pickling(ty2) + + def test_enums(self): + ty1 = types.EnumMember(Color, types.int32) + self.check_pickling(ty1) + ty2 = types.EnumMember(Shake, types.int64) + self.check_pickling(ty2) + ty3 = types.IntEnumMember(Shape, types.int64) + self.check_pickling(ty3) + + def test_lists(self): + ty = types.List(types.int32) + self.check_pickling(ty) + + def test_generator(self): + cfunc = jit("(int32,)", nopython=True)(gen) + sigs = list(cfunc.nopython_signatures) + ty = sigs[0].return_type 
+ self.assertIsInstance(ty, types.Generator) + self.check_pickling(ty) + + # call templates are not picklable + @unittest.expectedFailure + def test_external_function_pointers(self): + from numba.core.typing import ctypes_utils + from numba.tests.ctypes_usecases import c_sin, c_cos + for fnptr in (c_sin, c_cos): + ty = ctypes_utils.make_function_type(fnptr) + self.assertIsInstance(ty, types.ExternalFunctionPointer) + self.check_pickling(ty) + + +class TestSignatures(TestCase): + + def test_normalize_signature(self): + f = sigutils.normalize_signature + + def check(sig, args, return_type): + self.assertEqual(f(sig), (args, return_type)) + + def check_error(sig, msg): + with self.assertRaises(TypeError) as raises: + f(sig) + self.assertIn(msg, str(raises.exception)) + + f32 = types.float32 + c64 = types.complex64 + i16 = types.int16 + a = types.Array(f32, 1, "C") + + check((c64,), (c64,), None) + check((f32, i16), (f32, i16), None) + check(a(i16), (i16,), a) + check("int16(complex64)", (c64,), i16) + check("(complex64, int16)", (c64, i16), None) + check(typing.signature(i16, c64), (c64,), i16) + + msg = "invalid type in signature: expected a type instance" + check_error((types.Integer,), msg) + check_error((None,), msg) + check_error([], "invalid signature") + + +class TestRecordDtype(unittest.TestCase): + def test_record_type_equiv(self): + rec_dt = np.dtype([('a', np.int32), ('b', np.float32)]) + rec_ty = typeof(rec_dt) + art1 = rec_ty[::1] + arr = np.zeros(5, dtype=rec_dt) + art2 = typeof(arr) + self.assertEqual(art2.dtype.dtype, rec_ty) + self.assertEqual(art1, art2) + + def test_user_specified(self): + rec_dt = np.dtype([('a', np.int32), ('b', np.float32)]) + rec_type = typeof(rec_dt) + + @jit((rec_type[:],), nopython=True) + def foo(x): + return x['a'], x['b'] + + arr = np.zeros(1, dtype=rec_dt) + arr[0]['a'] = 123 + arr[0]['b'] = 32.1 + + a, b = foo(arr) + + self.assertEqual(a, arr[0]['a']) + self.assertEqual(b, arr[0]['b']) + + +class TestDType(TestCase): + 
def test_type_attr(self): + # Test .type attribute of dtype + def conv(arr, val): + return arr.dtype.type(val) + + jit_conv = jit(nopython=True)(conv) + + def assert_matches(arr, val, exact): + expect = conv(arr, val) + got = jit_conv(arr, val) + self.assertPreciseEqual(expect, exact) + self.assertPreciseEqual(typeof(expect), typeof(got)) + self.assertPreciseEqual(expect, got) + + arr = np.zeros(5) + assert_matches(arr.astype(np.intp), 1.2, 1) + assert_matches(arr.astype(np.float64), 1.2, 1.2) + assert_matches(arr.astype(np.complex128), 1.2, (1.2 + 0j)) + assert_matches(arr.astype(np.complex128), 1.2j, 1.2j) + + def test_kind(self): + def tkind(A): + return A.dtype.kind == 'f' + jit_tkind = jit(nopython=True)(tkind) + self.assertEqual(tkind(np.ones(3)), jit_tkind(np.ones(3))) + self.assertEqual(tkind(np.ones(3, dtype=np.intp)), + jit_tkind(np.ones(3, dtype=np.intp))) + + def test_dtype_with_type(self): + def impl(): + a = np.dtype(np.float64) + return a.type(0) + jit_impl = jit(nopython=True)(impl) + self.assertEqual(impl(), jit_impl()) + + def test_dtype_with_string(self): + def impl(): + a = np.dtype('float64') + return a.type(0) + jit_impl = jit(nopython=True)(impl) + self.assertEqual(impl(), jit_impl()) + + +class TestIsInternalTypeMarker(TestCase): + """Tests the use of the Type metaclass init correctly setting the flag on + the `is_internal` attr of a concrete Type class + """ + source_lines = """ +from numba.core import types + +class FooType(types.Type): + def __init__(self): + super(FooType, self).__init__(name='Foo') +""" + + def test_create_temp_module(self): + sys_path_original = list(sys.path) + sys_modules_original = dict(sys.modules) + with create_temp_module(self.source_lines) as test_module: + temp_module_dir = os.path.dirname(test_module.__file__) + self.assertEqual(temp_module_dir, sys.path[0]) + self.assertEqual(sys.path[1:], sys_path_original) + self.assertTrue(test_module.__name__ in sys.modules) + # Test that modifications to sys.path / 
sys.modules are reverted + self.assertEqual(sys.path, sys_path_original) + self.assertEqual(sys.modules, sys_modules_original) + + def test_create_temp_module_with_exception(self): + try: + sys_path_original = list(sys.path) + sys_modules_original = dict(sys.modules) + with create_temp_module(self.source_lines): + raise ValueError("Something went wrong!") + except ValueError: + # Test that modifications to sys.path / sys.modules are reverted + self.assertEqual(sys.path, sys_path_original) + self.assertEqual(sys.modules, sys_modules_original) + + def test_externally_defined_type_is_external(self): + + with create_temp_module(self.source_lines) as test_module: + FooType = test_module.FooType + self.assertFalse(FooType().is_internal) + + # set up an extension type + class Foo(object): + pass + + register_model(FooType)(models.OpaqueModel) + + @typeof_impl.register(Foo) + def _typ_foo(val, c): + return FooType() + + @unbox(FooType) + def unbox_foo(typ, obj, c): + return NativeValue(c.context.get_dummy_value()) + + # function to overload + def false_if_not_array(a): + pass + + # Set up an overload which will accept all types irrespective of + # whether they are from Numba's closed type system + @overload(false_if_not_array) + def ol_false_if_not_array(a): + if isinstance(a, types.Array): + return lambda a : True + else: + return lambda a : False + + @njit + def call_false_if_not_array(a): + return false_if_not_array(a) + + self.assertTrue(call_false_if_not_array(np.zeros(10))) + self.assertFalse(call_false_if_not_array(10)) + + # The extension type was accepted + self.assertFalse(call_false_if_not_array(Foo())) + + # Now do the same sort of overload but put in a guard based on the + # use of internal types + + def false_if_not_array_closed_system(a): + pass + + @overload(false_if_not_array_closed_system) + def ol_false_if_not_array_closed_system(a): + if a.is_internal: # guard + if isinstance(a, types.Array): + return lambda a : True + else: + return lambda a : False + 
+ @njit + def call_false_if_not_array_closed_system(a): + return false_if_not_array_closed_system(a) + + self.assertTrue(call_false_if_not_array_closed_system(np.zeros(10))) + self.assertFalse(call_false_if_not_array_closed_system(10)) + + with self.assertRaises(errors.TypingError) as raises: + call_false_if_not_array_closed_system(Foo()) + estr = str(raises.exception) + self.assertIn(_header_lead, estr) + self.assertIn("false_if_not_array_closed_system", estr) + self.assertIn("(Foo)", estr) + + def test_mixin_against_real_example(self): + # See issue #4970, this checks that unicode eq/ne now ignores extension + # types. + + with create_temp_module(self.source_lines) as test_module: + FooType = test_module.FooType + self.assertFalse(FooType().is_internal) + + # set up an extension type + class Foo(object): + pass + + register_model(FooType)(models.OpaqueModel) + + @typeof_impl.register(Foo) + def _typ_foo(val, c): + return FooType() + + @unbox(FooType) + def unbox_foo(typ, obj, c): + return NativeValue(c.context.get_dummy_value()) + + @overload(operator.eq) + def foo_eq(a, b): + if a == FooType(): + return lambda a, b: "RAN CUSTOM EQ OVERLOAD" + + @overload(operator.ne) + def foo_ne(a, b): + if a == FooType(): + return lambda a, b: "RAN CUSTOM NE OVERLOAD" + + @njit + def f(a): + return a == "A", a != "A" + + self.assertEqual(("RAN CUSTOM EQ OVERLOAD", + "RAN CUSTOM NE OVERLOAD"), + f(Foo())) + + +class TestIssues(TestCase): + def test_omitted_type(self): + # issue https://github.com/numba/numba/issues/5471 + def inner(a): + pass + + @overload(inner) + def inner_overload(a): + if not isinstance(a, types.Literal): + return + return lambda a: a + + @njit + def my_func(a='a'): + return inner(a) + + @njit + def f(): + return my_func() + + @njit + def g(): + return my_func('b') + + self.assertEqual(f(), 'a') + self.assertEqual(g(), 'b') + + def test_type_of_literal(self): + # type(val) where val is a literal should not give a literal type. 
+ def inner(a): + pass + + @overload(inner) + def inner_overload(a): + if not isinstance(a, types.Literal): + return + self.assertIsInstance(a, types.Literal) + # NOTE: using 1.23 to ensure that the result is indeed an int. + return lambda a: type(a)(a + 1.23) + + @njit + def my_func(a=1): + return inner(a) + + @njit + def f(): + return my_func() + + @njit + def g(): + return my_func(100) + + self.assertEqual(f(), 2) + self.assertEqual(g(), 101) + + def test_issue_typeref_key(self): + # issue https://github.com/numba/numba/issues/6336 + class NoUniqueNameType(types.Dummy): + def __init__(self, param): + super(NoUniqueNameType, self).__init__('NoUniqueNameType') + self.param = param + + @property + def key(self): + return self.param + + no_unique_name_type_1 = NoUniqueNameType(1) + no_unique_name_type_2 = NoUniqueNameType(2) + + for ty1 in (no_unique_name_type_1, no_unique_name_type_2): + for ty2 in (no_unique_name_type_1, no_unique_name_type_2): + self.assertIs( + types.TypeRef(ty1) == types.TypeRef(ty2), ty1 == ty2) + + def test_issue_list_type_key(self): + # https://github.com/numba/numba/issues/6397 + class NoUniqueNameType(types.Dummy): + def __init__(self, param): + super(NoUniqueNameType, self).__init__('NoUniqueNameType') + self.param = param + + @property + def key(self): + return self.param + + no_unique_name_type_1 = NoUniqueNameType(1) + no_unique_name_type_2 = NoUniqueNameType(2) + + for ty1 in (no_unique_name_type_1, no_unique_name_type_2): + for ty2 in (no_unique_name_type_1, no_unique_name_type_2): + self.assertIs( + types.ListType(ty1) == types.ListType(ty2), # noqa: E721 + ty1 == ty2 + ) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_typingerror.py b/venv/lib/python3.10/site-packages/numba/tests/test_typingerror.py new file mode 100644 index 0000000000000000000000000000000000000000..ce6b6cd3e0e9f6a93e1b508699fbddbe35dd85ab --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/numba/tests/test_typingerror.py @@ -0,0 +1,236 @@ +import math +import re +import textwrap +import operator + +import numpy as np + +import unittest +from numba import jit, njit +from numba.core import types +from numba.core.errors import TypingError +from numba.core.types.functions import _header_lead +from numba.tests.support import TestCase + + +def what(): + pass + +def foo(): + return what() + +def bar(x): + return x.a + +def issue_868(a): + return a.shape * 2 + +def impossible_return_type(x): + if x > 0: + return () + else: + return 1j + +def bad_hypot_usage(): + return math.hypot(1) + +def imprecise_list(): + l = [] + return len(l) + +def using_imprecise_list(): + a = np.array([]) + return a.astype(np.int32) + +def unknown_module(): + return numpyz.int32(0) + +def nop(x, y, z): + pass + +def array_setitem_invalid_cast(): + arr = np.empty(1, dtype=np.float64) + arr[0] = 1j # invalid cast from complex to float + return arr + + +class Foo(object): + def __repr__(self): + return "" + + +class TestTypingError(unittest.TestCase): + + def test_unknown_function(self): + try: + njit((),)(foo) + except TypingError as e: + self.assertIn("Untyped global name 'what'", str(e)) + else: + self.fail("Should raise error") + + def test_unknown_attrs(self): + try: + njit((types.int32,),)(bar) + except TypingError as e: + self.assertIn("Unknown attribute 'a' of type int32", str(e)) + else: + self.fail("Should raise error") + + def test_unknown_module(self): + # This used to print "'object' object has no attribute 'int32'" + with self.assertRaises(TypingError) as raises: + njit((),)(unknown_module) + self.assertIn("name 'numpyz' is not defined", str(raises.exception)) + + def test_issue_868(self): + ''' + Summary: multiplying a scalar by a non-scalar would cause a crash in + type inference because TimeDeltaMixOp always assumed at least one of + its operands was an NPTimeDelta in its generic() method. 
+ ''' + with self.assertRaises(TypingError) as raises: + njit((types.Array(types.int32, 1, 'C'),))(issue_868) + + expected = ((_header_lead + " Function() found " + "for signature:\n \n >>> mul(UniTuple({} x 1), {})") + .format(str(types.intp), types.IntegerLiteral(2))) + self.assertIn(expected, str(raises.exception)) + self.assertIn("During: typing of", str(raises.exception)) + + def test_return_type_unification(self): + with self.assertRaises(TypingError) as raises: + njit((types.int32,))(impossible_return_type,) + msg = ("Can't unify return type from the following types: Tuple(), " + "complex128") + self.assertIn(msg, str(raises.exception)) + + def test_bad_hypot_usage(self): + with self.assertRaises(TypingError) as raises: + njit((),)(bad_hypot_usage,) + + errmsg = str(raises.exception) + # Make sure it listed the known signatures. + # This is sensitive to the formatting of the error message. + self.assertIn(" * (float64, float64) -> float64", errmsg) + + # find the context lines + ctx_lines = [x for x in errmsg.splitlines() if "During:" in x ] + + # Check contextual msg + self.assertTrue(re.search(r'.*During: resolving callee type: Function.*hypot', ctx_lines[0])) + self.assertTrue(re.search(r'.*During: typing of call .*test_typingerror.py', ctx_lines[1])) + + + def test_imprecise_list(self): + """ + Type inference should catch that a list type's remain imprecise, + instead of letting lowering fail. + """ + with self.assertRaises(TypingError) as raises: + njit((),)(imprecise_list) + + errmsg = str(raises.exception) + msg = ("Cannot infer the type of variable 'l', have imprecise type: " + "list(undefined)") + self.assertIn(msg, errmsg) + # check help message has gone in + self.assertIn("For Numba to be able to compile a list", errmsg) + + def test_using_imprecise_list(self): + """ + Type inference should report informative error about untyped list. 
+ TODO: #2931 + """ + with self.assertRaises(TypingError) as raises: + njit((),)(using_imprecise_list) + + errmsg = str(raises.exception) + self.assertIn("Undecided type", errmsg) + + def test_array_setitem_invalid_cast(self): + with self.assertRaises(TypingError) as raises: + njit((),)(array_setitem_invalid_cast) + + errmsg = str(raises.exception) + self.assertIn( + _header_lead + " Function({})".format(operator.setitem), + errmsg, + ) + self.assertIn( + "(array(float64, 1d, C), Literal[int](0), complex128)", + errmsg, + ) + + def test_template_rejection_error_message_cascade(self): + from numba import njit + @njit + def foo(): + z = 1 + for a, b in enumerate(z): + pass + return z + + with self.assertRaises(TypingError) as raises: + foo() + errmsg = str(raises.exception) + expected = "No match." + self.assertIn(expected, errmsg) + + ctx_lines = [x for x in errmsg.splitlines() if "During:" in x ] + search = [r'.*During: resolving callee type: Function.*enumerate', + r'.*During: typing of call .*test_typingerror.py'] + for i, x in enumerate(search): + self.assertTrue(re.search(x, ctx_lines[i])) + + +class TestArgumentTypingError(unittest.TestCase): + """ + Test diagnostics of typing errors caused by argument inference failure. 
+ """ + + def test_unsupported_array_dtype(self): + # See issue #1943 + cfunc = jit(nopython=True)(nop) + a = np.ones(3) + a = a.astype(a.dtype.newbyteorder()) + with self.assertRaises(TypingError) as raises: + cfunc(1, a, a) + expected = f"Unsupported array dtype: {a.dtype}" + self.assertIn(expected, str(raises.exception)) + + def test_unsupported_type(self): + cfunc = jit(nopython=True)(nop) + foo = Foo() + with self.assertRaises(TypingError) as raises: + cfunc(1, foo, 1) + + expected=re.compile(("This error may have been caused by the following " + r"argument\(s\):\n- argument 1:.*Cannot determine " + "Numba type of " + "")) + self.assertTrue(expected.search(str(raises.exception)) is not None) + + +class TestCallError(unittest.TestCase): + def test_readonly_array(self): + @jit("(f8[:],)", nopython=True) + def inner(x): + return x + + @jit(nopython=True) + def outer(): + return inner(gvalues) + + gvalues = np.ones(10, dtype=np.float64) + + with self.assertRaises(TypingError) as raises: + outer() + + got = str(raises.exception) + pat = r"Invalid use of.*readonly array\(float64, 1d, C\)" + self.assertIsNotNone(re.search(pat, got)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_ufuncs.py b/venv/lib/python3.10/site-packages/numba/tests/test_ufuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..95e56af2ef46e9e5e0f6ea7a1e3efa72315c9da4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_ufuncs.py @@ -0,0 +1,1906 @@ +import functools +import itertools +import sys +import warnings +import threading +import operator + +import numpy as np + +import unittest +from numba import guvectorize, njit, typeof, vectorize +from numba.core import types +from numba.np.numpy_support import from_dtype +from numba.core.errors import LoweringError, TypingError +from numba.tests.support import TestCase, MemoryLeakMixin +from numba.core.typing.npydecl import supported_ufuncs +from 
numba.np import numpy_support +from numba.core.registry import cpu_target +from numba.core.base import BaseContext +from numba.np import ufunc_db + +is32bits = tuple.__itemsize__ == 4 +iswindows = sys.platform.startswith('win32') + + +def _unimplemented(func): + """An 'expectedFailure' like decorator that only expects compilation errors + caused by unimplemented functions that fail in no-python mode""" + @functools.wraps(func) + def wrapper(*args, **kwargs): + try: + func(*args, **kwargs) + except TypingError: + raise unittest._ExpectedFailure(sys.exc_info()) + raise unittest._UnexpectedSuccess + + +def _make_ufunc_usecase(ufunc): + ldict = {} + arg_str = ','.join(['a{0}'.format(i) for i in range(ufunc.nargs)]) + func_str = 'def fn({0}):\n np.{1}({0})'.format(arg_str, ufunc.__name__) + exec(func_str, globals(), ldict) + fn = ldict['fn'] + fn.__name__ = '{0}_usecase'.format(ufunc.__name__) + return fn + + +def _make_unary_ufunc_op_usecase(ufunc_op): + ldict = {} + exec("def fn(x):\n return {0}(x)".format(ufunc_op), globals(), ldict) + fn = ldict["fn"] + fn.__name__ = "usecase_{0}".format(hash(ufunc_op)) + return fn + + +def _make_binary_ufunc_op_usecase(ufunc_op): + ldict = {} + exec("def fn(x,y):\n return x{0}y".format(ufunc_op), globals(), ldict) + fn = ldict["fn"] + fn.__name__ = "usecase_{0}".format(hash(ufunc_op)) + return fn + + +def _make_inplace_ufunc_op_usecase(ufunc_op): + """Generates a function to be compiled that performs an inplace operation + + ufunc_op can be a string like '+=' or a function like operator.iadd + """ + if isinstance(ufunc_op, str): + ldict = {} + exec("def fn(x,y):\n x{0}y".format(ufunc_op), globals(), ldict) + fn = ldict["fn"] + fn.__name__ = "usecase_{0}".format(hash(ufunc_op)) + else: + def inplace_op(x, y): + ufunc_op(x, y) + fn = inplace_op + return fn + + +def _as_dtype_value(tyargs, args): + """Convert python values into numpy scalar objects. 
+ """ + return [np.dtype(str(ty)).type(val) for ty, val in zip(tyargs, args)] + + +class BaseUFuncTest(MemoryLeakMixin): + + def setUp(self): + super(BaseUFuncTest, self).setUp() + self.inputs = [ + (np.uint32(0), types.uint32), + (np.uint32(1), types.uint32), + (np.int32(-1), types.int32), + (np.int32(0), types.int32), + (np.int32(1), types.int32), + (np.uint64(0), types.uint64), + (np.uint64(1), types.uint64), + (np.int64(-1), types.int64), + (np.int64(0), types.int64), + (np.int64(1), types.int64), + + (np.float32(-0.5), types.float32), + (np.float32(0.0), types.float32), + (np.float32(0.5), types.float32), + + (np.float64(-0.5), types.float64), + (np.float64(0.0), types.float64), + (np.float64(0.5), types.float64), + + (np.array([0,1], dtype='u4'), types.Array(types.uint32, 1, 'C')), + (np.array([0,1], dtype='u8'), types.Array(types.uint64, 1, 'C')), + (np.array([-1,0,1], dtype='i4'), types.Array(types.int32, 1, 'C')), + (np.array([-1,0,1], dtype='i8'), types.Array(types.int64, 1, 'C')), + (np.array([-0.5, 0.0, 0.5], dtype='f4'), + types.Array(types.float32, 1, 'C')), + (np.array([-0.5, 0.0, 0.5], dtype='f8'), + types.Array(types.float64, 1, 'C')), + + (np.array([0,1], dtype=np.int8), types.Array(types.int8, 1, 'C')), + (np.array([0,1], dtype=np.int16), types.Array(types.int16, 1, 'C')), + (np.array([0,1], dtype=np.uint8), types.Array(types.uint8, 1, 'C')), + (np.array([0,1], dtype=np.uint16), + types.Array(types.uint16, 1, 'C')), + ] + + @functools.lru_cache(maxsize=None) + def _compile(self, pyfunc, args, nrt=False): + # NOTE: to test the implementation of Numpy ufuncs, we disable + # rewriting of array expressions. 
+ return njit(args, _nrt=nrt, no_rewrites=True)(pyfunc) + + def _determine_output_type(self, input_type, int_output_type=None, + float_output_type=None): + ty = input_type + if isinstance(ty, types.Array): + ndim = ty.ndim + ty = ty.dtype + else: + ndim = 1 + + if ty in types.signed_domain: + if int_output_type: + output_type = types.Array(int_output_type, ndim, 'C') + else: + output_type = types.Array(ty, ndim, 'C') + elif ty in types.unsigned_domain: + if int_output_type: + output_type = types.Array(int_output_type, ndim, 'C') + else: + output_type = types.Array(ty, ndim, 'C') + else: + if float_output_type: + output_type = types.Array(float_output_type, ndim, 'C') + else: + output_type = types.Array(ty, ndim, 'C') + return output_type + + +class BasicUFuncTest(BaseUFuncTest): + def _make_ufunc_usecase(self, ufunc): + return _make_ufunc_usecase(ufunc) + + def basic_ufunc_test(self, ufunc, skip_inputs=[], additional_inputs=[], + int_output_type=None, float_output_type=None, + kinds='ifc', positive_only=False): + + # Necessary to avoid some Numpy warnings being silenced, despite + # the simplefilter() call below. 
+ self.reset_module_warnings(__name__) + + pyfunc = self._make_ufunc_usecase(ufunc) + + inputs = list(self.inputs) + additional_inputs + + for input_tuple in inputs: + input_operand = input_tuple[0] + input_type = input_tuple[1] + + is_tuple = isinstance(input_operand, tuple) + if is_tuple: + args = input_operand + else: + args = (input_operand,) * ufunc.nin + + if input_type in skip_inputs: + continue + if positive_only and np.any(args[0] < 0): + continue + + # Some ufuncs don't allow all kinds of arguments + if (args[0].dtype.kind not in kinds): + continue + + output_type = self._determine_output_type( + input_type, int_output_type, float_output_type) + + input_types = (input_type,) * ufunc.nin + output_types = (output_type,) * ufunc.nout + argtys = input_types + output_types + cfunc = self._compile(pyfunc, argtys) + + if isinstance(args[0], np.ndarray): + results = [ + np.zeros(args[0].shape, + dtype=out_ty.dtype.name) + for out_ty in output_types + ] + expected = [ + np.zeros(args[0].shape, dtype=out_ty.dtype.name) + for out_ty in output_types + ] + else: + results = [ + np.zeros(1, dtype=out_ty.dtype.name) + for out_ty in output_types + ] + expected = [ + np.zeros(1, dtype=out_ty.dtype.name) + for out_ty in output_types + ] + + invalid_flag = False + with warnings.catch_warnings(record=True) as warnlist: + warnings.simplefilter('always') + pyfunc(*args, *expected) + + warnmsg = "invalid value encountered" + for thiswarn in warnlist: + + if (issubclass(thiswarn.category, RuntimeWarning) + and str(thiswarn.message).startswith(warnmsg)): + invalid_flag = True + + cfunc(*args, *results) + + for expected_i, result_i in zip(expected, results): + msg = '\n'.join(["ufunc '{0}' failed", + "inputs ({1}):", "{2}", + "got({3})", "{4}", + "expected ({5}):", "{6}" + ]).format(ufunc.__name__, + input_type, input_operand, + output_type, result_i, + expected_i.dtype, expected_i) + try: + np.testing.assert_array_almost_equal( + expected_i, result_i, + decimal=5, + err_msg=msg) 
+ except AssertionError: + if invalid_flag: + # Allow output to mismatch for invalid input + print("Output mismatch for invalid input", + input_tuple, result_i, expected_i) + else: + raise + + def signed_unsigned_cmp_test(self, comparison_ufunc): + self.basic_ufunc_test(comparison_ufunc) + + if numpy_support.numpy_version < (1, 25): + return + + # Test additional implementations that specifically handle signed / + # unsigned comparisons added in NumPy 1.25: + # https://github.com/numpy/numpy/pull/23713 + additional_inputs = ( + (np.int64(-1), np.uint64(0)), + (np.int64(-1), np.uint64(1)), + (np.int64(0), np.uint64(0)), + (np.int64(0), np.uint64(1)), + (np.int64(1), np.uint64(0)), + (np.int64(1), np.uint64(1)), + + (np.uint64(0), np.int64(-1)), + (np.uint64(0), np.int64(0)), + (np.uint64(0), np.int64(1)), + (np.uint64(1), np.int64(-1)), + (np.uint64(1), np.int64(0)), + (np.uint64(1), np.int64(1)), + + (np.array([-1, -1, 0, 0, 1, 1], dtype=np.int64), + np.array([0, 1, 0, 1, 0, 1], dtype=np.uint64)), + + (np.array([0, 1, 0, 1, 0, 1], dtype=np.uint64), + np.array([-1, -1, 0, 0, 1, 1], dtype=np.int64)) + ) + + pyfunc = self._make_ufunc_usecase(comparison_ufunc) + + for a, b in additional_inputs: + input_types = (typeof(a), typeof(b)) + output_type = types.Array(types.bool_, 1, 'C') + argtys = input_types + (output_type,) + cfunc = self._compile(pyfunc, argtys) + + if isinstance(a, np.ndarray): + result = np.zeros(a.shape, dtype=np.bool_) + else: + result = np.zeros(1, dtype=np.bool_) + + expected = np.zeros_like(result) + + pyfunc(a, b, expected) + cfunc(a, b, result) + np.testing.assert_equal(expected, result) + + +class TestUFuncs(BasicUFuncTest, TestCase): + def basic_int_ufunc_test(self, name=None): + skip_inputs = [ + types.float32, + types.float64, + types.Array(types.float32, 1, 'C'), + types.Array(types.float64, 1, 'C'), + ] + self.basic_ufunc_test(name, skip_inputs=skip_inputs) + + ############################################################################ + # 
Math operations + + def test_add_ufunc(self): + self.basic_ufunc_test(np.add) + + def test_subtract_ufunc(self): + self.basic_ufunc_test(np.subtract) + + def test_multiply_ufunc(self): + self.basic_ufunc_test(np.multiply) + + def test_divide_ufunc(self): + # Bear in mind that in python3 divide IS true_divide + # so the out type for int types will be a double + int_out_type = None + int_out_type = types.float64 + + self.basic_ufunc_test(np.divide, + int_output_type=int_out_type) + + def test_logaddexp_ufunc(self): + self.basic_ufunc_test(np.logaddexp, kinds='f') + + def test_logaddexp2_ufunc(self): + self.basic_ufunc_test(np.logaddexp2, kinds='f') + + def test_true_divide_ufunc(self): + self.basic_ufunc_test(np.true_divide, + int_output_type=types.float64) + + def test_floor_divide_ufunc(self): + self.basic_ufunc_test(np.floor_divide) + + def test_negative_ufunc(self): + # NumPy ufunc has bug with uint32 as input and int64 as output, + # so skip uint32 input. + skip_inputs = [types.Array(types.uint32, 1, 'C'), types.uint32] + self.basic_ufunc_test(np.negative, int_output_type=types.int64, + skip_inputs=skip_inputs) + + def test_positive_ufunc(self): + self.basic_ufunc_test(np.positive) + + def test_power_ufunc(self): + self.basic_ufunc_test(np.power, positive_only=True) + + def test_float_power_ufunc(self): + self.basic_ufunc_test(np.float_power, kinds="fc") + + def test_gcd_ufunc(self): + self.basic_ufunc_test(np.gcd, kinds="iu") + + def test_lcm_ufunc(self): + self.basic_ufunc_test(np.lcm, kinds="iu") + + def test_remainder_ufunc(self): + self.basic_ufunc_test(np.remainder) + + def test_mod_ufunc(self): + additional_inputs = [ + ((np.uint64(np.iinfo(np.uint64).max), np.uint64(16)), types.uint64) + ] + self.basic_ufunc_test(np.mod, kinds='ifcu', + additional_inputs=additional_inputs) + + def test_fmod_ufunc(self): + self.basic_ufunc_test(np.fmod) + + def test_abs_ufunc(self, ufunc=np.abs): + additional_inputs = [ + (np.uint32(np.iinfo(np.uint32).max), 
types.uint32), + (np.uint64(np.iinfo(np.uint64).max), types.uint64), + (np.float32(np.finfo(np.float32).min), types.float32), + (np.float64(np.finfo(np.float64).min), types.float64), + ] + self.basic_ufunc_test(ufunc, + additional_inputs=additional_inputs) + + def test_absolute_ufunc(self): + self.test_abs_ufunc(ufunc=np.absolute) + + def test_fabs_ufunc(self): + self.basic_ufunc_test(np.fabs, kinds='f') + + def test_rint_ufunc(self): + self.basic_ufunc_test(np.rint, kinds='cf') + + def test_sign_ufunc(self): + self.basic_ufunc_test(np.sign) + + def test_conj_ufunc(self): + self.basic_ufunc_test(np.conj) + + def test_exp_ufunc(self): + self.basic_ufunc_test(np.exp, kinds='cf') + + def test_exp2_ufunc(self): + self.basic_ufunc_test(np.exp2, kinds='cf') + + def test_log_ufunc(self): + self.basic_ufunc_test(np.log, kinds='cf') + + def test_log2_ufunc(self): + self.basic_ufunc_test(np.log2, kinds='cf') + + def test_log10_ufunc(self): + self.basic_ufunc_test(np.log10, kinds='cf') + + def test_expm1_ufunc(self): + self.basic_ufunc_test(np.expm1, kinds='cf') + + def test_log1p_ufunc(self): + self.basic_ufunc_test(np.log1p, kinds='cf') + + def test_sqrt_ufunc(self): + self.basic_ufunc_test(np.sqrt, kinds='cf') + + def test_square_ufunc(self): + self.basic_ufunc_test(np.square) + + def test_cbrt_ufunc(self): + self.basic_ufunc_test(np.cbrt, kinds='f') + + def test_reciprocal_ufunc(self): + # reciprocal for integers doesn't make much sense and is problematic + # in the case of division by zero, as an inf will overflow float to + # int conversions, which is undefined behavior. 
+ to_skip = [types.Array(types.uint32, 1, 'C'), types.uint32, + types.Array(types.int32, 1, 'C'), types.int32, + types.Array(types.uint64, 1, 'C'), types.uint64, + types.Array(types.int64, 1, 'C'), types.int64] + self.basic_ufunc_test(np.reciprocal, skip_inputs=to_skip) + + def test_conjugate_ufunc(self): + self.basic_ufunc_test(np.conjugate) + + ############################################################################ + # Trigonometric Functions + + def test_sin_ufunc(self): + self.basic_ufunc_test(np.sin, kinds='cf') + + def test_cos_ufunc(self): + self.basic_ufunc_test(np.cos, kinds='cf') + + def test_tan_ufunc(self): + self.basic_ufunc_test(np.tan, kinds='cf') + + def test_arcsin_ufunc(self): + self.basic_ufunc_test(np.arcsin, kinds='cf') + + def test_arccos_ufunc(self): + self.basic_ufunc_test(np.arccos, kinds='cf') + + def test_arctan_ufunc(self): + self.basic_ufunc_test(np.arctan, kinds='cf') + + def test_arctan2_ufunc(self): + self.basic_ufunc_test(np.arctan2, kinds='cf') + + def test_hypot_ufunc(self): + self.basic_ufunc_test(np.hypot, kinds='f') + + def test_sinh_ufunc(self): + self.basic_ufunc_test(np.sinh, kinds='cf') + + def test_cosh_ufunc(self): + self.basic_ufunc_test(np.cosh, kinds='cf') + + def test_tanh_ufunc(self): + self.basic_ufunc_test(np.tanh, kinds='cf') + + def test_arcsinh_ufunc(self): + self.basic_ufunc_test(np.arcsinh, kinds='cf') + + def test_arccosh_ufunc(self): + self.basic_ufunc_test(np.arccosh, kinds='cf') + + def test_arctanh_ufunc(self): + # arctanh is only valid is only finite in the range ]-1, 1[ + # This means that for any of the integer types it will produce + # conversion from infinity/-infinity to integer. That's undefined + # behavior in C, so the results may vary from implementation to + # implementation. This means that the result from the compiler + # used to compile NumPy may differ from the result generated by + # llvm. Skipping the integer types in this test avoids failed + # tests because of this. 
+ to_skip = [types.Array(types.uint32, 1, 'C'), types.uint32, + types.Array(types.int32, 1, 'C'), types.int32, + types.Array(types.uint64, 1, 'C'), types.uint64, + types.Array(types.int64, 1, 'C'), types.int64] + + self.basic_ufunc_test(np.arctanh, skip_inputs=to_skip, kinds='cf') + + def test_deg2rad_ufunc(self): + self.basic_ufunc_test(np.deg2rad, kinds='f') + + def test_rad2deg_ufunc(self): + self.basic_ufunc_test(np.rad2deg, kinds='f') + + def test_degrees_ufunc(self): + self.basic_ufunc_test(np.degrees, kinds='f') + + def test_radians_ufunc(self): + self.basic_ufunc_test(np.radians, kinds='f') + + ############################################################################ + # Bit-twiddling Functions + + def test_bitwise_and_ufunc(self): + self.basic_int_ufunc_test(np.bitwise_and) + + def test_bitwise_or_ufunc(self): + self.basic_int_ufunc_test(np.bitwise_or) + + def test_bitwise_xor_ufunc(self): + self.basic_int_ufunc_test(np.bitwise_xor) + + def test_invert_ufunc(self): + self.basic_int_ufunc_test(np.invert) + + def test_bitwise_not_ufunc(self): + self.basic_int_ufunc_test(np.bitwise_not) + + # Note: there is no entry for left_shift and right_shift as this harness + # is not valid for them. This is so because left_shift and right + # shift implementation in NumPy has undefined behavior (in C-parlance) + # when the second argument is a negative (or bigger than the number + # of bits) value. + # Also, right_shift for negative first arguments also relies on + # implementation defined behavior, although numba warantees "sane" + # behavior (arithmetic shifts on signed integers, logic shifts on + # unsigned integers). 
+ + ############################################################################ + # Comparison functions + def test_greater_ufunc(self): + self.signed_unsigned_cmp_test(np.greater) + + def test_greater_equal_ufunc(self): + self.signed_unsigned_cmp_test(np.greater_equal) + + def test_less_ufunc(self): + self.signed_unsigned_cmp_test(np.less) + + def test_less_equal_ufunc(self): + self.signed_unsigned_cmp_test(np.less_equal) + + def test_not_equal_ufunc(self): + self.signed_unsigned_cmp_test(np.not_equal) + + def test_equal_ufunc(self): + self.signed_unsigned_cmp_test(np.equal) + + def test_logical_and_ufunc(self): + self.basic_ufunc_test(np.logical_and) + + def test_logical_or_ufunc(self): + self.basic_ufunc_test(np.logical_or) + + def test_logical_xor_ufunc(self): + self.basic_ufunc_test(np.logical_xor) + + def test_logical_not_ufunc(self): + self.basic_ufunc_test(np.logical_not) + + def test_maximum_ufunc(self): + self.basic_ufunc_test(np.maximum) + + def test_minimum_ufunc(self): + self.basic_ufunc_test(np.minimum) + + def test_fmax_ufunc(self): + self.basic_ufunc_test(np.fmax) + + def test_fmin_ufunc(self): + self.basic_ufunc_test(np.fmin) + + ############################################################################ + # Floating functions + + def bool_additional_inputs(self): + return [ + (np.array([True, False], dtype=np.bool_), + types.Array(types.bool_, 1, 'C')), + ] + + def test_isfinite_ufunc(self): + self.basic_ufunc_test( + np.isfinite, kinds='ifcb', + additional_inputs=self.bool_additional_inputs(), + ) + + def test_isinf_ufunc(self): + self.basic_ufunc_test( + np.isinf, kinds='ifcb', + additional_inputs=self.bool_additional_inputs(), + ) + + def test_isnan_ufunc(self): + self.basic_ufunc_test( + np.isnan, kinds='ifcb', + additional_inputs=self.bool_additional_inputs(), + ) + + def test_signbit_ufunc(self): + self.basic_ufunc_test(np.signbit) + + def test_copysign_ufunc(self): + self.basic_ufunc_test(np.copysign, kinds='f') + + def 
test_nextafter_ufunc(self): + self.basic_ufunc_test(np.nextafter, kinds='f') + + @_unimplemented + def test_modf_ufunc(self): + self.basic_ufunc_test(np.modf, kinds='f') + + # Note: there is no entry for ldexp as this harness isn't valid for this + # ufunc. this is so because ldexp requires heterogeneous inputs. + # However, this ufunc is tested by the TestLoopTypes test classes. + + @_unimplemented + def test_frexp_ufunc(self): + self.basic_ufunc_test(np.frexp, kinds='f') + + def test_floor_ufunc(self): + self.basic_ufunc_test(np.floor, kinds='f') + + def test_ceil_ufunc(self): + self.basic_ufunc_test(np.ceil, kinds='f') + + def test_trunc_ufunc(self): + self.basic_ufunc_test(np.trunc, kinds='f') + + def test_spacing_ufunc(self): + # additional input to check inf behaviour as Numba uses a different alg + # to NumPy + additional = [(np.array([np.inf, -np.inf], dtype=np.float64), + types.Array(types.float64, 1, 'C')),] + self.basic_ufunc_test(np.spacing, kinds='f', + additional_inputs=additional) + + ############################################################################ + # Other tests + + def binary_ufunc_mixed_types_test(self, ufunc): + ufunc_name = ufunc.__name__ + ufunc = _make_ufunc_usecase(ufunc) + inputs1 = [ + (1, types.uint64), + (-1, types.int64), + (0.5, types.float64), + + (np.array([0, 1], dtype='u8'), types.Array(types.uint64, 1, 'C')), + (np.array([-1, 1], dtype='i8'), types.Array(types.int64, 1, 'C')), + (np.array([-0.5, 0.5], dtype='f8'), + types.Array(types.float64, 1, 'C'))] + + inputs2 = inputs1 + + output_types = [types.Array(types.int64, 1, 'C'), + types.Array(types.float64, 1, 'C')] + + pyfunc = ufunc + + for vals in itertools.product(inputs1, inputs2, output_types): + input1, input2, output_type = vals + + input1_operand = input1[0] + input1_type = input1[1] + + input2_operand = input2[0] + input2_type = input2[1] + + # Skip division by unsigned int because of NumPy bugs + if ufunc_name == 'divide' and ( + input2_type == 
types.Array(types.uint32, 1, 'C') or + input2_type == types.Array(types.uint64, 1, 'C')): + continue + + # Skip some subtraction tests because of NumPy bugs + if (ufunc_name == 'subtract' + and input1_type == types.Array(types.uint32, 1, 'C') + and input2_type == types.uint32 + and types.Array(types.int64, 1, 'C')): + continue + if (ufunc_name == 'subtract' + and input1_type == types.Array(types.uint32, 1, 'C') + and input2_type == types.uint64 + and types.Array(types.int64, 1, 'C')): + continue + + if ((isinstance(input1_type, types.Array) or + isinstance(input2_type, types.Array)) and + not isinstance(output_type, types.Array)): + continue + + args = (input1_type, input2_type, output_type) + cfunc = self._compile(pyfunc, args) + + if isinstance(input1_operand, np.ndarray): + result = np.zeros(input1_operand.size, + dtype=output_type.dtype.name) + expected = np.zeros(input1_operand.size, + dtype=output_type.dtype.name) + elif isinstance(input2_operand, np.ndarray): + result = np.zeros(input2_operand.size, + dtype=output_type.dtype.name) + expected = np.zeros(input2_operand.size, + dtype=output_type.dtype.name) + else: + result = np.zeros(1, dtype=output_type.dtype.name) + expected = np.zeros(1, dtype=output_type.dtype.name) + + cfunc(input1_operand, input2_operand, result) + pyfunc(input1_operand, input2_operand, expected) + + scalar_type = getattr(output_type, 'dtype', output_type) + prec = ('single' + if scalar_type in (types.float32, types.complex64) + else 'double') + self.assertPreciseEqual(expected, result, prec=prec) + + def test_broadcasting(self): + + # Test unary ufunc + pyfunc = _make_ufunc_usecase(np.negative) + + input_operands = [ + np.arange(3, dtype='u8'), + np.arange(3, dtype='u8').reshape(3,1), + np.arange(3, dtype='u8').reshape(1,3), + np.arange(3, dtype='u8').reshape(3,1), + np.arange(3, dtype='u8').reshape(1,3), + np.arange(3 * 3, dtype='u8').reshape(3,3)] + + output_operands = [ + np.zeros(3 * 3, dtype='i8').reshape(3,3), + np.zeros(3 * 3, 
dtype='i8').reshape(3,3), + np.zeros(3 * 3, dtype='i8').reshape(3,3), + np.zeros(3 * 3 * 3, dtype='i8').reshape(3,3,3), + np.zeros(3 * 3 * 3, dtype='i8').reshape(3,3,3), + np.zeros(3 * 3 * 3, dtype='i8').reshape(3,3,3)] + + for x, result in zip(input_operands, output_operands): + + input_type = types.Array(types.uint64, x.ndim, 'C') + output_type = types.Array(types.int64, result.ndim, 'C') + args = (input_type, output_type) + + cfunc = self._compile(pyfunc, args) + + expected = np.zeros(result.shape, dtype=result.dtype) + np.negative(x, expected) + + cfunc(x, result) + + self.assertPreciseEqual(result, expected) + + # Test binary ufunc + pyfunc = _make_ufunc_usecase(np.add) + + input1_operands = [ + np.arange(3, dtype='u8'), + np.arange(3 * 3, dtype='u8').reshape(3,3), + np.arange(3 * 3 * 3, dtype='u8').reshape(3,3,3), + np.arange(3, dtype='u8').reshape(3,1), + np.arange(3, dtype='u8').reshape(1,3), + np.arange(3, dtype='u8').reshape(3,1,1), + np.arange(3 * 3, dtype='u8').reshape(3,3,1), + np.arange(3 * 3, dtype='u8').reshape(3,1,3), + np.arange(3 * 3, dtype='u8').reshape(1,3,3)] + + input2_operands = input1_operands + + for x, y in itertools.product(input1_operands, input2_operands): + + input1_type = types.Array(types.uint64, x.ndim, 'C') + input2_type = types.Array(types.uint64, y.ndim, 'C') + output_type = types.Array(types.uint64, max(x.ndim, y.ndim), 'C') + args = (input1_type, input2_type, output_type) + + cfunc = self._compile(pyfunc, args) + + expected = np.add(x, y) + result = np.zeros(expected.shape, dtype='u8') + + cfunc(x, y, result) + self.assertPreciseEqual(result, expected) + + def test_implicit_output_npm(self): + # Test for Issue #1078 (https://github.com/numba/numba/issues/1078) - + # ensures that the output of a ufunc is an array. 
+ arr_ty = types.Array(types.uint64, 1, 'C') + sig = (arr_ty, arr_ty) + + @njit((arr_ty, arr_ty)) + def myadd(a0, a1): + return np.add(a0, a1) + + self.assertEqual(myadd.overloads[sig].signature.return_type, arr_ty) + + def test_broadcast_implicit_output_npm_nrt(self): + def pyfunc(a0, a1): + return np.add(a0, a1) + + input1_operands = [ + np.arange(3, dtype='u8'), + np.arange(3 * 3, dtype='u8').reshape(3,3), + np.arange(3 * 3 * 3, dtype='u8').reshape(3,3,3), + np.arange(3, dtype='u8').reshape(3,1), + np.arange(3, dtype='u8').reshape(1,3), + np.arange(3, dtype='u8').reshape(3,1,1), + np.arange(3 * 3, dtype='u8').reshape(3,3,1), + np.arange(3 * 3, dtype='u8').reshape(3,1,3), + np.arange(3 * 3, dtype='u8').reshape(1,3,3)] + + input2_operands = input1_operands + + for x, y in itertools.product(input1_operands, input2_operands): + + input1_type = types.Array(types.uint64, x.ndim, 'C') + input2_type = types.Array(types.uint64, y.ndim, 'C') + args = (input1_type, input2_type) + + cfunc = self._compile(pyfunc, args, nrt=True) + + expected = np.add(x, y) + result = cfunc(x, y) + np.testing.assert_array_equal(expected, result) + + def test_implicit_output_layout_binary(self): + def pyfunc(a0, a1): + return np.add(a0, a1) + + # C layout + X = np.linspace(0, 1, 20).reshape(4, 5) + # F layout + Y = np.array(X, order='F') + # A layout + Z = X.reshape(5, 4).T[0] + + Xty = typeof(X) + assert X.flags.c_contiguous and Xty.layout == 'C' + Yty = typeof(Y) + assert Y.flags.f_contiguous and Yty.layout == 'F' + Zty = typeof(Z) + assert Zty.layout == 'A' + assert not Z.flags.c_contiguous + assert not Z.flags.f_contiguous + + testcases = list(itertools.permutations([X, Y, Z], 2)) + testcases += [(X, X)] + testcases += [(Y, Y)] + testcases += [(Z, Z)] + + for arg0, arg1 in testcases: + args = (typeof(arg0), typeof(arg1)) + cfunc = self._compile(pyfunc, args, nrt=True) + expected = pyfunc(arg0, arg1) + result = cfunc(arg0, arg1) + + self.assertEqual(expected.flags.c_contiguous, + 
result.flags.c_contiguous) + self.assertEqual(expected.flags.f_contiguous, + result.flags.f_contiguous) + np.testing.assert_array_equal(expected, result) + + def test_implicit_output_layout_unary(self): + def pyfunc(a0): + return np.sqrt(a0) + + # C layout + X = np.linspace(0, 1, 20).reshape(4, 5) + # F layout + Y = np.array(X, order='F') + # A layout + Z = X.reshape(5, 4).T[0] + + Xty = typeof(X) + assert X.flags.c_contiguous and Xty.layout == 'C' + Yty = typeof(Y) + assert Y.flags.f_contiguous and Yty.layout == 'F' + Zty = typeof(Z) + assert Zty.layout == 'A' + assert not Z.flags.c_contiguous + assert not Z.flags.f_contiguous + + for arg0 in [X, Y, Z]: + args = (typeof(arg0),) + cfunc = self._compile(pyfunc, args, nrt=True) + expected = pyfunc(arg0) + result = cfunc(arg0) + + self.assertEqual(expected.flags.c_contiguous, + result.flags.c_contiguous) + self.assertEqual(expected.flags.f_contiguous, + result.flags.f_contiguous) + np.testing.assert_array_equal(expected, result) + + +class TestArrayOperators(BaseUFuncTest, TestCase): + + def _check_results(self, expected, got): + self.assertEqual(expected.dtype.kind, got.dtype.kind) + np.testing.assert_array_almost_equal(expected, got) + + def unary_op_test(self, operator, nrt=True, + skip_inputs=[], additional_inputs=[], + int_output_type=None, float_output_type=None): + operator_func = _make_unary_ufunc_op_usecase(operator) + inputs = list(self.inputs) + inputs.extend(additional_inputs) + pyfunc = operator_func + for input_tuple in inputs: + input_operand, input_type = input_tuple + + if ((input_type in skip_inputs) or + (not isinstance(input_type, types.Array))): + continue + + cfunc = self._compile(pyfunc, (input_type,), nrt=nrt) + expected = pyfunc(input_operand) + got = cfunc(input_operand) + self._check_results(expected, got) + + def binary_op_test(self, operator, nrt=True, + skip_inputs=[], additional_inputs=[], + int_output_type=None, float_output_type=None, + positive_rhs=False): + operator_func = 
_make_binary_ufunc_op_usecase(operator) + inputs = list(self.inputs) + inputs.extend(additional_inputs) + pyfunc = operator_func + # when generating arbitrary sequences, we use a fixed seed + # for deterministic testing + random_state = np.random.RandomState(1) + for input_tuple in inputs: + input_operand1, input_type = input_tuple + input_dtype = numpy_support.as_dtype( + getattr(input_type, "dtype", input_type)) + input_type1 = input_type + + if input_type in skip_inputs: + continue + + if positive_rhs: + zero = np.zeros(1, dtype=input_dtype)[0] + # If we only use two scalars, the code generator will not + # select the ufunctionalized operator, so we mix it up. + if isinstance(input_type, types.Array): + input_operand0 = input_operand1 + input_type0 = input_type + if positive_rhs and np.any(input_operand1 < zero): + continue + else: + input_operand0 = (random_state.uniform(0, 100, 10)).astype( + input_dtype) + input_type0 = typeof(input_operand0) + if positive_rhs and input_operand1 < zero: + continue + + args = (input_type0, input_type1) + cfunc = self._compile(pyfunc, args, nrt=nrt) + expected = pyfunc(input_operand0, input_operand1) + got = cfunc(input_operand0, input_operand1) + self._check_results(expected, got) + + def bitwise_additional_inputs(self): + # For bitwise operators, we want to check the results for boolean + # arrays (see #1813). 
+ return [ + (True, types.boolean), + (False, types.boolean), + (np.array([True, False]), types.Array(types.boolean, 1, 'C')), + ] + + def binary_int_op_test(self, *args, **kws): + skip_inputs = kws.setdefault('skip_inputs', []) + skip_inputs += [ + types.float32, types.float64, + types.Array(types.float32, 1, 'C'), + types.Array(types.float64, 1, 'C'), + ] + return self.binary_op_test(*args, **kws) + + def binary_bitwise_op_test(self, *args, **kws): + additional_inputs = kws.setdefault('additional_inputs', []) + additional_inputs += self.bitwise_additional_inputs() + return self.binary_int_op_test(*args, **kws) + + def inplace_op_test(self, operator, lhs_values, rhs_values, + lhs_dtypes, rhs_dtypes, precise=True): + operator_func = _make_inplace_ufunc_op_usecase(operator) + pyfunc = operator_func + + if precise: + assertion = self.assertPreciseEqual + else: + assertion = np.testing.assert_allclose + + # The left operand can only be an array, while the right operand + # can be either an array or a scalar + lhs_inputs = [np.array(lhs_values, dtype=dtype) + for dtype in lhs_dtypes] + + rhs_arrays = [np.array(rhs_values, dtype=dtype) + for dtype in rhs_dtypes] + rhs_scalars = [dtype(v) for v in rhs_values for dtype in rhs_dtypes] + rhs_inputs = rhs_arrays + rhs_scalars + + for lhs, rhs in itertools.product(lhs_inputs, rhs_inputs): + lhs_type = typeof(lhs) + rhs_type = typeof(rhs) + args = (lhs_type, rhs_type) + cfunc = self._compile(pyfunc, args) + expected = lhs.copy() + pyfunc(expected, rhs) + got = lhs.copy() + cfunc(got, rhs) + assertion(got, expected) + + def inplace_float_op_test(self, operator, lhs_values, rhs_values, + precise=True): + # Also accept integer inputs for the right operand (they should + # be converted to float). 
+ return self.inplace_op_test(operator, lhs_values, rhs_values, + (np.float32, np.float64), + (np.float32, np.float64, np.int64), + precise=precise) + + def inplace_int_op_test(self, operator, lhs_values, rhs_values): + self.inplace_op_test(operator, lhs_values, rhs_values, + (np.int16, np.int32, np.int64), + (np.int16, np.uint32)) + + def inplace_bitwise_op_test(self, operator, lhs_values, rhs_values): + self.inplace_int_op_test(operator, lhs_values, rhs_values) + self.inplace_op_test(operator, lhs_values, rhs_values, + (np.bool_,), (np.bool_, np.bool_)) + + # ____________________________________________________________ + # Unary operators + + def test_unary_positive_array_op(self): + self.unary_op_test('+') + + def test_unary_negative_array_op(self): + self.unary_op_test('-') + + def test_unary_invert_array_op(self): + self.unary_op_test('~', + skip_inputs=[types.float32, types.float64, + types.Array(types.float32, 1, 'C'), + types.Array(types.float64, 1, 'C')], + additional_inputs=self.bitwise_additional_inputs()) + + # ____________________________________________________________ + # Inplace operators + + def test_inplace_add(self): + self.inplace_float_op_test('+=', [-1, 1.5, 3], [-5, 0, 2.5]) + self.inplace_float_op_test(operator.iadd, [-1, 1.5, 3], [-5, 0, 2.5]) + + def test_inplace_sub(self): + self.inplace_float_op_test('-=', [-1, 1.5, 3], [-5, 0, 2.5]) + self.inplace_float_op_test(operator.isub, [-1, 1.5, 3], [-5, 0, 2.5]) + + def test_inplace_mul(self): + self.inplace_float_op_test('*=', [-1, 1.5, 3], [-5, 0, 2.5]) + self.inplace_float_op_test(operator.imul, [-1, 1.5, 3], [-5, 0, 2.5]) + + def test_inplace_floordiv(self): + self.inplace_float_op_test('//=', [-1, 1.5, 3], [-5, 1.25, 2.5]) + self.inplace_float_op_test(operator.ifloordiv, [-1, 1.5, 3], + [-5, 1.25, 2.5]) + + def test_inplace_div(self): + self.inplace_float_op_test('/=', [-1, 1.5, 3], [-5, 0, 2.5]) + self.inplace_float_op_test(operator.itruediv, [-1, 1.5, 3], + [-5, 1.25, 2.5]) + + def 
test_inplace_remainder(self): + self.inplace_float_op_test('%=', [-1, 1.5, 3], [-5, 2, 2.5]) + self.inplace_float_op_test(operator.imod, [-1, 1.5, 3], [-5, 2, 2.5]) + + def test_inplace_pow(self): + self.inplace_float_op_test('**=', [-1, 1.5, 3], [-5, 2, 2.5], + precise=False) + self.inplace_float_op_test(operator.ipow, [-1, 1.5, 3], [-5, 2, 2.5], + precise=False) + + def test_inplace_and(self): + self.inplace_bitwise_op_test('&=', [0, 1, 2, 3, 51], + [0, 13, 16, 42, 255]) + self.inplace_bitwise_op_test(operator.iand, [0, 1, 2, 3, 51], + [0, 13, 16, 42, 255]) + + def test_inplace_or(self): + self.inplace_bitwise_op_test('|=', [0, 1, 2, 3, 51], + [0, 13, 16, 42, 255]) + self.inplace_bitwise_op_test(operator.ior, [0, 1, 2, 3, 51], + [0, 13, 16, 42, 255]) + + def test_inplace_xor(self): + self.inplace_bitwise_op_test('^=', [0, 1, 2, 3, 51], + [0, 13, 16, 42, 255]) + self.inplace_bitwise_op_test(operator.ixor, [0, 1, 2, 3, 51], + [0, 13, 16, 42, 255]) + + def test_inplace_lshift(self): + self.inplace_int_op_test('<<=', [0, 5, -10, -51], [0, 1, 4, 14]) + self.inplace_int_op_test(operator.ilshift, [0, 5, -10, -51], + [0, 1, 4, 14]) + + def test_inplace_rshift(self): + self.inplace_int_op_test('>>=', [0, 5, -10, -51], [0, 1, 4, 14]) + self.inplace_int_op_test(operator.irshift, [0, 5, -10, -51], + [0, 1, 4, 14]) + + def test_unary_positive_array_op_2(self): + ''' + Verify that the unary positive operator copies values, and doesn't + just alias to the input array (mirrors normal Numpy/Python + interaction behavior). 
+ ''' + # Test originally from @gmarkall + def f(a1): + a2 = +a1 + a1[0] = 3 + a2[1] = 4 + return a2 + + a1 = np.zeros(10) + a2 = f(a1) + self.assertTrue(a1[0] != a2[0] and a1[1] != a2[1]) + a3 = np.zeros(10) + a4 = njit(f)(a3) + self.assertTrue(a3[0] != a4[0] and a3[1] != a4[1]) + np.testing.assert_array_equal(a1, a3) + np.testing.assert_array_equal(a2, a4) + + # ____________________________________________________________ + # Binary operators + + def test_add_array_op(self): + self.binary_op_test('+') + + def test_subtract_array_op(self): + self.binary_op_test('-') + + def test_multiply_array_op(self): + self.binary_op_test('*') + + def test_divide_array_op(self): + int_out_type = None + int_out_type = types.float64 + self.binary_op_test('/', int_output_type=int_out_type) + + def test_floor_divide_array_op(self): + # Avoid floating-point zeros as x // 0.0 can have varying results + # depending on the algorithm (which changed across Numpy versions) + self.inputs = [ + (np.uint32(1), types.uint32), + (np.int32(-2), types.int32), + (np.int32(0), types.int32), + (np.uint64(4), types.uint64), + (np.int64(-5), types.int64), + (np.int64(0), types.int64), + + (np.float32(-0.5), types.float32), + (np.float32(1.5), types.float32), + + (np.float64(-2.5), types.float64), + (np.float64(3.5), types.float64), + + (np.array([1,2], dtype='u4'), types.Array(types.uint32, 1, 'C')), + (np.array([3,4], dtype='u8'), types.Array(types.uint64, 1, 'C')), + (np.array([-1,1,5], dtype='i4'), types.Array(types.int32, 1, 'C')), + (np.array([-1,1,6], dtype='i8'), types.Array(types.int64, 1, 'C')), + (np.array([-0.5, 1.5], dtype='f4'), + types.Array(types.float32, 1, 'C')), + (np.array([-2.5, 3.5], dtype='f8'), + types.Array(types.float64, 1, 'C')), + ] + self.binary_op_test('//') + + def test_remainder_array_op(self): + self.binary_op_test('%') + + def test_power_array_op(self): + self.binary_op_test('**', positive_rhs=True) + + def test_left_shift_array_op(self): + 
self.binary_int_op_test('<<', positive_rhs=True) + + def test_right_shift_array_op(self): + self.binary_int_op_test('>>', positive_rhs=True) + + def test_bitwise_and_array_op(self): + self.binary_bitwise_op_test('&') + + def test_bitwise_or_array_op(self): + self.binary_bitwise_op_test('|') + + def test_bitwise_xor_array_op(self): + self.binary_bitwise_op_test('^') + + def test_equal_array_op(self): + self.binary_op_test('==') + + def test_greater_array_op(self): + self.binary_op_test('>') + + def test_greater_equal_array_op(self): + self.binary_op_test('>=') + + def test_less_array_op(self): + self.binary_op_test('<') + + def test_less_equal_array_op(self): + self.binary_op_test('<=') + + def test_not_equal_array_op(self): + self.binary_op_test('!=') + + +class TestScalarUFuncs(TestCase): + """check the machinery of ufuncs works when the result is an scalar. + These are not exhaustive because: + - the machinery to support this case is the same for all the functions of a + given arity. + - the result of the inner function itself is already tested in TestUFuncs + """ + + def run_ufunc(self, pyfunc, arg_types, arg_values): + for tyargs, args in zip(arg_types, arg_values): + cfunc = njit(tyargs)(pyfunc) + got = cfunc(*args) + expected = pyfunc(*_as_dtype_value(tyargs, args)) + + msg = 'for args {0} typed {1}'.format(args, tyargs) + + # note: due to semantics of ufuncs, thing like adding a int32 to a + # uint64 results in doubles (as neither int32 can be cast safely + # to uint64 nor vice-versa, falling back to using the float version. 
+ # Modify in those cases the expected value (the numpy version does + # not use typed integers as inputs so its result is an integer) + special = set([ + (types.int32, types.uint64), + (types.uint64, types.int32), + (types.int64, types.uint64), + (types.uint64, types.int64) + ]) + if tyargs in special: + expected = float(expected) + else: + # The numba version of scalar ufuncs return an actual value that + # gets converted to a Python type, instead of using NumPy + # scalars. although in python 2 NumPy scalars are considered + # and instance of the appropriate python type, in python 3 that + # is no longer the case. This is why the expected result is + # casted to the appropriate Python type (which is actually the + # expected behavior of the ufunc translation) + if np.issubdtype(expected.dtype, np.inexact): + expected = float(expected) + elif np.issubdtype(expected.dtype, np.integer): + expected = int(expected) + elif np.issubdtype(expected.dtype, np.bool_): + expected = bool(expected) + + alltypes = tyargs + (cfunc.overloads[tyargs].signature.return_type,) + + # select the appropriate precision for comparison: note that an + # argument typed at a lower precision can introduce precision + # problems. For this reason the argument types must be taken into + # account. 
+ if any([t == types.float32 for t in alltypes]): + prec = 'single' + elif any([t == types.float64 for t in alltypes]): + prec = 'double' + else: + prec = 'exact' + + self.assertPreciseEqual(got, expected, msg=msg, prec=prec) + + def test_scalar_unary_ufunc(self): + def _func(x): + return np.sqrt(x) + + vals = [(2,), (2,), (1,), (2,), (.1,), (.2,)] + tys = [(types.int32,), (types.uint32,), + (types.int64,), (types.uint64,), + (types.float32,), (types.float64,)] + self.run_ufunc(_func, tys, vals) + + def test_scalar_binary_uniform_ufunc(self): + def _func(x,y): + return np.add(x,y) + + vals = [2, 2, 1, 2, .1, .2] + tys = [types.int32, types.uint32, + types.int64, types.uint64, types.float32, types.float64] + self.run_ufunc(_func, zip(tys, tys), zip(vals, vals)) + + def test_scalar_binary_mixed_ufunc(self): + def _func(x,y): + return np.add(x,y) + + vals = [2, 2, 1, 2, .1, .2] + tys = [types.int32, types.uint32, + types.int64, types.uint64, + types.float32, types.float64] + self.run_ufunc(_func, itertools.product(tys, tys), + itertools.product(vals, vals)) + + +class TestUfuncIssues(TestCase): + + def test_issue_651(self): + # Exercise the code path to make sure this does not fail + @vectorize(["(float64,float64)"]) + def foo(x1, x2): + return np.add(x1, x2) + np.add(x1, x2) + + a = np.arange(10, dtype='f8') + b = np.arange(10, dtype='f8') + self.assertPreciseEqual(foo(a, b), (a + b) + (a + b)) + + def test_issue_2006(self): + """ + should return float32, not float64. + """ + def foo(x, y): + return np.power(x, y) + pyfunc = foo + cfunc = njit(pyfunc) + + def check(x, y): + got = cfunc(x, y) + np.testing.assert_array_almost_equal(got, pyfunc(x, y)) + # Check the power operation conserved the input's dtype + # (this is different from Numpy, whose behaviour depends on + # the *values* of the arguments -- see PyArray_CanCastArrayTo). 
+ self.assertEqual(got.dtype, x.dtype) + + xs = [np.float32([1, 2, 3]), np.complex64([1j, 2, 3 - 3j])] + for x in xs: + check(x, 3) + check(x, np.uint64(3)) + check(x, np.int64([2, 2, 3])) + + +class _LoopTypesTester(TestCase): + """Test code generation for the different loop types defined by ufunc. + + This test relies on class variables to configure the test. Subclasses + of this class can just override some of these variables to check other + ufuncs in a different compilation context. The variables supported are: + + _funcs: the ufuncs to test + _skip_types: letter types that force skipping the loop when testing + if present in the NumPy ufunc signature. + _supported_types: only test loops where all the types in the loop + signature are in this collection. If unset, all. + + Note that both, _skip_types and _supported_types must be met for a loop + to be tested. + + The NumPy ufunc signature has a form like 'ff->f' (for a binary ufunc + loop taking 2 floats and resulting in a float). In a NumPy ufunc object + you can get a list of supported signatures by accessing the attribute + 'types'. + """ + _skip_types = 'OegG' + + # Allowed deviation between Numpy and Numba results + _ulps = {('arccos', 'F'): 2, + ('arcsin', 'D'): 4, + ('arcsin', 'F'): 4, + ('log10', 'D'): 5, + ('tanh', 'F'): 2, + ('cbrt', 'd'): 2, + ('logaddexp2', 'd'): 2, + } + + def _arg_for_type(self, a_letter_type, index=0): + """return a suitable array argument for testing the letter type""" + # Note all possible arrays must have the same size, since they + # may be used as inputs to the same func. 
+ if a_letter_type in 'bhilq': + # an integral + return np.array([1, 4, 0, -2], dtype=a_letter_type) + if a_letter_type in 'BHILQ': + return np.array([1, 2, 4, 0], dtype=a_letter_type) + elif a_letter_type in '?': + # a boolean + return np.array([True, False, False, True], dtype=a_letter_type) + elif a_letter_type[0] == 'm': + # timedelta64 + if len(a_letter_type) == 1: + a_letter_type = 'm8[D]' + return np.array([2, -3, 'NaT', 0], dtype=a_letter_type) + elif a_letter_type[0] == 'M': + # datetime64 + if len(a_letter_type) == 1: + a_letter_type = 'M8[D]' + return np.array(['Nat', 1, 25, 0], dtype=a_letter_type) + elif a_letter_type in 'fd': + # floating point + return np.array([1.5, -3.5, 0.0, float('nan')], + dtype=a_letter_type) + elif a_letter_type in 'FD': + # complex + if sys.platform != 'win32': + # Other platforms have better handling of negative zeros, + # test them + negzero = -(0.0 + 1.0j) + else: + negzero = 0.0 - 1.0j + return np.array([negzero, 1.5 + 1.5j, 1j * float('nan'), 0j], + dtype=a_letter_type) + else: + raise RuntimeError("type %r not understood" % (a_letter_type,)) + + def _check_loop(self, fn, ufunc, loop): + # the letter types for the args + letter_types = loop[:ufunc.nin] + loop[-ufunc.nout:] + + # ignore the loops containing an object argument. They will always + # fail in no python mode. Usually the last loop in ufuncs is an all + # object fallback + supported_types = getattr(self, '_supported_types', []) + if (supported_types and + any(l not in supported_types for l in letter_types)): + return + skip_types = getattr(self, '_skip_types', []) + if any(l in skip_types for l in letter_types): + return + # if the test case requires some types to be present, skip loops + # not involving any of those types. 
+ required_types = getattr(self, '_required_types', []) + if required_types and not any(l in letter_types + for l in required_types): + return + + self._check_ufunc_with_dtypes(fn, ufunc, letter_types) + + def _check_ufunc_with_dtypes(self, fn, ufunc, dtypes): + # Arrays created with datetime and timedelta types (e.g. with np.array) + # will have units, so in order to ensure that the dtypes of arguments + # match the dtypes in the signature, we add units to unitless datetime + # and timedelta types. This corresponds with the addition of units in + # _arg_for_type() above. + dtypes_with_units = [] + for t in dtypes: + if t in ('m', 'M'): + t = t + '8[D]' + dtypes_with_units.append(t) + + arg_dty = [np.dtype(t) for t in dtypes_with_units] + arg_nbty = tuple([types.Array(from_dtype(t), 1, 'C') for t in arg_dty]) + cfunc = njit(arg_nbty)(fn) + + # Ensure a good mix of input values + c_args = [self._arg_for_type(t, index=index).repeat(2) + for index, t in enumerate(dtypes)] + for arr in c_args: + self.random.shuffle(arr) + py_args = [a.copy() for a in c_args] + + cfunc(*c_args) + fn(*py_args) + + # Check each array (including inputs, to ensure they weren't + # mutated). 
+ for dtype, py_arg, c_arg in zip(arg_dty, py_args, c_args): + py_arg, c_arg = self._fixup_results(dtype, py_arg, c_arg) + typechar = c_arg.dtype.char + ulps = self._ulps.get((ufunc.__name__, typechar), 1) + prec = 'single' if typechar in 'fF' else 'exact' + prec = 'double' if typechar in 'dD' else prec + msg = '\n'.join(["ufunc '{0}' arrays differ ({1}):", + "args: {2}", "expected {3}", "got {4}"]) + msg = msg.format(ufunc.__name__, c_args, prec, py_arg, c_arg) + self.assertPreciseEqual(py_arg, c_arg, prec=prec, msg=msg, + ulps=ulps) + + def _fixup_results(self, dtype, py_arg, c_arg): + return py_arg, c_arg + + @classmethod + def _check_ufunc_loops(cls, ufunc): + for loop in ufunc.types: + cls._inject_test(ufunc, loop) + + @classmethod + def _inject_test(cls, ufunc, loop): + def test_template(self): + fn = _make_ufunc_usecase(ufunc) + self._check_loop(fn, ufunc, loop) + setattr(cls, "test_{0}_{1}".format(ufunc.__name__, + loop.replace('->', '_')), + test_template) + + @classmethod + def autogenerate(cls): + for ufunc in cls._ufuncs: + cls._check_ufunc_loops(ufunc) + + +class TestLoopTypesInt(_LoopTypesTester): + _ufuncs = supported_ufuncs[:] + # reciprocal and power need a special test due to issue #757 + _ufuncs.remove(np.power) + _ufuncs.remove(np.reciprocal) + _ufuncs.remove(np.left_shift) # has its own test class + _ufuncs.remove(np.right_shift) # has its own test class + # special test for bool subtract/negative + _ufuncs.remove(np.subtract) + _ufuncs.remove(np.negative) + _required_types = '?bBhHiIlLqQ' + _skip_types = 'fdFDmMO' + _LoopTypesTester._skip_types + + +TestLoopTypesInt.autogenerate() + + +class TestLoopTypesSubtractAndNegative(_LoopTypesTester): + _ufuncs = [np.subtract, np.negative] + _required_types = '?bBhHiIlLqQfdFD' + _skip_types = 'mMO' + _LoopTypesTester._skip_types + '?' 
+ + +TestLoopTypesSubtractAndNegative.autogenerate() + + +class TestLoopTypesReciprocal(_LoopTypesTester): + _ufuncs = [np.reciprocal] # issue #757 + _required_types = 'bBhHiIlLqQfdFD' + _skip_types = 'mMO' + _LoopTypesTester._skip_types + + def _arg_for_type(self, a_letter_type, index=0): + res = super(self.__class__, self)._arg_for_type(a_letter_type, + index=index) + if a_letter_type in 'bBhHiIlLqQ': + # For integer reciprocal, avoid 0 as argument, as it triggers + # undefined behavior that may differ in results from Numba + # to the compiler used to compile NumPy. + res[res == 0] = 42 + return res + + +TestLoopTypesReciprocal.autogenerate() + + +class TestLoopTypesPower(_LoopTypesTester): + _ufuncs = [np.power] # issue #757 + _required_types = 'bBhHiIlLqQfdFD' + _skip_types = 'mMO' + _LoopTypesTester._skip_types + + def _arg_for_type(self, a_letter_type, index=0): + res = super(self.__class__, self)._arg_for_type(a_letter_type, + index=index) + if a_letter_type in 'bBhHiIlLqQ' and index == 1: + # For integer power, avoid a negative exponent, as it triggers + # undefined behavior that may differ in results from Numba + # to the compiler used to compile NumPy + res[res < 0] = 3 + return res + + +TestLoopTypesPower.autogenerate() + + +class TestLoopTypesIntLeftShift(_LoopTypesTester): + _ufuncs = [np.left_shift] + _required_types = 'bBhHiIlLqQ' + _skip_types = 'fdFDmMO' + _LoopTypesTester._skip_types + + def _arg_for_type(self, a_letter_type, index=0): + res = super(self.__class__, self)._arg_for_type(a_letter_type, + index=index) + # Shifting by a negative amount (argument with index 1) is undefined + # behavior in C. It is also undefined behavior in numba. In the same + # sense, it is also undefined behavior when the shift amount is larger + # than the number of bits in the shifted integer. 
+ # To avoid problems in the test, the values are clamped (clipped) so + # that 0 <= shift_amount < bitcount(shifted_integer) + if index == 1: + bit_count = res.dtype.itemsize * 8 + res = np.clip(res, 0, bit_count - 1) + return res + + +TestLoopTypesIntLeftShift.autogenerate() + + +class TestLoopTypesIntRightShift(_LoopTypesTester): + _ufuncs = [np.right_shift] + _required_types = 'bBhHiIlLqQ' + _skip_types = 'fdFDmMO' + _LoopTypesTester._skip_types + + def _arg_for_type(self, a_letter_type, index=0): + res = super(self.__class__, self)._arg_for_type(a_letter_type, + index=index) + # Shifting by a negative amount (argument with index 1) is undefined + # behavior in C. It is also undefined behavior in numba. In the same + # sense, it is also undefined behavior when the shift amount is larger + # than the number of bits in the shifted integer. + # To avoid problems in the test, the values are clamped (clipped) so + # that 0 <= shift_amount < bitcount(shifted_integer) + if index == 1: + bit_count = res.dtype.itemsize * 8 + res = np.clip(res, 0, bit_count - 1) + + # Right shift has "implementation defined behavior" when the number + # shifted is negative (in C). In numba, right shift for signed integers + # is "arithmetic" while for unsigned integers is "logical". + # This test compares against the NumPy implementation, that relies + # on "implementation defined behavior", so the test could be a false + # failure if the compiler used to compile NumPy doesn't follow the same + # policy. + # Hint: do not rely on right shifting negative numbers in NumPy. 
+ if index == 0: + res = np.abs(res) + return res + + +TestLoopTypesIntRightShift.autogenerate() + + +class TestLoopTypesFloorDivide(_LoopTypesTester): + _ufuncs = [np.floor_divide, np.remainder, np.divmod] + _required_types = 'bBhHiIlLqQfdFD' + _skip_types = 'mMO' + _LoopTypesTester._skip_types + + def _fixup_results(self, dtype, py_arg, c_arg): + if dtype.kind == 'f': + # Discrepancies on floating-point floor division and remainder: + # Numpy may return nan where Numba returns inf, e.g. 1. // 0. + pred = (np.isinf(c_arg) & np.isnan(py_arg)) + # Numpy and Numba may differ in signed zeros, e.g. -0. // -1. + pred |= (py_arg == 0.0) & (c_arg == 0.0) + c_arg[pred] = py_arg[pred] + return py_arg, c_arg + + +TestLoopTypesFloorDivide.autogenerate() + + +class TestLoopTypesFloat(_LoopTypesTester): + _ufuncs = supported_ufuncs[:] + if iswindows: + _ufuncs.remove(np.signbit) # TODO: fix issue #758 + _ufuncs.remove(np.floor_divide) # has its own test class + _ufuncs.remove(np.remainder) # has its own test class + _ufuncs.remove(np.divmod) # has its own test class + _ufuncs.remove(np.mod) # same as np.remainder + _required_types = 'fd' + _skip_types = 'FDmMO' + _LoopTypesTester._skip_types + + +TestLoopTypesFloat.autogenerate() + + +class TestLoopTypesComplex(_LoopTypesTester): + _ufuncs = supported_ufuncs[:] + + # Test complex types + # Every loop containing a complex argument must be tested + _required_types = 'FD' + _skip_types = 'mMO' + _LoopTypesTester._skip_types + + +TestLoopTypesComplex.autogenerate() + + +class TestLoopTypesDatetime(_LoopTypesTester): + _ufuncs = supported_ufuncs[:] + + _ufuncs.remove(np.divmod) # not implemented yet + + # NOTE: the full list of ufuncs supporting datetime64 and timedelta64 + # types in Numpy is: + # ['absolute', 'add', 'divide', 'equal', 'floor_divide', 'fmax', 'fmin', + # 'greater', 'greater_equal', 'less', 'less_equal', 'maximum', + # 'minimum', 'multiply', 'negative', 'not_equal', 'sign', 'subtract', + # 'true_divide'] + + # Test 
datetime64 and timedelta64 types. + _required_types = 'mM' + + # Test various units combinations (TestLoopTypes is only able to test + # homogeneous units). + + def test_add(self): + ufunc = np.add + fn = _make_ufunc_usecase(ufunc) + # heterogeneous inputs + self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'm8[m]', 'm8[s]']) + self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'm8[s]', 'm8[s]']) + # heterogeneous inputs, scaled output + self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'm8[m]', 'm8[ms]']) + self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'm8[s]', 'm8[ms]']) + # Cannot upscale result (Numpy would accept this) + with self.assertRaises(LoweringError): + self._check_ufunc_with_dtypes(fn, ufunc, + ['m8[m]', 'm8[s]', 'm8[m]']) + + def test_subtract(self): + ufunc = np.subtract + fn = _make_ufunc_usecase(ufunc) + # heterogeneous inputs + self._check_ufunc_with_dtypes(fn, ufunc, ['M8[s]', 'M8[m]', 'm8[s]']) + self._check_ufunc_with_dtypes(fn, ufunc, ['M8[m]', 'M8[s]', 'm8[s]']) + # heterogeneous inputs, scaled output + self._check_ufunc_with_dtypes(fn, ufunc, ['M8[s]', 'M8[m]', 'm8[ms]']) + self._check_ufunc_with_dtypes(fn, ufunc, ['M8[m]', 'M8[s]', 'm8[ms]']) + # Cannot upscale result (Numpy would accept this) + with self.assertRaises(LoweringError): + self._check_ufunc_with_dtypes(fn, ufunc, + ['M8[m]', 'M8[s]', 'm8[m]']) + + def test_multiply(self): + ufunc = np.multiply + fn = _make_ufunc_usecase(ufunc) + # scaled output + self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'q', 'm8[us]']) + self._check_ufunc_with_dtypes(fn, ufunc, ['q', 'm8[s]', 'm8[us]']) + # Cannot upscale result (Numpy would accept this) + with self.assertRaises(LoweringError): + self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'q', 'm8[m]']) + + def test_true_divide(self): + ufunc = np.true_divide + fn = _make_ufunc_usecase(ufunc) + # heterogeneous inputs + self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'm8[s]', 'd']) + self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 
'm8[m]', 'd']) + # scaled output + self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'q', 'm8[s]']) + self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'd', 'm8[s]']) + # Cannot upscale result (Numpy would accept this) + with self.assertRaises(LoweringError): + self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'q', 'm8[m]']) + + def test_floor_divide(self): + ufunc = np.floor_divide + fn = _make_ufunc_usecase(ufunc) + # scaled output + self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'q', 'm8[s]']) + self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'd', 'm8[s]']) + # Cannot upscale result (Numpy would accept this) + with self.assertRaises(LoweringError): + self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'q', 'm8[m]']) + + def _check_comparison(self, ufunc): + fn = _make_ufunc_usecase(ufunc) + # timedelta + self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'm8[s]', '?']) + self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'm8[m]', '?']) + # datetime + self._check_ufunc_with_dtypes(fn, ufunc, ['M8[m]', 'M8[s]', '?']) + self._check_ufunc_with_dtypes(fn, ufunc, ['M8[s]', 'M8[m]', '?']) + + def test_comparisons(self): + for ufunc in [np.equal, np.not_equal, np.less, np.less_equal, + np.greater, np.greater_equal]: + self._check_comparison(ufunc) + + +TestLoopTypesDatetime.autogenerate() + + +class TestUFuncBadArgs(TestCase): + def test_missing_args(self): + def func(x): + """error: np.add requires two args""" + result = np.add(x) + return result + + with self.assertRaises(TypingError): + njit([types.float64(types.float64)])(func) + + def test_too_many_args(self): + def func(x, out, out2): + """error: too many args""" + result = np.add(x, x, out, out2) + return result + + array_type = types.Array(types.float64, 1, 'C') + sig = array_type(array_type, array_type, array_type) + + with self.assertRaises(TypingError): + njit(sig)(func) + + def test_no_scalar_result_by_reference(self): + def func(x): + """error: scalar as a return value is not supported""" + y = 0 + 
np.add(x, x, y) + + with self.assertRaises(TypingError): + njit([types.float64(types.float64)])(func) + + +class TestUFuncCompilationThreadSafety(TestCase): + + def test_lock(self): + """ + Test that (lazy) compiling from several threads at once doesn't + produce errors (see issue #2403). + """ + errors = [] + + @vectorize + def foo(x): + return x + 1 + + def wrapper(): + try: + a = np.ones((10,), dtype=np.float64) + expected = np.ones((10,), dtype=np.float64) + 1. + np.testing.assert_array_equal(foo(a), expected) + except Exception as e: + errors.append(e) + + threads = [threading.Thread(target=wrapper) for i in range(16)] + for t in threads: + t.start() + for t in threads: + t.join() + self.assertFalse(errors) + + +class TestUfuncOnContext(TestCase): + def test_cpu_get_ufunc_info(self): + # The CPU context defines get_ufunc_info that is the same as + # ufunc_db.get_ufunc_info. + targetctx = cpu_target.target_context + # Check: get_ufunc_info returns a dict + add_info = targetctx.get_ufunc_info(np.add) + self.assertIsInstance(add_info, dict) + # Check: it is the same as ufunc_db.get_ufunc_info + expected = ufunc_db.get_ufunc_info(np.add) + self.assertEqual(add_info, expected) + # Check: KeyError raised on bad key + badkey = object() + with self.assertRaises(KeyError) as raises: + ufunc_db.get_ufunc_info(badkey) + self.assertEqual(raises.exception.args, (badkey,)) + + def test_base_get_ufunc_info(self): + # The BaseContext always raises NotImplementedError + targetctx = BaseContext(cpu_target.typing_context, 'cpu') + with self.assertRaises(NotImplementedError) as raises: + targetctx.get_ufunc_info(np.add) + self.assertRegex( + str(raises.exception), + r" does not support ufunc", + ) + + +class TestUfuncWriteInput(TestCase): + def test_write_input_arg(self): + @guvectorize(["void(float64[:], uint8[:])"], "(n)->(n)") + def func(x, out): + + for i in range(x.size): + # set every fourth element to 1 + if i % 4 == 0: + out[i] = 1 + + x = np.random.rand(10, 5) + out = 
np.zeros_like(x, dtype=np.int8) + + func(x, out) + np.testing.assert_array_equal( + np.array([True, False, False, False, True], dtype=np.bool_), + out.any(axis=0)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_unicode.py b/venv/lib/python3.10/site-packages/numba/tests/test_unicode.py new file mode 100644 index 0000000000000000000000000000000000000000..cf0be5093e2a36c2f6edec6e205785e5ef7cf62b --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_unicode.py @@ -0,0 +1,2717 @@ +# -*- coding: utf-8 -*- +from itertools import product +from itertools import permutations + +from numba import njit, typeof +from numba.core import types +import unittest +from numba.tests.support import (TestCase, no_pyobj_flags, MemoryLeakMixin) +from numba.core.errors import (TypingError, UnsupportedError, + UnsupportedBytecodeError) +from numba.cpython.unicode import _MAX_UNICODE +from numba.core.types.functions import _header_lead +from numba.extending import overload +from numba.core.utils import PYVERSION + + +def isascii(s): + return all(ord(c) < 128 for c in s) + + +def literal_usecase(): + return '大处着眼,小处着手。' + + +def passthrough_usecase(x): + return x + + +def eq_usecase(x, y): + return x == y + + +def len_usecase(x): + return len(x) + + +def bool_usecase(x): + return bool(x) + + +def getitem_usecase(x, i): + return x[i] + + +def getitem_check_kind_usecase(x, i): + return hash(x[i]) + + +def zfill_usecase(x, y): + return x.zfill(y) + + +def concat_usecase(x, y): + return x + y + + +def repeat_usecase(x, y): + return x * y + + +def inplace_concat_usecase(x, y): + x += y + return x + + +def in_usecase(x, y): + return x in y + + +def lt_usecase(x, y): + return x < y + + +def le_usecase(x, y): + return x <= y + + +def gt_usecase(x, y): + return x > y + + +def ge_usecase(x, y): + return x >= y + + +def partition_usecase(s, sep): + return s.partition(sep) + + +def find_usecase(x, y): + return x.find(y) + 
+ +def find_with_start_only_usecase(x, y, start): + return x.find(y, start) + + +def find_with_start_end_usecase(x, y, start, end): + return x.find(y, start, end) + + +def rpartition_usecase(s, sep): + return s.rpartition(sep) + + +def count_usecase(x, y): + return x.count(y) + + +def count_with_start_usecase(x, y, start): + return x.count(y, start) + + +def count_with_start_end_usecase(x, y, start, end): + return x.count(y, start, end) + + +def rfind_usecase(x, y): + return x.rfind(y) + + +def rfind_with_start_only_usecase(x, y, start): + return x.rfind(y, start) + + +def rfind_with_start_end_usecase(x, y, start, end): + return x.rfind(y, start, end) + + +def replace_usecase(s, x, y): + return s.replace(x, y) + + +def replace_with_count_usecase(s, x, y, count): + return s.replace(x, y, count) + + +def rindex_usecase(x, y): + return x.rindex(y) + + +def rindex_with_start_only_usecase(x, y, start): + return x.rindex(y, start) + + +def rindex_with_start_end_usecase(x, y, start, end): + return x.rindex(y, start, end) + + +def index_usecase(x, y): + return x.index(y) + + +def index_with_start_only_usecase(x, y, start): + return x.index(y, start) + + +def index_with_start_end_usecase(x, y, start, end): + return x.index(y, start, end) + + +def startswith_usecase(x, y): + return x.startswith(y) + + +def endswith_usecase(x, y): + return x.endswith(y) + + +def expandtabs_usecase(s): + return s.expandtabs() + + +def expandtabs_with_tabsize_usecase(s, tabsize): + return s.expandtabs(tabsize) + + +def expandtabs_with_tabsize_kwarg_usecase(s, tabsize): + return s.expandtabs(tabsize=tabsize) + + +def startswith_with_start_only_usecase(x, y, start): + return x.startswith(y, start) + + +def startswith_with_start_end_usecase(x, y, start, end): + return x.startswith(y, start, end) + + +def endswith_with_start_only_usecase(x, y, start): + return x.endswith(y, start) + + +def endswith_with_start_end_usecase(x, y, start, end): + return x.endswith(y, start, end) + + +def 
split_usecase(x, y): + return x.split(y) + + +def split_with_maxsplit_usecase(x, y, maxsplit): + return x.split(y, maxsplit) + + +def split_with_maxsplit_kwarg_usecase(x, y, maxsplit): + return x.split(y, maxsplit=maxsplit) + + +def split_whitespace_usecase(x): + return x.split() + + +def splitlines_usecase(s): + return s.splitlines() + + +def splitlines_with_keepends_usecase(s, keepends): + return s.splitlines(keepends) + + +def splitlines_with_keepends_kwarg_usecase(s, keepends): + return s.splitlines(keepends=keepends) + + +def rsplit_usecase(s, sep): + return s.rsplit(sep) + + +def rsplit_with_maxsplit_usecase(s, sep, maxsplit): + return s.rsplit(sep, maxsplit) + + +def rsplit_with_maxsplit_kwarg_usecase(s, sep, maxsplit): + return s.rsplit(sep, maxsplit=maxsplit) + + +def rsplit_whitespace_usecase(s): + return s.rsplit() + + +def lstrip_usecase(x): + return x.lstrip() + + +def lstrip_usecase_chars(x, chars): + return x.lstrip(chars) + + +def rstrip_usecase(x): + return x.rstrip() + + +def rstrip_usecase_chars(x, chars): + return x.rstrip(chars) + + +def strip_usecase(x): + return x.strip() + + +def strip_usecase_chars(x, chars): + return x.strip(chars) + + +def join_usecase(x, y): + return x.join(y) + + +def join_empty_usecase(x): + # hack to make empty typed list + l = [''] + l.pop() + return x.join(l) + + +def center_usecase(x, y): + return x.center(y) + + +def center_usecase_fillchar(x, y, fillchar): + return x.center(y, fillchar) + + +def ljust_usecase(x, y): + return x.ljust(y) + + +def ljust_usecase_fillchar(x, y, fillchar): + return x.ljust(y, fillchar) + + +def rjust_usecase(x, y): + return x.rjust(y) + + +def rjust_usecase_fillchar(x, y, fillchar): + return x.rjust(y, fillchar) + + +def istitle_usecase(x): + return x.istitle() + + +def iter_usecase(x): + l = [] + for i in x: + l.append(i) + return l + + +def title(x): + return x.title() + + +def literal_iter_usecase(): + l = [] + for i in '大处着眼,小处着手。': + l.append(i) + return l + + +def 
enumerated_iter_usecase(x): + buf = "" + scan = 0 + for i, s in enumerate(x): + buf += s + scan += 1 + return buf, scan + + +def iter_stopiteration_usecase(x): + n = len(x) + i = iter(x) + for _ in range(n + 1): + next(i) + + +def literal_iter_stopiteration_usecase(): + s = '大处着眼,小处着手。' + i = iter(s) + n = len(s) + for _ in range(n + 1): + next(i) + + +def islower_usecase(x): + return x.islower() + + +def lower_usecase(x): + return x.lower() + + +def ord_usecase(x): + return ord(x) + + +def chr_usecase(x): + return chr(x) + + +class BaseTest(MemoryLeakMixin, TestCase): + def setUp(self): + super(BaseTest, self).setUp() + + +UNICODE_EXAMPLES = [ + '', + 'ascii', + '12345', + '1234567890', + '¡Y tú quién te crees?', + '🐍⚡', + '大处着眼,小处着手。', +] + +UNICODE_ORDERING_EXAMPLES = [ + '', + 'a' + 'aa', + 'aaa', + 'b', + 'aab', + 'ab', + 'asc', + 'ascih', + 'ascii', + 'ascij', + '大处着眼,小处着手', + '大处着眼,小处着手。', + '大处着眼,小处着手。🐍⚡', +] + +UNICODE_COUNT_EXAMPLES = [ + ('', ''), + ('', 'ascii'), + ('ascii', ''), + ('asc ii', ' '), + ('ascii', 'ci'), + ('ascii', 'ascii'), + ('ascii', 'Ă'), + ('ascii', '大处'), + ('ascii', 'étú?'), + ('', '大处 着眼,小处着手。大大大处'), + ('大处 着眼,小处着手。大大大处', ''), + ('大处 着眼,小处着手。大大大处', ' '), + ('大处 着眼,小处着手。大大大处', 'ci'), + ('大处 着眼,小处着手。大大大处', '大处大处'), + ('大处 着眼,小处着手。大大大处', '大处 着眼,小处着手。大大大处'), + ('大处 着眼,小处着手。大大大处', 'Ă'), + ('大处 着眼,小处着手。大大大处', '大处'), + ('大处 着眼,小处着手。大大大处', 'étú?'), + ('', 'tú quién te crees?'), + ('tú quién te crees?', ''), + ('tú quién te crees?', ' '), + ('tú quién te crees?', 'ci'), + ('tú quién te crees?', 'tú quién te crees?'), + ('tú quién te crees?', 'Ă'), + ('tú quién te crees?', '大处'), + ('tú quién te crees?', 'étú?'), + ('abababab', 'a'), + ('abababab', 'ab'), + ('abababab', 'aba'), + ('aaaaaaaaaa', 'aaa'), + ('aaaaaaaaaa', 'aĂ'), + ('aabbaaaabbaa', 'aa') +] + + +class TestUnicode(BaseTest): + + def test_literal(self): + pyfunc = literal_usecase + cfunc = njit(literal_usecase) + self.assertPreciseEqual(pyfunc(), cfunc()) + + def 
test_passthrough(self, flags=no_pyobj_flags): + pyfunc = passthrough_usecase + cfunc = njit(pyfunc) + for s in UNICODE_EXAMPLES: + self.assertEqual(pyfunc(s), cfunc(s)) + + def test_eq(self, flags=no_pyobj_flags): + pyfunc = eq_usecase + cfunc = njit(pyfunc) + for a in UNICODE_EXAMPLES: + for b in reversed(UNICODE_EXAMPLES): + self.assertEqual(pyfunc(a, b), + cfunc(a, b), '%s, %s' % (a, b)) + # comparing against something that's not unicode + self.assertEqual(pyfunc(a, 1), + cfunc(a, 1), '%s, %s' % (a, 1)) + self.assertEqual(pyfunc(1, b), + cfunc(1, b), '%s, %s' % (1, b)) + + def test_eq_optional(self): + # See issue #7474 + @njit + def foo(pred1, pred2): + if pred1 > 0: + resolved1 = 'concrete' + else: + resolved1 = None + if pred2 < 0: + resolved2 = 'concrete' + else: + resolved2 = None + + # resolved* are Optionals + if resolved1 == resolved2: + return 10 + else: + return 20 + + for (p1, p2) in product(*((-1, 1),) * 2): + self.assertEqual(foo(p1, p2), foo.py_func(p1, p2)) + + def _check_ordering_op(self, usecase): + pyfunc = usecase + cfunc = njit(pyfunc) + + # Check comparison to self + for a in UNICODE_ORDERING_EXAMPLES: + self.assertEqual( + pyfunc(a, a), + cfunc(a, a), + '%s: "%s", "%s"' % (usecase.__name__, a, a), + ) + + # Check comparison to adjacent + for a, b in permutations(UNICODE_ORDERING_EXAMPLES, r=2): + self.assertEqual( + pyfunc(a, b), + cfunc(a, b), + '%s: "%s", "%s"' % (usecase.__name__, a, b), + ) + # and reversed + self.assertEqual( + pyfunc(b, a), + cfunc(b, a), + '%s: "%s", "%s"' % (usecase.__name__, b, a), + ) + + def test_lt(self, flags=no_pyobj_flags): + self._check_ordering_op(lt_usecase) + + def test_le(self, flags=no_pyobj_flags): + self._check_ordering_op(le_usecase) + + def test_gt(self, flags=no_pyobj_flags): + self._check_ordering_op(gt_usecase) + + def test_ge(self, flags=no_pyobj_flags): + self._check_ordering_op(ge_usecase) + + def test_len(self, flags=no_pyobj_flags): + pyfunc = len_usecase + cfunc = njit(pyfunc) + for s in 
UNICODE_EXAMPLES: + self.assertEqual(pyfunc(s), cfunc(s)) + + def test_bool(self, flags=no_pyobj_flags): + pyfunc = bool_usecase + cfunc = njit(pyfunc) + for s in UNICODE_EXAMPLES: + self.assertEqual(pyfunc(s), cfunc(s)) + + def test_expandtabs(self): + pyfunc = expandtabs_usecase + cfunc = njit(pyfunc) + + cases = ['', '\t', 't\tt\t', 'a\t', '\t⚡', 'a\tbc\nab\tc', + '🐍\t⚡', '🐍⚡\n\t\t🐍\t', 'ab\rab\t\t\tab\r\n\ta'] + + msg = 'Results of "{}".expandtabs() must be equal' + for s in cases: + self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s)) + + def test_expandtabs_with_tabsize(self): + fns = [njit(expandtabs_with_tabsize_usecase), + njit(expandtabs_with_tabsize_kwarg_usecase)] + messages = ['Results of "{}".expandtabs({}) must be equal', + 'Results of "{}".expandtabs(tabsize={}) must be equal'] + + cases = ['', '\t', 't\tt\t', 'a\t', '\t⚡', 'a\tbc\nab\tc', + '🐍\t⚡', '🐍⚡\n\t\t🐍\t', 'ab\rab\t\t\tab\r\n\ta'] + + for s in cases: + for tabsize in range(-1, 10): + for fn, msg in zip(fns, messages): + self.assertEqual(fn.py_func(s, tabsize), fn(s, tabsize), + msg=msg.format(s, tabsize)) + + def test_expandtabs_exception_noninteger_tabsize(self): + pyfunc = expandtabs_with_tabsize_usecase + cfunc = njit(pyfunc) + + accepted_types = (types.Integer, int) + with self.assertRaises(TypingError) as raises: + cfunc('\t', 2.4) + msg = '"tabsize" must be {}, not float'.format(accepted_types) + self.assertIn(msg, str(raises.exception)) + + def test_startswith_default(self): + pyfunc = startswith_usecase + cfunc = njit(pyfunc) + + cpython_str = ['hello', 'helloworld', ''] + cpython_subs = [ + 'he', 'hello', 'helloworld', 'ello', + '', 'lowo', 'lo', 'he', 'lo', 'o', + ] + extra_subs = ['hellohellohello', ' '] + for s in cpython_str + UNICODE_EXAMPLES: + default_subs = ['', 'x', s[:-2], s[3:], s, s + s] + for prefix in cpython_subs + default_subs + extra_subs: + self.assertEqual(pyfunc(s, prefix), cfunc(s, prefix)) + + def test_startswith_with_start(self): + pyfunc = 
startswith_with_start_only_usecase + cfunc = njit(pyfunc) + + cpython_str = ['hello', 'helloworld', ''] + cpython_subs = [ + 'he', 'hello', 'helloworld', 'ello', + '', 'lowo', 'lo', 'he', 'lo', 'o', + ] + extra_subs = ['hellohellohello', ' '] + for s in cpython_str + UNICODE_EXAMPLES: + default_subs = ['', 'x', s[:-2], s[3:], s, s + s] + for prefix in cpython_subs + default_subs + extra_subs: + for start in list(range(-20, 20)) + [None]: + self.assertEqual(pyfunc(s, prefix, start), + cfunc(s, prefix, start)) + + def test_startswith_with_start_end(self): + pyfunc = startswith_with_start_end_usecase + cfunc = njit(pyfunc) + + cpython_str = ['hello', 'helloworld', ''] + cpython_subs = [ + 'he', 'hello', 'helloworld', 'ello', + '', 'lowo', 'lo', 'he', 'lo', 'o', + ] + extra_subs = ['hellohellohello', ' '] + for s in cpython_str + UNICODE_EXAMPLES: + default_subs = ['', 'x', s[:-2], s[3:], s, s + s] + for prefix in cpython_subs + default_subs + extra_subs: + for start in list(range(-20, 20)) + [None]: + for end in list(range(-20, 20)) + [None]: + self.assertEqual(pyfunc(s, prefix, start, end), + cfunc(s, prefix, start, end)) + + def test_startswith_exception_invalid_args(self): + msg_invalid_prefix = \ + "The arg 'prefix' should be a string or a tuple of strings" + with self.assertRaisesRegex(TypingError, msg_invalid_prefix): + cfunc = njit(startswith_usecase) + cfunc("hello", (1, "he")) + + msg_invalid_start = \ + "When specified, the arg 'start' must be an Integer or None" + with self.assertRaisesRegex(TypingError, msg_invalid_start): + cfunc = njit(startswith_with_start_only_usecase) + cfunc("hello", "he", "invalid start") + + msg_invalid_end = \ + "When specified, the arg 'end' must be an Integer or None" + with self.assertRaisesRegex(TypingError, msg_invalid_end): + cfunc = njit(startswith_with_start_end_usecase) + cfunc("hello", "he", 0, "invalid end") + + def test_startswith_tuple(self): + pyfunc = startswith_usecase + cfunc = njit(pyfunc) + + cpython_str = 
['hello', 'helloworld', ''] + cpython_subs = [ + 'he', 'hello', 'helloworld', 'ello', + '', 'lowo', 'lo', 'he', 'lo', 'o', + ] + extra_subs = ['hellohellohello', ' '] + for s in cpython_str + UNICODE_EXAMPLES: + default_subs = ['', 'x', s[:-2], s[3:], s, s + s] + for sub_str in cpython_subs + default_subs + extra_subs: + prefix = (sub_str, 'lo') + self.assertEqual(pyfunc(s, prefix), + cfunc(s, prefix)) + + def test_startswith_tuple_args(self): + pyfunc = startswith_with_start_end_usecase + cfunc = njit(pyfunc) + + cpython_str = ['hello', 'helloworld', ''] + cpython_subs = [ + 'he', 'hello', 'helloworld', 'ello', + '', 'lowo', 'lo', 'he', 'lo', 'o', + ] + extra_subs = ['hellohellohello', ' '] + for s in cpython_str + UNICODE_EXAMPLES: + default_subs = ['', 'x', s[:-2], s[3:], s, s + s] + for sub_str in cpython_subs + default_subs + extra_subs: + for start in list(range(-20, 20)) + [None]: + for end in list(range(-20, 20)) + [None]: + prefix = (sub_str, 'lo') + self.assertEqual(pyfunc(s, prefix, start, end), + cfunc(s, prefix, start, end)) + + def test_endswith_default(self): + pyfunc = endswith_usecase + cfunc = njit(pyfunc) + + # Samples taken from CPython testing: + # https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#L1049-L1099 # noqa: E501 + cpython_str = ['hello', 'helloworld', ''] + cpython_subs = [ + 'he', 'hello', 'helloworld', 'ello', + '', 'lowo', 'lo', 'he', 'lo', 'o', + ] + extra_subs = ['hellohellohello', ' '] + for s in cpython_str + UNICODE_EXAMPLES: + default_subs = ['', 'x', s[:-2], s[3:], s, s + s] + for sub_str in cpython_subs + default_subs + extra_subs: + msg = 'Results "{}".endswith("{}") must be equal' + self.assertEqual(pyfunc(s, sub_str), cfunc(s, sub_str), + msg=msg.format(s, sub_str)) + + def test_endswith_with_start(self): + pyfunc = endswith_with_start_only_usecase + cfunc = njit(pyfunc) + + # Samples taken from CPython testing: + # 
https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#L1049-L1099 # noqa: E501 + cpython_str = ['hello', 'helloworld', ''] + cpython_subs = [ + 'he', 'hello', 'helloworld', 'ello', + '', 'lowo', 'lo', 'he', 'lo', 'o', + ] + extra_subs = ['hellohellohello', ' '] + for s in cpython_str + UNICODE_EXAMPLES: + default_subs = ['', 'x', s[:-2], s[3:], s, s + s] + for sub_str in cpython_subs + default_subs + extra_subs: + for start in list(range(-20, 20)) + [None]: + msg = 'Results "{}".endswith("{}", {}) must be equal' + self.assertEqual(pyfunc(s, sub_str, start), + cfunc(s, sub_str, start), + msg=msg.format(s, sub_str, start)) + + def test_endswith_with_start_end(self): + pyfunc = endswith_with_start_end_usecase + cfunc = njit(pyfunc) + + # Samples taken from CPython testing: + # https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#LL1049-L1099 # noqa: E501 + cpython_str = ['hello', 'helloworld', ''] + cpython_subs = [ + 'he', 'hello', 'helloworld', 'ello', + '', 'lowo', 'lo', 'he', 'lo', 'o', + ] + extra_subs = ['hellohellohello', ' '] + for s in cpython_str + UNICODE_EXAMPLES: + default_subs = ['', 'x', s[:-2], s[3:], s, s + s] + for sub_str in cpython_subs + default_subs + extra_subs: + for start in list(range(-20, 20)) + [None]: + for end in list(range(-20, 20)) + [None]: + msg = 'Results "{}".endswith("{}", {}, {})\ + must be equal' + self.assertEqual(pyfunc(s, sub_str, start, end), + cfunc(s, sub_str, start, end), + msg=msg.format(s, sub_str, start, end)) + + def test_endswith_tuple(self): + pyfunc = endswith_usecase + cfunc = njit(pyfunc) + + # Samples taken from CPython testing: + # https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#L1049-L1099 # noqa: E501 + cpython_str = ['hello', 'helloworld', ''] + cpython_subs = [ + 'he', 'hello', 'helloworld', 'ello', + '', 'lowo', 'lo', 'he', 'lo', 'o', + ] + 
extra_subs = ['hellohellohello', ' '] + for s in cpython_str + UNICODE_EXAMPLES: + default_subs = ['', 'x', s[:-2], s[3:], s, s + s] + for sub_str in cpython_subs + default_subs + extra_subs: + msg = 'Results "{}".endswith({}) must be equal' + tuple_subs = (sub_str, 'lo') + self.assertEqual(pyfunc(s, tuple_subs), + cfunc(s, tuple_subs), + msg=msg.format(s, tuple_subs)) + + def test_endswith_tuple_args(self): + pyfunc = endswith_with_start_end_usecase + cfunc = njit(pyfunc) + + # Samples taken from CPython testing: + # https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#L1049-L1099 # noqa: E501 + cpython_str = ['hello', 'helloworld', ''] + cpython_subs = [ + 'he', 'hello', 'helloworld', 'ello', + '', 'lowo', 'lo', 'he', 'lo', 'o', + ] + extra_subs = ['hellohellohello', ' '] + for s in cpython_str + UNICODE_EXAMPLES: + default_subs = ['', 'x', s[:-2], s[3:], s, s + s] + for sub_str in cpython_subs + default_subs + extra_subs: + for start in list(range(-20, 20)) + [None]: + for end in list(range(-20, 20)) + [None]: + msg = 'Results "{}".endswith("{}", {}, {})\ + must be equal' + tuple_subs = (sub_str, 'lo') + self.assertEqual(pyfunc(s, tuple_subs, start, end), + cfunc(s, tuple_subs, start, end), + msg=msg.format(s, tuple_subs, + start, end)) + + def test_in(self, flags=no_pyobj_flags): + pyfunc = in_usecase + cfunc = njit(pyfunc) + for a in UNICODE_EXAMPLES: + extras = ['', 'xx', a[::-1], a[:-2], a[3:], a, a + a] + for substr in extras: + self.assertEqual(pyfunc(substr, a), + cfunc(substr, a), + "'%s' in '%s'?" 
% (substr, a)) + + def test_partition_exception_invalid_sep(self): + self.disable_leak_check() + + pyfunc = partition_usecase + cfunc = njit(pyfunc) + + # Handle empty separator exception + for func in [pyfunc, cfunc]: + with self.assertRaises(ValueError) as raises: + func('a', '') + self.assertIn('empty separator', str(raises.exception)) + + accepted_types = (types.UnicodeType, types.UnicodeCharSeq) + with self.assertRaises(TypingError) as raises: + cfunc('a', None) + msg = '"sep" must be {}, not none'.format(accepted_types) + self.assertIn(msg, str(raises.exception)) + + def test_partition(self): + pyfunc = partition_usecase + cfunc = njit(pyfunc) + + CASES = [ + ('', '⚡'), + ('abcabc', '⚡'), + ('🐍⚡', '⚡'), + ('🐍⚡🐍', '⚡'), + ('abababa', 'a'), + ('abababa', 'b'), + ('abababa', 'c'), + ('abababa', 'ab'), + ('abababa', 'aba'), + ] + msg = 'Results of "{}".partition("{}") must be equal' + for s, sep in CASES: + self.assertEqual(pyfunc(s, sep), cfunc(s, sep), + msg=msg.format(s, sep)) + + def test_find(self, flags=no_pyobj_flags): + pyfunc = find_usecase + cfunc = njit(pyfunc) + + default_subs = [ + (s, ['', 'xx', s[:-2], s[3:], s]) for s in UNICODE_EXAMPLES + ] + # Samples taken from CPython testing: + # https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L202-L231 # noqa: E501 + cpython_subs = [ + ('a' * 100 + '\u0102', ['\u0102', '\u0201', '\u0120', '\u0220']), + ('a' * 100 + '\U00100304', ['\U00100304', '\U00100204', + '\U00102004']), + ('\u0102' * 100 + 'a', ['a']), + ('\U00100304' * 100 + 'a', ['a']), + ('\U00100304' * 100 + '\u0102', ['\u0102']), + ('a' * 100, ['\u0102', '\U00100304', 'a\u0102', 'a\U00100304']), + ('\u0102' * 100, ['\U00100304', '\u0102\U00100304']), + ('\u0102' * 100 + 'a_', ['a_']), + ('\U00100304' * 100 + 'a_', ['a_']), + ('\U00100304' * 100 + '\u0102_', ['\u0102_']), + ] + for s, subs in default_subs + cpython_subs: + for sub_str in subs: + msg = 'Results "{}".find("{}") must be equal' + 
self.assertEqual(pyfunc(s, sub_str), cfunc(s, sub_str), + msg=msg.format(s, sub_str)) + + def test_find_with_start_only(self): + pyfunc = find_with_start_only_usecase + cfunc = njit(pyfunc) + + for s in UNICODE_EXAMPLES: + for sub_str in ['', 'xx', s[:-2], s[3:], s]: + for start in list(range(-20, 20)) + [None]: + msg = 'Results "{}".find("{}", {}) must be equal' + self.assertEqual(pyfunc(s, sub_str, start), + cfunc(s, sub_str, start), + msg=msg.format(s, sub_str, start)) + + def test_find_with_start_end(self): + pyfunc = find_with_start_end_usecase + cfunc = njit(pyfunc) + + starts = ends = list(range(-20, 20)) + [None] + for s in UNICODE_EXAMPLES: + for sub_str in ['', 'xx', s[:-2], s[3:], s]: + for start, end in product(starts, ends): + msg = 'Results of "{}".find("{}", {}, {}) must be equal' + self.assertEqual(pyfunc(s, sub_str, start, end), + cfunc(s, sub_str, start, end), + msg=msg.format(s, sub_str, start, end)) + + def test_find_exception_noninteger_start_end(self): + pyfunc = find_with_start_end_usecase + cfunc = njit(pyfunc) + + accepted = (types.Integer, types.NoneType) + for start, end, name in [(0.1, 5, 'start'), (0, 0.5, 'end')]: + with self.assertRaises(TypingError) as raises: + cfunc('ascii', 'sci', start, end) + msg = '"{}" must be {}, not float'.format(name, accepted) + self.assertIn(msg, str(raises.exception)) + + def test_rpartition_exception_invalid_sep(self): + self.disable_leak_check() + + pyfunc = rpartition_usecase + cfunc = njit(pyfunc) + + # Handle empty separator exception + for func in [pyfunc, cfunc]: + with self.assertRaises(ValueError) as raises: + func('a', '') + self.assertIn('empty separator', str(raises.exception)) + + accepted_types = (types.UnicodeType, types.UnicodeCharSeq) + with self.assertRaises(TypingError) as raises: + cfunc('a', None) + msg = '"sep" must be {}, not none'.format(accepted_types) + self.assertIn(msg, str(raises.exception)) + + def test_rpartition(self): + pyfunc = rpartition_usecase + cfunc = njit(pyfunc) + 
+ CASES = [ + ('', '⚡'), + ('abcabc', '⚡'), + ('🐍⚡', '⚡'), + ('🐍⚡🐍', '⚡'), + ('abababa', 'a'), + ('abababa', 'b'), + ('abababa', 'c'), + ('abababa', 'ab'), + ('abababa', 'aba'), + ] + msg = 'Results of "{}".rpartition("{}") must be equal' + for s, sep in CASES: + self.assertEqual(pyfunc(s, sep), cfunc(s, sep), + msg=msg.format(s, sep)) + + def test_count(self): + pyfunc = count_usecase + cfunc = njit(pyfunc) + error_msg = "'{0}'.py_count('{1}') = {2}\n'{0}'.c_count('{1}') = {3}" + + for s, sub in UNICODE_COUNT_EXAMPLES: + py_result = pyfunc(s, sub) + c_result = cfunc(s, sub) + self.assertEqual(py_result, c_result, + error_msg.format(s, sub, py_result, c_result)) + + def test_count_with_start(self): + pyfunc = count_with_start_usecase + cfunc = njit(pyfunc) + error_msg = "%s\n%s" % ("'{0}'.py_count('{1}', {2}) = {3}", + "'{0}'.c_count('{1}', {2}) = {4}") + + for s, sub in UNICODE_COUNT_EXAMPLES: + for i in range(-18, 18): + py_result = pyfunc(s, sub, i) + c_result = cfunc(s, sub, i) + self.assertEqual(py_result, c_result, + error_msg.format(s, sub, i, py_result, + c_result)) + + py_result = pyfunc(s, sub, None) + c_result = cfunc(s, sub, None) + self.assertEqual(py_result, c_result, + error_msg.format(s, sub, None, py_result, + c_result)) + + def test_count_with_start_end(self): + pyfunc = count_with_start_end_usecase + cfunc = njit(pyfunc) + error_msg = "%s\n%s" % ("'{0}'.py_count('{1}', {2}, {3}) = {4}", + "'{0}'.c_count('{1}', {2}, {3}) = {5}") + + for s, sub in UNICODE_COUNT_EXAMPLES: + for i, j in product(range(-18, 18), (-18, 18)): + py_result = pyfunc(s, sub, i, j) + c_result = cfunc(s, sub, i, j) + self.assertEqual(py_result, c_result, + error_msg.format(s, sub, i, j, py_result, + c_result)) + + for j in range(-18, 18): + py_result = pyfunc(s, sub, None, j) + c_result = cfunc(s, sub, None, j) + self.assertEqual(py_result, c_result, + error_msg.format(s, sub, None, j, py_result, + c_result)) + + py_result = pyfunc(s, sub, None, None) + c_result = cfunc(s, 
sub, None, None) + self.assertEqual(py_result, c_result, + error_msg.format(s, sub, None, None, py_result, + c_result)) + + def test_count_arg_type_check(self): + cfunc = njit(count_with_start_end_usecase) + + with self.assertRaises(TypingError) as raises: + cfunc('ascii', 'c', 1, 0.5) + self.assertIn('The slice indices must be an Integer or None', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc('ascii', 'c', 1.2, 7) + self.assertIn('The slice indices must be an Integer or None', + str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc('ascii', 12, 1, 7) + self.assertIn('The substring must be a UnicodeType, not', + str(raises.exception)) + + def test_count_optional_arg_type_check(self): + pyfunc = count_with_start_end_usecase + + def try_compile_bad_optional(*args): + bad_sig = types.int64(types.unicode_type, + types.unicode_type, + types.Optional(types.float64), + types.Optional(types.float64)) + njit([bad_sig])(pyfunc) + + with self.assertRaises(TypingError) as raises: + try_compile_bad_optional('tú quis?', 'tú', 1.1, 1.1) + self.assertIn('The slice indices must be an Integer or None', + str(raises.exception)) + + error_msg = "%s\n%s" % ("'{0}'.py_count('{1}', {2}, {3}) = {4}", + "'{0}'.c_count_op('{1}', {2}, {3}) = {5}") + sig_optional = types.int64(types.unicode_type, + types.unicode_type, + types.Optional(types.int64), + types.Optional(types.int64)) + cfunc_optional = njit([sig_optional])(pyfunc) + + py_result = pyfunc('tú quis?', 'tú', 0, 8) + c_result = cfunc_optional('tú quis?', 'tú', 0, 8) + self.assertEqual(py_result, c_result, + error_msg.format('tú quis?', 'tú', 0, 8, py_result, + c_result)) + + def test_rfind(self): + pyfunc = rfind_usecase + cfunc = njit(pyfunc) + + default_subs = [ + (s, ['', 'xx', s[:-2], s[3:], s]) for s in UNICODE_EXAMPLES + ] + # Samples taken from CPython testing: + # 
https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L233-L259 # noqa: E501 + cpython_subs = [ + ('\u0102' + 'a' * 100, ['\u0102', '\u0201', '\u0120', '\u0220']), + ('\U00100304' + 'a' * 100, ['\U00100304', '\U00100204', + '\U00102004']), + ('abcdefghiabc', ['abc', '']), + ('a' + '\u0102' * 100, ['a']), + ('a' + '\U00100304' * 100, ['a']), + ('\u0102' + '\U00100304' * 100, ['\u0102']), + ('a' * 100, ['\u0102', '\U00100304', '\u0102a', '\U00100304a']), + ('\u0102' * 100, ['\U00100304', '\U00100304\u0102']), + ('_a' + '\u0102' * 100, ['_a']), + ('_a' + '\U00100304' * 100, ['_a']), + ('_\u0102' + '\U00100304' * 100, ['_\u0102']), + ] + for s, subs in default_subs + cpython_subs: + for sub_str in subs: + msg = 'Results "{}".rfind("{}") must be equal' + self.assertEqual(pyfunc(s, sub_str), cfunc(s, sub_str), + msg=msg.format(s, sub_str)) + + def test_rfind_with_start_only(self): + pyfunc = rfind_with_start_only_usecase + cfunc = njit(pyfunc) + + for s in UNICODE_EXAMPLES: + for sub_str in ['', 'xx', s[:-2], s[3:], s]: + for start in list(range(-20, 20)) + [None]: + msg = 'Results "{}".rfind("{}", {}) must be equal' + self.assertEqual(pyfunc(s, sub_str, start), + cfunc(s, sub_str, start), + msg=msg.format(s, sub_str, start)) + + def test_rfind_with_start_end(self): + pyfunc = rfind_with_start_end_usecase + cfunc = njit(pyfunc) + + starts = list(range(-20, 20)) + [None] + ends = list(range(-20, 20)) + [None] + for s in UNICODE_EXAMPLES: + for sub_str in ['', 'xx', s[:-2], s[3:], s]: + for start, end in product(starts, ends): + msg = 'Results of "{}".rfind("{}", {}, {}) must be equal' + self.assertEqual(pyfunc(s, sub_str, start, end), + cfunc(s, sub_str, start, end), + msg=msg.format(s, sub_str, start, end)) + + def test_rfind_wrong_substr(self): + cfunc = njit(rfind_usecase) + + for s in UNICODE_EXAMPLES: + for sub_str in [None, 1, False]: + with self.assertRaises(TypingError) as raises: + cfunc(s, sub_str) + msg = 'must 
be {}'.format(types.UnicodeType) + self.assertIn(msg, str(raises.exception)) + + def test_rfind_wrong_start_end(self): + cfunc = njit(rfind_with_start_end_usecase) + + accepted_types = (types.Integer, types.NoneType) + for s in UNICODE_EXAMPLES: + for sub_str in ['', 'xx', s[:-2], s[3:], s]: + # test wrong start + for start, end in product([0.1, False], [-1, 1]): + with self.assertRaises(TypingError) as raises: + cfunc(s, sub_str, start, end) + msg = '"start" must be {}'.format(accepted_types) + self.assertIn(msg, str(raises.exception)) + + # test wrong end + for start, end in product([-1, 1], [-0.1, True]): + with self.assertRaises(TypingError) as raises: + cfunc(s, sub_str, start, end) + msg = '"end" must be {}'.format(accepted_types) + self.assertIn(msg, str(raises.exception)) + + def test_rfind_wrong_start_end_optional(self): + s = UNICODE_EXAMPLES[0] + sub_str = s[1:-1] + accepted_types = (types.Integer, types.NoneType) + msg = 'must be {}'.format(accepted_types) + + def try_compile_wrong_start_optional(*args): + wrong_sig_optional = types.int64(types.unicode_type, + types.unicode_type, + types.Optional(types.float64), + types.Optional(types.intp)) + njit([wrong_sig_optional])(rfind_with_start_end_usecase) + + with self.assertRaises(TypingError) as raises: + try_compile_wrong_start_optional(s, sub_str, 0.1, 1) + self.assertIn(msg, str(raises.exception)) + + def try_compile_wrong_end_optional(*args): + wrong_sig_optional = types.int64(types.unicode_type, + types.unicode_type, + types.Optional(types.intp), + types.Optional(types.float64)) + njit([wrong_sig_optional])(rfind_with_start_end_usecase) + + with self.assertRaises(TypingError) as raises: + try_compile_wrong_end_optional(s, sub_str, 1, 0.1) + self.assertIn(msg, str(raises.exception)) + + def test_rindex(self): + pyfunc = rindex_usecase + cfunc = njit(pyfunc) + + default_subs = [ + (s, ['', s[:-2], s[3:], s]) for s in UNICODE_EXAMPLES + ] + # Samples taken from CPython testing: + # 
https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L284-L308 # noqa: E501 + cpython_subs = [ + ('abcdefghiabc', ['', 'def', 'abc']), + ('a' + '\u0102' * 100, ['a']), + ('a' + '\U00100304' * 100, ['a']), + ('\u0102' + '\U00100304' * 100, ['\u0102']), + ('_a' + '\u0102' * 100, ['_a']), + ('_a' + '\U00100304' * 100, ['_a']), + ('_\u0102' + '\U00100304' * 100, ['_\u0102']) + ] + for s, subs in default_subs + cpython_subs: + for sub_str in subs: + msg = 'Results "{}".rindex("{}") must be equal' + self.assertEqual(pyfunc(s, sub_str), cfunc(s, sub_str), + msg=msg.format(s, sub_str)) + + def test_index(self): + pyfunc = index_usecase + cfunc = njit(pyfunc) + + default_subs = [ + (s, ['', s[:-2], s[3:], s]) for s in UNICODE_EXAMPLES + ] + # Samples taken from CPython testing: + # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L260-L282 # noqa: E501 + cpython_subs = [ + ('abcdefghiabc', ['', 'def', 'abc']), + ('\u0102' * 100 + 'a', ['a']), + ('\U00100304' * 100 + 'a', ['a']), + ('\U00100304' * 100 + '\u0102', ['\u0102']), + ('\u0102' * 100 + 'a_', ['a_']), + ('\U00100304' * 100 + 'a_', ['a_']), + ('\U00100304' * 100 + '\u0102_', ['\u0102_']) + ] + for s, subs in default_subs + cpython_subs: + for sub_str in subs: + msg = 'Results "{}".index("{}") must be equal' + self.assertEqual(pyfunc(s, sub_str), cfunc(s, sub_str), + msg=msg.format(s, sub_str)) + + def test_index_rindex_with_start_only(self): + pyfuncs = [index_with_start_only_usecase, + rindex_with_start_only_usecase] + messages = ['Results "{}".index("{}", {}) must be equal', + 'Results "{}".rindex("{}", {}) must be equal'] + unicode_examples = [ + 'ascii', + '12345', + '1234567890', + '¡Y tú quién te crees?', + '大处着眼,小处着手。', + ] + for pyfunc, msg in zip(pyfuncs, messages): + cfunc = njit(pyfunc) + for s in unicode_examples: + l = len(s) + cases = [ + ('', list(range(-10, l + 1))), + (s[:-2], [0] + 
list(range(-10, 1 - l))), + (s[3:], list(range(4)) + list(range(-10, 4 - l))), + (s, [0] + list(range(-10, 1 - l))), + ] + for sub_str, starts in cases: + for start in starts + [None]: + self.assertEqual(pyfunc(s, sub_str, start), + cfunc(s, sub_str, start), + msg=msg.format(s, sub_str, start)) + + def test_index_rindex_with_start_end(self): + pyfuncs = [index_with_start_end_usecase, rindex_with_start_end_usecase] + messages = ['Results of "{}".index("{}", {}, {}) must be equal', + 'Results of "{}".rindex("{}", {}, {}) must be equal'] + unicode_examples = [ + 'ascii', + '12345', + '1234567890', + '¡Y tú quién te crees?', + '大处着眼,小处着手。', + ] + for pyfunc, msg in zip(pyfuncs, messages): + cfunc = njit(pyfunc) + for s in unicode_examples: + l = len(s) + cases = [ + ('', list(range(-10, l + 1)), list(range(l, 10))), + (s[:-2], [0] + list(range(-10, 1 - l)), + [-2, -1] + list(range(l - 2, 10))), + (s[3:], list(range(4)) + list(range(-10, -1)), + list(range(l, 10))), + (s, [0] + list(range(-10, 1 - l)), list(range(l, 10))), + ] + for sub_str, starts, ends in cases: + for start, end in product(starts + [None], ends): + self.assertEqual(pyfunc(s, sub_str, start, end), + cfunc(s, sub_str, start, end), + msg=msg.format(s, sub_str, start, end)) + + def test_index_rindex_exception_substring_not_found(self): + self.disable_leak_check() + + unicode_examples = [ + 'ascii', + '12345', + '1234567890', + '¡Y tú quién te crees?', + '大处着眼,小处着手。', + ] + pyfuncs = [index_with_start_end_usecase, rindex_with_start_end_usecase] + for pyfunc in pyfuncs: + cfunc = njit(pyfunc) + for s in unicode_examples: + l = len(s) + cases = [ + ('', list(range(l + 1, 10)), [l]), + (s[:-2], [0], list(range(l - 2))), + (s[3:], list(range(4, 10)), [l]), + (s, [None], list(range(l))), + ] + for sub_str, starts, ends in cases: + for start, end in product(starts, ends): + for func in [pyfunc, cfunc]: + with self.assertRaises(ValueError) as raises: + func(s, sub_str, start, end) + msg = 'substring not found' + 
self.assertIn(msg, str(raises.exception)) + + def test_index_rindex_exception_noninteger_start_end(self): + accepted = (types.Integer, types.NoneType) + pyfuncs = [index_with_start_end_usecase, rindex_with_start_end_usecase] + for pyfunc in pyfuncs: + cfunc = njit(pyfunc) + for start, end, name in [(0.1, 5, 'start'), (0, 0.5, 'end')]: + with self.assertRaises(TypingError) as raises: + cfunc('ascii', 'sci', start, end) + msg = '"{}" must be {}, not float'.format(name, accepted) + self.assertIn(msg, str(raises.exception)) + + def test_getitem(self): + pyfunc = getitem_usecase + cfunc = njit(pyfunc) + + for s in UNICODE_EXAMPLES: + for i in range(-len(s), len(s)): + self.assertEqual(pyfunc(s, i), + cfunc(s, i), + "'%s'[%d]?" % (s, i)) + + def test_getitem_scalar_kind(self): + # See issue #6135, make sure that getitem returns a char of the minimal + # kind required to represent the "got" item, this is done via the use + # of `hash` in the test function as it is sensitive to kind. + pyfunc = getitem_check_kind_usecase + cfunc = njit(pyfunc) + samples = ['a\u1234', '¡着'] + for s in samples: + for i in range(-len(s), len(s)): + self.assertEqual(pyfunc(s, i), + cfunc(s, i), + "'%s'[%d]?" % (s, i)) + + def test_getitem_error(self): + self.disable_leak_check() + + pyfunc = getitem_usecase + cfunc = njit(pyfunc) + + for s in UNICODE_EXAMPLES: + with self.assertRaises(IndexError) as raises: + pyfunc(s, len(s)) + self.assertIn('string index out of range', str(raises.exception)) + + with self.assertRaises(IndexError) as raises: + cfunc(s, len(s)) + self.assertIn('string index out of range', str(raises.exception)) + + def test_slice2(self): + pyfunc = getitem_usecase + cfunc = njit(pyfunc) + + for s in UNICODE_EXAMPLES: + for i in list(range(-len(s), len(s))): + for j in list(range(-len(s), len(s))): + sl = slice(i, j) + self.assertEqual(pyfunc(s, sl), + cfunc(s, sl), + "'%s'[%d:%d]?" 
% (s, i, j)) + + def test_slice2_error(self): + pyfunc = getitem_usecase + cfunc = njit(pyfunc) + + for s in UNICODE_EXAMPLES: + for i in [-2, -1, len(s), len(s) + 1]: + for j in [-2, -1, len(s), len(s) + 1]: + sl = slice(i, j) + self.assertEqual(pyfunc(s, sl), + cfunc(s, sl), + "'%s'[%d:%d]?" % (s, i, j)) + + def test_getitem_slice2_kind(self): + # See issue #6135. Also see note in test_getitem_scalar_kind regarding + # testing. + pyfunc = getitem_check_kind_usecase + cfunc = njit(pyfunc) + samples = ['abc\u1234\u1234', '¡¡¡着着着'] + for s in samples: + for i in [-2, -1, 0, 1, 2, len(s), len(s) + 1]: + for j in [-2, -1, 0, 1, 2, len(s), len(s) + 1]: + sl = slice(i, j) + self.assertEqual(pyfunc(s, sl), + cfunc(s, sl), + "'%s'[%d:%d]?" % (s, i, j)) + + def test_slice3(self): + pyfunc = getitem_usecase + cfunc = njit(pyfunc) + + for s in UNICODE_EXAMPLES: + for i in range(-len(s), len(s)): + for j in range(-len(s), len(s)): + for k in [-2, -1, 1, 2]: + sl = slice(i, j, k) + self.assertEqual(pyfunc(s, sl), + cfunc(s, sl), + "'%s'[%d:%d:%d]?" % (s, i, j, k)) + + def test_getitem_slice3_kind(self): + # See issue #6135. Also see note in test_getitem_scalar_kind regarding + # testing. + pyfunc = getitem_check_kind_usecase + cfunc = njit(pyfunc) + samples = ['abc\u1234\u1234', + 'a\u1234b\u1234c' + '¡¡¡着着着', + '¡着¡着¡着', + '着a着b着c', + '¡着a¡着b¡着c', + '¡着a着¡c',] + for s in samples: + for i in range(-len(s), len(s)): + for j in range(-len(s), len(s)): + for k in [-2, -1, 1, 2]: + sl = slice(i, j, k) + self.assertEqual(pyfunc(s, sl), + cfunc(s, sl), + "'%s'[%d:%d:%d]?" % (s, i, j, k)) + + def test_slice3_error(self): + pyfunc = getitem_usecase + cfunc = njit(pyfunc) + + for s in UNICODE_EXAMPLES: + for i in [-2, -1, len(s), len(s) + 1]: + for j in [-2, -1, len(s), len(s) + 1]: + for k in [-2, -1, 1, 2]: + sl = slice(i, j, k) + self.assertEqual(pyfunc(s, sl), + cfunc(s, sl), + "'%s'[%d:%d:%d]?" 
% (s, i, j, k)) + + def test_slice_ascii_flag(self): + """ + Make sure ascii flag is False when ascii and non-ascii characters are + mixed in output of Unicode slicing. + """ + @njit + def f(s): + return s[::2]._is_ascii, s[1::2]._is_ascii + + s = "¿abc¡Y tú, quién te cre\t\tes?" + self.assertEqual(f(s), (0, 1)) + + def test_zfill(self): + pyfunc = zfill_usecase + cfunc = njit(pyfunc) + + ZFILL_INPUTS = [ + 'ascii', + '+ascii', + '-ascii', + '-asc ii-', + '12345', + '-12345', + '+12345', + '', + '¡Y tú crs?', + '🐍⚡', + '+🐍⚡', + '-🐍⚡', + '大眼,小手。', + '+大眼,小手。', + '-大眼,小手。', + ] + + with self.assertRaises(TypingError) as raises: + cfunc(ZFILL_INPUTS[0], 1.1) + self.assertIn(' must be an Integer', str(raises.exception)) + + for s in ZFILL_INPUTS: + for width in range(-3, 20): + self.assertEqual(pyfunc(s, width), + cfunc(s, width)) + + def test_concat(self, flags=no_pyobj_flags): + pyfunc = concat_usecase + cfunc = njit(pyfunc) + for a in UNICODE_EXAMPLES: + for b in UNICODE_EXAMPLES[::-1]: + self.assertEqual(pyfunc(a, b), + cfunc(a, b), + "'%s' + '%s'?" 
% (a, b)) + + def test_repeat(self, flags=no_pyobj_flags): + pyfunc = repeat_usecase + cfunc = njit(pyfunc) + for a in UNICODE_EXAMPLES: + for b in (-1, 0, 1, 2, 3, 4, 5, 7, 8, 15, 70): + self.assertEqual(pyfunc(a, b), + cfunc(a, b)) + self.assertEqual(pyfunc(b, a), + cfunc(b, a)) + + def test_repeat_exception_float(self): + self.disable_leak_check() + cfunc = njit(repeat_usecase) + with self.assertRaises(TypingError) as raises: + cfunc('hi', 2.5) + self.assertIn(_header_lead + ' Function()', + str(raises.exception)) + + def test_split_exception_empty_sep(self): + self.disable_leak_check() + + pyfunc = split_usecase + cfunc = njit(pyfunc) + + # Handle empty separator exception + for func in [pyfunc, cfunc]: + with self.assertRaises(ValueError) as raises: + func('a', '') + self.assertIn('empty separator', str(raises.exception)) + + def test_split_exception_noninteger_maxsplit(self): + pyfunc = split_with_maxsplit_usecase + cfunc = njit(pyfunc) + + # Handle non-integer maxsplit exception + for sep in [' ', None]: + with self.assertRaises(TypingError) as raises: + cfunc('a', sep, 2.4) + self.assertIn('float64', str(raises.exception), + 'non-integer maxsplit with sep = %s' % sep) + + def test_split(self): + pyfunc = split_usecase + cfunc = njit(pyfunc) + + CASES = [ + (' a ', None), + ('', '⚡'), + ('abcabc', '⚡'), + ('🐍⚡', '⚡'), + ('🐍⚡🐍', '⚡'), + ('abababa', 'a'), + ('abababa', 'b'), + ('abababa', 'c'), + ('abababa', 'ab'), + ('abababa', 'aba'), + ] + + for test_str, splitter in CASES: + self.assertEqual(pyfunc(test_str, splitter), + cfunc(test_str, splitter), + "'%s'.split('%s')?" 
% (test_str, splitter)) + + def test_split_with_maxsplit(self): + CASES = [ + (' a ', None, 1), + ('', '⚡', 1), + ('abcabc', '⚡', 1), + ('🐍⚡', '⚡', 1), + ('🐍⚡🐍', '⚡', 1), + ('abababa', 'a', 2), + ('abababa', 'b', 1), + ('abababa', 'c', 2), + ('abababa', 'ab', 1), + ('abababa', 'aba', 5), + ] + + for pyfunc, fmt_str in [(split_with_maxsplit_usecase, + "'%s'.split('%s', %d)?"), + (split_with_maxsplit_kwarg_usecase, + "'%s'.split('%s', maxsplit=%d)?")]: + + cfunc = njit(pyfunc) + for test_str, splitter, maxsplit in CASES: + self.assertEqual(pyfunc(test_str, splitter, maxsplit), + cfunc(test_str, splitter, maxsplit), + fmt_str % (test_str, splitter, maxsplit)) + + def test_split_whitespace(self): + # explicit sep=None cases covered in test_split and + # test_split_with_maxsplit + pyfunc = split_whitespace_usecase + cfunc = njit(pyfunc) + + # list copied from + # https://github.com/python/cpython/blob/master/Objects/unicodetype_db.h + all_whitespace = ''.join(map(chr, [ + 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x001C, 0x001D, 0x001E, + 0x001F, 0x0020, 0x0085, 0x00A0, 0x1680, 0x2000, 0x2001, 0x2002, + 0x2003, 0x2004, 0x2005, 0x2006, 0x2007, 0x2008, 0x2009, 0x200A, + 0x2028, 0x2029, 0x202F, 0x205F, 0x3000 + ])) + + CASES = [ + '', + 'abcabc', + '🐍 ⚡', + '🐍 ⚡ 🐍', + '🐍 ⚡ 🐍 ', + ' 🐍 ⚡ 🐍', + ' 🐍' + all_whitespace + '⚡ 🐍 ', + ] + for test_str in CASES: + self.assertEqual(pyfunc(test_str), + cfunc(test_str), + "'%s'.split()?" 
% (test_str,)) + + def test_split_exception_invalid_keepends(self): + pyfunc = splitlines_with_keepends_usecase + cfunc = njit(pyfunc) + + accepted_types = (types.Integer, int, types.Boolean, bool) + for ty, keepends in (('none', None), ('unicode_type', 'None')): + with self.assertRaises(TypingError) as raises: + cfunc('\n', keepends) + msg = '"keepends" must be {}, not {}'.format(accepted_types, ty) + self.assertIn(msg, str(raises.exception)) + + def test_splitlines(self): + pyfunc = splitlines_usecase + cfunc = njit(pyfunc) + + cases = ['', '\n', 'abc\r\rabc\r\n', '🐍⚡\v', '\f🐍⚡\f\v\v🐍\x85', + '\u2028aba\u2029baba', '\n\r\na\v\fb\x0b\x0cc\x1c\x1d\x1e'] + + msg = 'Results of "{}".splitlines() must be equal' + for s in cases: + self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s)) + + def test_splitlines_with_keepends(self): + pyfuncs = [ + splitlines_with_keepends_usecase, + splitlines_with_keepends_kwarg_usecase + ] + messages = [ + 'Results of "{}".splitlines({}) must be equal', + 'Results of "{}".splitlines(keepends={}) must be equal' + ] + cases = ['', '\n', 'abc\r\rabc\r\n', '🐍⚡\v', '\f🐍⚡\f\v\v🐍\x85', + '\u2028aba\u2029baba', '\n\r\na\v\fb\x0b\x0cc\x1c\x1d\x1e'] + all_keepends = [True, False, 0, 1, -1, 100] + + for pyfunc, msg in zip(pyfuncs, messages): + cfunc = njit(pyfunc) + for s, keepends in product(cases, all_keepends): + self.assertEqual(pyfunc(s, keepends), cfunc(s, keepends), + msg=msg.format(s, keepends)) + + def test_rsplit_exception_empty_sep(self): + self.disable_leak_check() + + pyfunc = rsplit_usecase + cfunc = njit(pyfunc) + + # Handle empty separator exception + for func in [pyfunc, cfunc]: + with self.assertRaises(ValueError) as raises: + func('a', '') + self.assertIn('empty separator', str(raises.exception)) + + def test_rsplit_exception_noninteger_maxsplit(self): + pyfunc = rsplit_with_maxsplit_usecase + cfunc = njit(pyfunc) + + accepted_types = (types.Integer, int) + for sep in [' ', None]: + with self.assertRaises(TypingError) as 
raises: + cfunc('a', sep, 2.4) + msg = '"maxsplit" must be {}, not float'.format(accepted_types) + self.assertIn(msg, str(raises.exception)) + + def test_rsplit(self): + pyfunc = rsplit_usecase + cfunc = njit(pyfunc) + + CASES = [ + (' a ', None), + ('', '⚡'), + ('abcabc', '⚡'), + ('🐍⚡', '⚡'), + ('🐍⚡🐍', '⚡'), + ('abababa', 'a'), + ('abababa', 'b'), + ('abababa', 'c'), + ('abababa', 'ab'), + ('abababa', 'aba'), + ] + msg = 'Results of "{}".rsplit("{}") must be equal' + for s, sep in CASES: + self.assertEqual(pyfunc(s, sep), cfunc(s, sep), + msg=msg.format(s, sep)) + + def test_rsplit_with_maxsplit(self): + pyfuncs = [rsplit_with_maxsplit_usecase, + rsplit_with_maxsplit_kwarg_usecase] + CASES = [ + (' a ', None, 1), + ('', '⚡', 1), + ('abcabc', '⚡', 1), + ('🐍⚡', '⚡', 1), + ('🐍⚡🐍', '⚡', 1), + ('abababa', 'a', 2), + ('abababa', 'b', 1), + ('abababa', 'c', 2), + ('abababa', 'ab', 1), + ('abababa', 'aba', 5), + ] + messages = [ + 'Results of "{}".rsplit("{}", {}) must be equal', + 'Results of "{}".rsplit("{}", maxsplit={}) must be equal' + ] + + for pyfunc, msg in zip(pyfuncs, messages): + cfunc = njit(pyfunc) + for test_str, sep, maxsplit in CASES: + self.assertEqual(pyfunc(test_str, sep, maxsplit), + cfunc(test_str, sep, maxsplit), + msg=msg.format(test_str, sep, maxsplit)) + + def test_rsplit_whitespace(self): + pyfunc = rsplit_whitespace_usecase + cfunc = njit(pyfunc) + + # list copied from + # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodetype_db.h#L5996-L6031 # noqa: E501 + all_whitespace = ''.join(map(chr, [ + 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x001C, 0x001D, 0x001E, + 0x001F, 0x0020, 0x0085, 0x00A0, 0x1680, 0x2000, 0x2001, 0x2002, + 0x2003, 0x2004, 0x2005, 0x2006, 0x2007, 0x2008, 0x2009, 0x200A, + 0x2028, 0x2029, 0x202F, 0x205F, 0x3000 + ])) + + CASES = [ + '', + 'abcabc', + '🐍 ⚡', + '🐍 ⚡ 🐍', + '🐍 ⚡ 🐍 ', + ' 🐍 ⚡ 🐍', + ' 🐍' + all_whitespace + '⚡ 🐍 ', + ] + msg = 'Results of "{}".rsplit() must be equal' + for s 
in CASES: + self.assertEqual(pyfunc(s), cfunc(s), msg.format(s)) + + def test_join_empty(self): + # Can't pass empty list to nopython mode, so we have to make a + # separate test case + pyfunc = join_empty_usecase + cfunc = njit(pyfunc) + + CASES = [ + '', + '🐍🐍🐍', + ] + + for sep in CASES: + self.assertEqual(pyfunc(sep), + cfunc(sep), + "'%s'.join([])?" % (sep,)) + + def test_join_non_string_exception(self): + # Verify that join of list of integers raises typing exception + pyfunc = join_usecase + cfunc = njit(pyfunc) + + # Handle empty separator exception + with self.assertRaises(TypingError) as raises: + cfunc('', [1, 2, 3]) + # This error message is obscure, but indicates the error was trapped + # in the typing of str.join() + # Feel free to change this as we update error messages. + exc_message = str(raises.exception) + self.assertIn( + "During: resolving callee type: BoundFunction", + exc_message, + ) + # could be int32 or int64 + self.assertIn("reflected list(int", exc_message) + + def test_join(self): + pyfunc = join_usecase + cfunc = njit(pyfunc) + + CASES = [ + ('', ['', '', '']), + ('a', ['', '', '']), + ('', ['a', 'bbbb', 'c']), + ('🐍🐍🐍', ['⚡⚡'] * 5), + ] + + for sep, parts in CASES: + self.assertEqual(pyfunc(sep, parts), + cfunc(sep, parts), + "'%s'.join('%s')?" % (sep, parts)) + + def test_join_interleave_str(self): + # can pass a string as the parts iterable + pyfunc = join_usecase + cfunc = njit(pyfunc) + + CASES = [ + ('abc', '123'), + ('🐍🐍🐍', '⚡⚡'), + ] + + for sep, parts in CASES: + self.assertEqual(pyfunc(sep, parts), + cfunc(sep, parts), + "'%s'.join('%s')?" 
% (sep, parts)) + + def test_justification(self): + for pyfunc, case_name in [(center_usecase, 'center'), + (ljust_usecase, 'ljust'), + (rjust_usecase, 'rjust')]: + cfunc = njit(pyfunc) + + with self.assertRaises(TypingError) as raises: + cfunc(UNICODE_EXAMPLES[0], 1.1) + self.assertIn('The width must be an Integer', str(raises.exception)) + + for s in UNICODE_EXAMPLES: + for width in range(-3, 20): + self.assertEqual(pyfunc(s, width), + cfunc(s, width), + "'%s'.%s(%d)?" % (s, case_name, width)) + + def test_justification_fillchar(self): + for pyfunc, case_name in [(center_usecase_fillchar, 'center'), + (ljust_usecase_fillchar, 'ljust'), + (rjust_usecase_fillchar, 'rjust')]: + cfunc = njit(pyfunc) + + # allowed fillchar cases + for fillchar in [' ', '+', 'ú', '处']: + with self.assertRaises(TypingError) as raises: + cfunc(UNICODE_EXAMPLES[0], 1.1, fillchar) + self.assertIn('The width must be an Integer', + str(raises.exception)) + + for s in UNICODE_EXAMPLES: + for width in range(-3, 20): + self.assertEqual(pyfunc(s, width, fillchar), + cfunc(s, width, fillchar), + "'%s'.%s(%d, '%s')?" 
% (s, case_name, + width, + fillchar)) + + def test_justification_fillchar_exception(self): + self.disable_leak_check() + + for pyfunc in [center_usecase_fillchar, + ljust_usecase_fillchar, + rjust_usecase_fillchar]: + cfunc = njit(pyfunc) + + # disallowed fillchar cases + for fillchar in ['', '+0', 'quién', '处着']: + with self.assertRaises(ValueError) as raises: + cfunc(UNICODE_EXAMPLES[0], 20, fillchar) + self.assertIn('The fill character must be exactly one', + str(raises.exception)) + + # forbid fillchar cases with different types + for fillchar in [1, 1.1]: + with self.assertRaises(TypingError) as raises: + cfunc(UNICODE_EXAMPLES[0], 20, fillchar) + self.assertIn('The fillchar must be a UnicodeType', + str(raises.exception)) + + def test_inplace_concat(self, flags=no_pyobj_flags): + pyfunc = inplace_concat_usecase + cfunc = njit(pyfunc) + for a in UNICODE_EXAMPLES: + for b in UNICODE_EXAMPLES[::-1]: + self.assertEqual(pyfunc(a, b), + cfunc(a, b), + "'%s' + '%s'?" % (a, b)) + + def test_isidentifier(self): + def pyfunc(s): + return s.isidentifier() + + cfunc = njit(pyfunc) + # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L695-L708 # noqa: E501 + cpython = ['a', 'Z', '_', 'b0', 'bc', 'b_', 'µ', + '𝔘𝔫𝔦𝔠𝔬𝔡𝔢', ' ', '[', '©', '0'] + # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L742-L749 # noqa: E501 + cpython_extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF', + 'a\uD800b\uDFFF', 'a\uDFFFb\uD800', + 'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a'] + + msg = 'Results of "{}".isidentifier() must be equal' + for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras: + self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s)) + + def test_strip(self): + + STRIP_CASES = [ + ('ass cii', 'ai'), + ('ass cii', None), + ('asscii', 'ai '), + ('asscii ', 'ai '), + (' asscii ', 'ai '), + (' asscii ', 'asci '), + (' asscii ', 's'), + (' ', ' '), + ('', ' '), + 
('', ''), + ('', None), + (' ', None), + (' asscii ', 'ai '), + (' asscii ', ''), + (' asscii ', None), + ('tú quién te crees?', 'étú? '), + (' tú quién te crees? ', 'étú? '), + (' tú qrees? ', ''), + (' tú quién te crees? ', None), + ('大处 着眼,小处着手。大大大处', '大处'), + (' 大处大处 ', ''), + ('\t\nabcd\t', '\ta'), + (' 大处大处 ', None), + ('\t abcd \t', None), + ('\n abcd \n', None), + ('\r abcd \r', None), + ('\x0b abcd \x0b', None), + ('\x0c abcd \x0c', None), + ('\u2029abcd\u205F', None), + ('\u0085abcd\u2009', None) + ] + + # form with no parameter + for pyfunc, case_name in [(strip_usecase, 'strip'), + (lstrip_usecase, 'lstrip'), + (rstrip_usecase, 'rstrip')]: + cfunc = njit(pyfunc) + + for string, chars in STRIP_CASES: + self.assertEqual(pyfunc(string), + cfunc(string), + "'%s'.%s()?" % (string, case_name)) + # parametrized form + for pyfunc, case_name in [(strip_usecase_chars, 'strip'), + (lstrip_usecase_chars, 'lstrip'), + (rstrip_usecase_chars, 'rstrip')]: + cfunc = njit(pyfunc) + + sig1 = types.unicode_type(types.unicode_type, + types.Optional(types.unicode_type)) + cfunc_optional = njit([sig1])(pyfunc) + + def try_compile_bad_optional(*args): + bad = types.unicode_type(types.unicode_type, + types.Optional(types.float64)) + njit([bad])(pyfunc) + + for fn in cfunc, try_compile_bad_optional: + with self.assertRaises(TypingError) as raises: + fn('tú quis?', 1.1) + self.assertIn('The arg must be a UnicodeType or None', + str(raises.exception)) + + for fn in cfunc, cfunc_optional: + + for string, chars in STRIP_CASES: + self.assertEqual(pyfunc(string, chars), + fn(string, chars), + "'%s'.%s('%s')?" 
% (string, case_name, + chars)) + + def test_isspace(self): + def pyfunc(s): + return s.isspace() + + cfunc = njit(pyfunc) + # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L613-L621 # noqa: E501 + cpython = ['\u2000', '\u200a', '\u2014', '\U00010401', '\U00010427', + '\U00010429', '\U0001044E', '\U0001F40D', '\U0001F46F'] + # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L742-L749 # noqa: E501 + cpython_extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF', + 'a\uD800b\uDFFF', 'a\uDFFFb\uD800', + 'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a'] + + msg = 'Results of "{}".isspace() must be equal' + for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras: + self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s)) + + def test_istitle(self): + pyfunc = istitle_usecase + cfunc = njit(pyfunc) + error_msg = "'{0}'.py_istitle() = {1}\n'{0}'.c_istitle() = {2}" + + unicode_title = [x.title() for x in UNICODE_EXAMPLES] + special = [ + '', + ' ', + ' AA ', + ' Ab ', + '1', + 'A123', + 'A12Bcd', + '+abA', + '12Abc', + 'A12abc', + '%^Abc 5 $% Def' + '𐐁𐐩', + '𐐧𐑎', + '𐐩', + '𐑎', + '🐍 Is', + '🐍 NOT', + '👯Is', + 'ῼ', + 'Greek ῼitlecases ...' 
+ ] + ISTITLE_EXAMPLES = UNICODE_EXAMPLES + unicode_title + special + + for s in ISTITLE_EXAMPLES: + py_result = pyfunc(s) + c_result = cfunc(s) + self.assertEqual(py_result, c_result, + error_msg.format(s, py_result, c_result)) + + def test_isprintable(self): + def pyfunc(s): + return s.isprintable() + + cfunc = njit(pyfunc) + # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L710-L723 # noqa: E501 + cpython = ['', ' ', 'abcdefg', 'abcdefg\n', '\u0374', '\u0378', + '\ud800', '\U0001F46F', '\U000E0020'] + + msg = 'Results of "{}".isprintable() must be equal' + for s in UNICODE_EXAMPLES + cpython: + self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s)) + + def test_pointless_slice(self, flags=no_pyobj_flags): + def pyfunc(a): + return a[:] + cfunc = njit(pyfunc) + args = ['a'] + self.assertEqual(pyfunc(*args), cfunc(*args)) + + def test_walk_backwards(self, flags=no_pyobj_flags): + def pyfunc(a): + return a[::-1] + cfunc = njit(pyfunc) + args = ['a'] + self.assertEqual(pyfunc(*args), cfunc(*args)) + + def test_stride_slice(self, flags=no_pyobj_flags): + def pyfunc(a): + return a[::2] + cfunc = njit(pyfunc) + args = ['a'] + self.assertEqual(pyfunc(*args), cfunc(*args)) + + def test_basic_lt(self, flags=no_pyobj_flags): + def pyfunc(a, b): + return a < b + cfunc = njit(pyfunc) + args = ['ab', 'b'] + self.assertEqual(pyfunc(*args), cfunc(*args)) + + def test_basic_gt(self, flags=no_pyobj_flags): + def pyfunc(a, b): + return a > b + cfunc = njit(pyfunc) + args = ['ab', 'b'] + self.assertEqual(pyfunc(*args), cfunc(*args)) + + def test_comparison(self): + def pyfunc(option, x, y): + if option == '==': + return x == y + elif option == '!=': + return x != y + elif option == '<': + return x < y + elif option == '>': + return x > y + elif option == '<=': + return x <= y + elif option == '>=': + return x >= y + else: + return None + + cfunc = njit(pyfunc) + + for x, y in permutations(UNICODE_ORDERING_EXAMPLES, r=2): + 
for cmpop in ['==', '!=', '<', '>', '<=', '>=', '']: + args = [cmpop, x, y] + self.assertEqual(pyfunc(*args), cfunc(*args), + msg='failed on {}'.format(args)) + + def test_literal_concat(self): + def pyfunc(x): + abc = 'abc' + if len(x): + return abc + 'b123' + x + 'IO' + else: + return x + abc + '123' + x + + cfunc = njit(pyfunc) + args = ['x'] + self.assertEqual(pyfunc(*args), cfunc(*args)) + args = [''] + self.assertEqual(pyfunc(*args), cfunc(*args)) + + def test_literal_comparison(self): + def pyfunc(option): + x = 'a123' + y = 'aa12' + if option == '==': + return x == y + elif option == '!=': + return x != y + elif option == '<': + return x < y + elif option == '>': + return x > y + elif option == '<=': + return x <= y + elif option == '>=': + return x >= y + else: + return None + + cfunc = njit(pyfunc) + for cmpop in ['==', '!=', '<', '>', '<=', '>=', '']: + args = [cmpop] + self.assertEqual(pyfunc(*args), cfunc(*args), + msg='failed on {}'.format(args)) + + def test_literal_len(self): + def pyfunc(): + return len('abc') + cfunc = njit(pyfunc) + self.assertEqual(pyfunc(), cfunc()) + + def test_literal_getitem(self): + def pyfunc(which): + return 'abc'[which] + cfunc = njit(pyfunc) + for a in [-1, 0, 1, slice(1, None), slice(None, -1)]: + args = [a] + self.assertEqual(pyfunc(*args), cfunc(*args), + msg='failed on {}'.format(args)) + + def test_literal_in(self): + def pyfunc(x): + return x in '9876zabiuh' + + cfunc = njit(pyfunc) + for a in ['a', '9', '1', '', '8uha', '987']: + args = [a] + self.assertEqual(pyfunc(*args), cfunc(*args), + msg='failed on {}'.format(args)) + + def test_literal_xyzwith(self): + def pyfunc(x, y): + return 'abc'.startswith(x), 'cde'.endswith(y) + + cfunc = njit(pyfunc) + for args in permutations('abcdefg', r=2): + self.assertEqual(pyfunc(*args), cfunc(*args), + msg='failed on {}'.format(args)) + + def test_literal_find(self): + def pyfunc(x): + return 'abc'.find(x), x.find('a') + + cfunc = njit(pyfunc) + for a in ['ab']: + args = [a] 
+ self.assertEqual(pyfunc(*args), cfunc(*args), + msg='failed on {}'.format(args)) + + def test_not(self): + def pyfunc(x): + return not x + + cfunc = njit(pyfunc) + for a in UNICODE_EXAMPLES: + args = [a] + self.assertEqual(pyfunc(*args), cfunc(*args), + msg='failed on {}'.format(args)) + + def test_capitalize(self): + def pyfunc(x): + return x.capitalize() + + cfunc = njit(pyfunc) + # Samples taken from CPython testing: + # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L800-L815 # noqa: E501 + cpython = ['\U0001044F', '\U0001044F\U0001044F', '\U00010427\U0001044F', + '\U0001044F\U00010427', 'X\U00010427x\U0001044F', 'h\u0130', + '\u1fd2\u0130', 'finnish', 'A\u0345\u03a3'] + # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L926 # noqa: E501 + cpython_extras = ['\U00010000\U00100000'] + + msg = 'Results of "{}".capitalize() must be equal' + for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras: + self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s)) + + def test_isupper(self): + def pyfunc(x): + return x.isupper() + + cfunc = njit(pyfunc) + uppers = [x.upper() for x in UNICODE_EXAMPLES] + extras = ["AA12A", "aa12a", "大AA12A", "大aa12a", "AAADŽA", "A 1 1 大"] + + # Samples taken from CPython testing: + # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L585-L599 # noqa: E501 + cpython = ['\u2167', '\u2177', '\U00010401', '\U00010427', '\U00010429', + '\U0001044E', '\U0001F40D', '\U0001F46F'] + fourxcpy = [x * 4 for x in cpython] + + for a in UNICODE_EXAMPLES + uppers + extras + cpython + fourxcpy: + args = [a] + self.assertEqual(pyfunc(*args), cfunc(*args), + msg='failed on {}'.format(args)) + + def test_upper(self): + def pyfunc(x): + return x.upper() + + cfunc = njit(pyfunc) + for a in UNICODE_EXAMPLES: + args = [a] + self.assertEqual(pyfunc(*args), cfunc(*args), + msg='failed on 
{}'.format(args)) + + def test_casefold(self): + def pyfunc(x): + return x.casefold() + + cfunc = njit(pyfunc) + # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L774-L781 # noqa: E501 + cpython = ['hello', 'hELlo', 'ß', 'fi', '\u03a3', + 'A\u0345\u03a3', '\u00b5'] + # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L924 # noqa: E501 + cpython_extras = ['\U00010000\U00100000'] + + msg = 'Results of "{}".casefold() must be equal' + for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras: + self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s)) + + def test_isalpha(self): + def pyfunc(x): + return x.isalpha() + + cfunc = njit(pyfunc) + # Samples taken from CPython testing: + # https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L630-L640 # noqa: E501 + cpython = ['\u1FFc', '\U00010401', '\U00010427', '\U00010429', + '\U0001044E', '\U0001F40D', '\U0001F46F'] + # https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L738-L745 # noqa: E501 + extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF', + 'a\uD800b\uDFFF', 'a\uDFFFb\uD800', + 'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a'] + + msg = 'Results of "{}".isalpha() must be equal' + for s in UNICODE_EXAMPLES + [''] + extras + cpython: + self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s)) + + def test_isascii(self): + def pyfunc(x): + return x.isascii() + + cfunc = njit(pyfunc) + # Samples taken from CPython testing: + # https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#L913-L926 # noqa: E501 + cpython = ['', '\x00', '\x7f', '\x00\x7f', '\x80', '\xe9', ' '] + + msg = 'Results of "{}".isascii() must be equal' + for s in UNICODE_EXAMPLES + cpython: + self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s)) + + def test_title(self): + pyfunc = 
title + cfunc = njit(pyfunc) + # Samples taken from CPython testing: + # https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L813-L828 # noqa: E501 + cpython = ['\U0001044F', '\U0001044F\U0001044F', + '\U0001044F\U0001044F \U0001044F\U0001044F', + '\U00010427\U0001044F \U00010427\U0001044F', + '\U0001044F\U00010427 \U0001044F\U00010427', + 'X\U00010427x\U0001044F X\U00010427x\U0001044F', + 'fiNNISH', 'A\u03a3 \u1fa1xy', 'A\u03a3A'] + + msg = 'Results of "{}".title() must be equal' + for s in UNICODE_EXAMPLES + [''] + cpython: + self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s)) + + def test_swapcase(self): + def pyfunc(x): + return x.swapcase() + + cfunc = njit(pyfunc) + # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L834-L858 # noqa: E501 + cpython = ['\U0001044F', '\U00010427', '\U0001044F\U0001044F', + '\U00010427\U0001044F', '\U0001044F\U00010427', + 'X\U00010427x\U0001044F', 'fi', '\u0130', '\u03a3', + '\u0345\u03a3', 'A\u0345\u03a3', 'A\u0345\u03a3a', + 'A\u0345\u03a3', 'A\u03a3\u0345', '\u03a3\u0345 ', + '\u03a3', 'ß', '\u1fd2'] + # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L928 # noqa: E501 + cpython_extras = ['\U00010000\U00100000'] + + msg = 'Results of "{}".swapcase() must be equal' + for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras: + self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s)) + + def test_islower(self): + pyfunc = islower_usecase + cfunc = njit(pyfunc) + lowers = [x.lower() for x in UNICODE_EXAMPLES] + extras = ['AA12A', 'aa12a', '大AA12A', '大aa12a', 'AAADŽA', 'A 1 1 大'] + + # Samples taken from CPython testing: + # https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L586-L600 # noqa: E501 + cpython = ['\u2167', '\u2177', '\U00010401', '\U00010427', + '\U00010429', '\U0001044E', '\U0001F40D', 
'\U0001F46F'] + cpython += [x * 4 for x in cpython] + + msg = 'Results of "{}".islower() must be equal' + for s in UNICODE_EXAMPLES + lowers + [''] + extras + cpython: + self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s)) + + def test_isalnum(self): + def pyfunc(x): + return x.isalnum() + + cfunc = njit(pyfunc) + # Samples taken from CPython testing: + # https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L624-L628 # noqa: E501 + cpython = ['\U00010401', '\U00010427', '\U00010429', '\U0001044E', + '\U0001D7F6', '\U00011066', '\U000104A0', '\U0001F107'] + # https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L738-L745 # noqa: E501 + extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF', + 'a\uD800b\uDFFF', 'a\uDFFFb\uD800', + 'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a'] + + msg = 'Results of "{}".isalnum() must be equal' + for s in UNICODE_EXAMPLES + [''] + extras + cpython: + self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s)) + + def test_lower(self): + pyfunc = lower_usecase + cfunc = njit(pyfunc) + extras = ['AA12A', 'aa12a', '大AA12A', '大aa12a', 'AAADŽA', 'A 1 1 大'] + + # Samples taken from CPython testing: + # https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L748-L758 # noqa: E501 + cpython = ['\U00010401', '\U00010427', '\U0001044E', '\U0001F46F', + '\U00010427\U00010427', '\U00010427\U0001044F', + 'X\U00010427x\U0001044F', '\u0130'] + + # special cases for sigma from CPython testing: + # https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L759-L768 # noqa: E501 + sigma = ['\u03a3', '\u0345\u03a3', 'A\u0345\u03a3', 'A\u0345\u03a3a', + '\u03a3\u0345 ', '\U0008fffe', '\u2177'] + + extra_sigma = 'A\u03a3\u03a2' + sigma.append(extra_sigma) + + msg = 'Results of "{}".lower() must be equal' + for s in UNICODE_EXAMPLES + [''] + extras + cpython + 
sigma: + self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s)) + + def test_isnumeric(self): + def pyfunc(x): + return x.isnumeric() + + cfunc = njit(pyfunc) + # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L676-L693 # noqa: E501 + cpython = ['', 'a', '0', '\u2460', '\xbc', '\u0660', '0123456789', + '0123456789a', '\U00010401', '\U00010427', '\U00010429', + '\U0001044E', '\U0001F40D', '\U0001F46F', '\U00011065', + '\U0001D7F6', '\U00011066', '\U000104A0', '\U0001F107'] + # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L742-L749 # noqa: E501 + cpython_extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF', + 'a\uD800b\uDFFF', 'a\uDFFFb\uD800', 'a\uD800b\uDFFFa', + 'a\uDFFFb\uD800a'] + + msg = 'Results of "{}".isnumeric() must be equal' + for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras: + self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s)) + + def test_isdigit(self): + def pyfunc(x): + return x.isdigit() + + cfunc = njit(pyfunc) + # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L664-L674 # noqa: E501 + cpython = ['\u2460', '\xbc', '\u0660', '\U00010401', '\U00010427', + '\U00010429', '\U0001044E', '\U0001F40D', '\U0001F46F', + '\U00011065', '\U0001D7F6', '\U00011066', '\U000104A0', + '\U0001F107'] + # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L742-L749 # noqa: E501 + cpython_extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF', + 'a\uD800b\uDFFF', 'a\uDFFFb\uD800', + 'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a'] + + msg = 'Results of "{}".isdigit() must be equal' + for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras: + self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s)) + + def test_isdecimal(self): + def pyfunc(x): + return x.isdecimal() + + cfunc = njit(pyfunc) + # 
https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L646-L662 # noqa: E501 + cpython = ['', 'a', '0', '\u2460', '\xbc', '\u0660', '0123456789', + '0123456789a', '\U00010401', '\U00010427', '\U00010429', + '\U0001044E', '\U0001F40D', '\U0001F46F', '\U00011065', + '\U0001F107', '\U0001D7F6', '\U00011066', '\U000104A0'] + # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L742-L749 # noqa: E501 + cpython_extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF', + 'a\uD800b\uDFFF', 'a\uDFFFb\uD800', 'a\uD800b\uDFFFa', + 'a\uDFFFb\uD800a'] + + msg = 'Results of "{}".isdecimal() must be equal' + for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras: + self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s)) + + def test_replace(self): + pyfunc = replace_usecase + cfunc = njit(pyfunc) + + CASES = [ + ('abc', '', 'A'), + ('', '⚡', 'A'), + ('abcabc', '⚡', 'A'), + ('🐍⚡', '⚡', 'A'), + ('🐍⚡🐍', '⚡', 'A'), + ('abababa', 'a', 'A'), + ('abababa', 'b', 'A'), + ('abababa', 'c', 'A'), + ('abababa', 'ab', 'A'), + ('abababa', 'aba', 'A'), + ] + + for test_str, old_str, new_str in CASES: + self.assertEqual(pyfunc(test_str, old_str, new_str), + cfunc(test_str, old_str, new_str), + "'%s'.replace('%s', '%s')?" % + (test_str, old_str, new_str)) + + def test_replace_with_count(self): + pyfunc = replace_with_count_usecase + cfunc = njit(pyfunc) + + CASES = [ + ('abc', '', 'A'), + ('', '⚡', 'A'), + ('abcabc', '⚡', 'A'), + ('🐍⚡', '⚡', 'A'), + ('🐍⚡🐍', '⚡', 'A'), + ('abababa', 'a', 'A'), + ('abababa', 'b', 'A'), + ('abababa', 'c', 'A'), + ('abababa', 'ab', 'A'), + ('abababa', 'aba', 'A'), + ] + + count_test = [-1, 1, 0, 5] + + for test_str, old_str, new_str in CASES: + for count in count_test: + self.assertEqual(pyfunc(test_str, old_str, new_str, count), + cfunc(test_str, old_str, new_str, count), + "'%s'.replace('%s', '%s', '%s')?" 
% + (test_str, old_str, new_str, count)) + + def test_replace_unsupported(self): + def pyfunc(s, x, y, count): + return s.replace(x, y, count) + + cfunc = njit(pyfunc) + + with self.assertRaises(TypingError) as raises: + cfunc('ababababab', 'ba', 'qqq', 3.5) + msg = 'Unsupported parameters. The parameters must be Integer.' + self.assertIn(msg, str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc('ababababab', 0, 'qqq', 3) + msg = 'The object must be a UnicodeType.' + self.assertIn(msg, str(raises.exception)) + + with self.assertRaises(TypingError) as raises: + cfunc('ababababab', 'ba', 0, 3) + msg = 'The object must be a UnicodeType.' + self.assertIn(msg, str(raises.exception)) + + +class TestUnicodeInTuple(BaseTest): + + def test_const_unicode_in_tuple(self): + # Issue 3673 + @njit + def f(): + return ('aa',) < ('bb',) + + self.assertEqual(f.py_func(), f()) + + @njit + def f(): + return ('cc',) < ('bb',) + + self.assertEqual(f.py_func(), f()) + + def test_const_unicode_in_hetero_tuple(self): + @njit + def f(): + return ('aa', 1) < ('bb', 1) + + self.assertEqual(f.py_func(), f()) + + @njit + def f(): + return ('aa', 1) < ('aa', 2) + + self.assertEqual(f.py_func(), f()) + + def test_ascii_flag_unbox(self): + @njit + def f(s): + return s._is_ascii + + for s in UNICODE_EXAMPLES: + self.assertEqual(f(s), isascii(s)) + + def test_ascii_flag_join(self): + @njit + def f(): + s1 = 'abc' + s2 = '123' + s3 = '🐍⚡' + s4 = '大处着眼,小处着手。' + return (",".join([s1, s2])._is_ascii, + "🐍⚡".join([s1, s2])._is_ascii, + ",".join([s1, s3])._is_ascii, + ",".join([s3, s4])._is_ascii) + + self.assertEqual(f(), (1, 0, 0, 0)) + + def test_ascii_flag_getitem(self): + @njit + def f(): + s1 = 'abc123' + s2 = '🐍⚡🐍⚡🐍⚡' + return (s1[0]._is_ascii, s1[2:]._is_ascii, s2[0]._is_ascii, + s2[2:]._is_ascii) + + self.assertEqual(f(), (1, 1, 0, 0)) + + def test_ascii_flag_add_mul(self): + @njit + def f(): + s1 = 'abc' + s2 = '123' + s3 = '🐍⚡' + s4 = '大处着眼,小处着手。' + return ((s1 + 
s2)._is_ascii, + (s1 + s3)._is_ascii, + (s3 + s4)._is_ascii, + (s1 * 2)._is_ascii, + (s3 * 2)._is_ascii) + + self.assertEqual(f(), (1, 0, 0, 1, 0)) + + +class TestUnicodeIteration(BaseTest): + + def test_unicode_iter(self): + pyfunc = iter_usecase + cfunc = njit(pyfunc) + for a in UNICODE_EXAMPLES: + self.assertPreciseEqual(pyfunc(a), cfunc(a)) + + def test_unicode_literal_iter(self): + pyfunc = literal_iter_usecase + cfunc = njit(pyfunc) + self.assertPreciseEqual(pyfunc(), cfunc()) + + def test_unicode_enumerate_iter(self): + pyfunc = enumerated_iter_usecase + cfunc = njit(pyfunc) + for a in UNICODE_EXAMPLES: + self.assertPreciseEqual(pyfunc(a), cfunc(a)) + + def test_unicode_stopiteration_iter(self): + self.disable_leak_check() + pyfunc = iter_stopiteration_usecase + cfunc = njit(pyfunc) + for f in (pyfunc, cfunc): + for a in UNICODE_EXAMPLES: + with self.assertRaises(StopIteration): + f(a) + + def test_unicode_literal_stopiteration_iter(self): + pyfunc = literal_iter_stopiteration_usecase + cfunc = njit(pyfunc) + for f in (pyfunc, cfunc): + with self.assertRaises(StopIteration): + f() + + +class TestUnicodeAuxillary(BaseTest): + + def test_ord(self): + pyfunc = ord_usecase + cfunc = njit(pyfunc) + for ex in UNICODE_EXAMPLES: + for a in ex: + self.assertPreciseEqual(pyfunc(a), cfunc(a)) + + def test_ord_invalid(self): + self.disable_leak_check() + + pyfunc = ord_usecase + cfunc = njit(pyfunc) + + # wrong number of chars + for func in (pyfunc, cfunc): + for ch in ('', 'abc'): + with self.assertRaises(TypeError) as raises: + func(ch) + self.assertIn('ord() expected a character', + str(raises.exception)) + + # wrong type + with self.assertRaises(TypingError) as raises: + cfunc(1.23) + self.assertIn(_header_lead, str(raises.exception)) + + def test_chr(self): + pyfunc = chr_usecase + cfunc = njit(pyfunc) + for ex in UNICODE_EXAMPLES: + for x in ex: + a = ord(x) + self.assertPreciseEqual(pyfunc(a), cfunc(a)) + # test upper/lower bounds + for a in (0x0, _MAX_UNICODE): 
+ self.assertPreciseEqual(pyfunc(a), cfunc(a)) + + def test_chr_invalid(self): + pyfunc = chr_usecase + cfunc = njit(pyfunc) + + # value negative/>_MAX_UNICODE + for func in (pyfunc, cfunc): + for v in (-2, _MAX_UNICODE + 1): + with self.assertRaises(ValueError) as raises: + func(v) + self.assertIn("chr() arg not in range", str(raises.exception)) + + # wrong type + with self.assertRaises(TypingError) as raises: + cfunc('abc') + self.assertIn(_header_lead, str(raises.exception)) + + def test_unicode_type_mro(self): + # see issue #5635 + def bar(x): + return True + + @overload(bar) + def ol_bar(x): + ok = False + if isinstance(x, types.UnicodeType): + if isinstance(x, types.Hashable): + ok = True + return lambda x: ok + + @njit + def foo(strinst): + return bar(strinst) + + inst = "abc" + self.assertEqual(foo.py_func(inst), foo(inst)) + self.assertIn(types.Hashable, types.unicode_type.__class__.__mro__) + + def test_f_strings(self): + """test f-string support, which requires bytecode handling + """ + # requires formatting (FORMAT_VALUE) and concatenation (BUILD_STRINGS) + def impl1(a): + return f"AA_{a + 3}_B" + + # does not require concatenation + def impl2(a): + return f"{a + 2}" + + # no expression + def impl3(a): + return f"ABC_{a}" + + # format spec not allowed + def impl4(a): + return f"ABC_{a:0}" + + # corner case: empty string + def impl5(): + return f"" # noqa: F541 + + self.assertEqual(impl1(3), njit(impl1)(3)) + self.assertEqual(impl2(2), njit(impl2)(2)) + # string input + self.assertEqual(impl3("DE"), njit(impl3)("DE")) + + # check output when there's no __str__ or __repr__ defined + list_arg = ["A", "B"] + got = njit(impl3)(list_arg) + expected = f"ABC_" + self.assertEqual(got, expected) + + # check error when format spec provided + unsupported_errors = (UnsupportedError, UnsupportedBytecodeError) + with self.assertRaises(unsupported_errors) as raises: + njit(impl4)(["A", "B"]) + if PYVERSION in ((3, 13),): + msg = "Use of unsupported opcode 
(FORMAT_WITH_SPEC)" + self.assertIn(msg, str(raises.exception)) + elif PYVERSION in ((3, 10), (3, 11), (3, 12)): + msg = "format spec in f-strings not supported yet" + self.assertIn(msg, str(raises.exception)) + else: + raise NotImplementedError(PYVERSION) + self.assertEqual(impl5(), njit(impl5)()) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_unicode_array.py b/venv/lib/python3.10/site-packages/numba/tests/test_unicode_array.py new file mode 100644 index 0000000000000000000000000000000000000000..d291ffd90d14a2325e86cf479af2afde22bcc2bb --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_unicode_array.py @@ -0,0 +1,889 @@ +import numpy as np + +import unittest +from numba import jit, from_dtype +from numba.core import types +from numba.typed import Dict +from numba.tests.support import (TestCase, skip_ppc64le_issue4563) + + +def getitem(x, i): + return x[i] + + +def getitem2(x, i, j): + return x[i][j] + + +def setitem(x, i, v): + x[i] = v + return x + + +def setitem2(x, i, y, j): + x[i] = y[j] + return x + + +def setitem_literal(x, i): + x[i] = '123' + return x + + +def getitem_key(x, y, j): + x[y[j]] = 123 + + +def return_len(x, i): + return len(x[i]) + + +def return_bool(x, i): + return bool(x[i]) + + +def equal_getitem(x, i, j): + return x[i] == x[j] + + +def notequal_getitem(x, i, j): + return x[i] != x[j] + + +def lessthan_getitem(x, i, j): + return x[i] < x[j] + + +def greaterthan_getitem(x, i, j): + return x[i] > x[j] + + +def lessequal_getitem(x, i, j): + return x[i] <= x[j] + + +def greaterequal_getitem(x, i, j): + return x[i] >= x[j] + + +def contains_getitem2(x, i, y, j): + return x[i] in y[j] + + +def equal_getitem_value(x, i, v): + r1 = x[i] == v + r2 = v == x[i] + if r1 == r2: + return r1 + raise ValueError('x[i] == v and v == x[i] are unequal') + + +def notequal_getitem_value(x, i, v): + r1 = x[i] != v + r2 = v != x[i] + if r1 == r2: + return r1 + raise 
ValueError('x[i] != v and v != x[i] are unequal') + + +def return_isascii(x, i): + return x[i].isascii() + + +def return_isupper(x, i): + return x[i].isupper() + + +def return_upper(x, i): + return x[i].upper() + + +def return_str(x, i): + return str(x[i]) + + +def return_bytes(x, i): + return bytes(x[i]) + + +def return_hash(x, i): + return hash(x[i]) + + +def return_find(x, i, y, j): + return x[i].find(y[j]) + + +def return_rfind(x, i, y, j): + return x[i].rfind(y[j]) + + +def return_startswith(x, i, y, j): + return x[i].startswith(y[j]) + + +def return_endswith(x, i, y, j): + return x[i].endswith(y[j]) + + +def return_split1(x, i): + return x[i].split() + + +def return_split2(x, i, y, j): + return x[i].split(y[j]) + + +def return_split3(x, i, y, j, maxsplit): + return x[i].split(sep=y[j], maxsplit=maxsplit) + + +# NOT IMPLEMENTED: tests for rsplit + + +def return_center1(x, i, w): + return x[i].center(w) + + +def return_center2(x, i, w, y, j): + return x[i].center(w, y[j]) + + +def return_ljust1(x, i, w): + return x[i].ljust(w) + + +def return_ljust2(x, i, w, y, j): + return x[i].ljust(w, y[j]) + + +def return_rjust1(x, i, w): + return x[i].rjust(w) + + +def return_rjust2(x, i, w, y, j): + return x[i].rjust(w, y[j]) + + +def return_join(x, i, y, j, z, k): + return x[i].join([y[j], z[k]]) + + +def return_zfill(x, i, w): + return x[i].zfill(w) + + +def return_lstrip1(x, i): + return x[i].lstrip() + + +def return_lstrip2(x, i, y, j): + return x[i].lstrip(y[j]) + + +def return_rstrip1(x, i): + return x[i].rstrip() + + +def return_rstrip2(x, i, y, j): + return x[i].rstrip(y[j]) + + +def return_strip1(x, i): + return x[i].strip() + + +def return_strip2(x, i, y, j): + return x[i].strip(y[j]) + + +def return_add(x, i, y, j): + return x[i] + y[j] + + +def return_iadd(x, i, y, j): + x[i] += y[j] + return x[i] + + +def return_mul(x, i, y, j): + return x[i] * y[j] # one of the operants must be integer + + +def return_not(x, i): + return not x[i] + + +def 
join_string_array(str_arr): + return ",".join(str_arr) + + +@skip_ppc64le_issue4563 +class TestUnicodeArray(TestCase): + + def _test(self, pyfunc, cfunc, *args, **kwargs): + expected = pyfunc(*args, **kwargs) + self.assertPreciseEqual(cfunc(*args, **kwargs), expected) + + def test_getitem2(self): + cgetitem2 = jit(nopython=True)(getitem2) + + arr = np.array(b'12') + self.assertPreciseEqual(cgetitem2(arr, (), 0), getitem2(arr, (), 0)) + with self.assertRaisesRegex(IndexError, 'index out of range'): + cgetitem2(arr, (), 2) + + arr = np.array('12') + self.assertPreciseEqual(cgetitem2(arr, (), 0), getitem2(arr, (), 0)) + with self.assertRaisesRegex(IndexError, 'index out of range'): + cgetitem2(arr, (), 2) + + arr = np.array([b'12', b'3']) + self.assertPreciseEqual(cgetitem2(arr, 0, 0), getitem2(arr, 0, 0)) + self.assertPreciseEqual(cgetitem2(arr, 0, 1), getitem2(arr, 0, 1)) + self.assertPreciseEqual(cgetitem2(arr, 1, 0), getitem2(arr, 1, 0)) + with self.assertRaisesRegex(IndexError, 'index out of range'): + cgetitem2(arr, 1, 1) + + arr = np.array(['12', '3']) + self.assertPreciseEqual(cgetitem2(arr, 0, 0), getitem2(arr, 0, 0)) + self.assertPreciseEqual(cgetitem2(arr, 0, 1), getitem2(arr, 0, 1)) + self.assertPreciseEqual(cgetitem2(arr, 1, 0), getitem2(arr, 1, 0)) + with self.assertRaisesRegex(IndexError, 'index out of range'): + cgetitem2(arr, 1, 1) + + def test_getitem(self): + pyfunc = getitem + cfunc = jit(nopython=True)(pyfunc) + self._test(pyfunc, cfunc, b'12', 1) + self._test(pyfunc, cfunc, np.array(b'12'), ()) + self._test(pyfunc, cfunc, np.array([b'12', b'3']), 0) + self._test(pyfunc, cfunc, np.array([b'12', b'3']), 1) + + self._test(pyfunc, cfunc, '12', 1) + self._test(pyfunc, cfunc, np.array('12'), ()) + self._test(pyfunc, cfunc, np.array(['12', '3']), 0) + self._test(pyfunc, cfunc, np.array(['12', '3']), 1) + + def test_getitem_key(self): + pyfunc = getitem_key + cfunc = jit(nopython=True)(pyfunc) + + for x, i in [ + (np.array('123'), ()), + 
(np.array(['123']), 0), + (np.array(b'123'), ()), + (np.array([b'123']), 0) + ]: + d1 = {} + d2 = Dict.empty(from_dtype(x.dtype), types.int64) + pyfunc(d1, x, i) + cfunc(d2, x, i) + self.assertEqual(d1, d2) + # check for charseq to str conversion: + str(d2) + + def test_setitem(self): + pyfunc = setitem + cfunc = jit(nopython=True)(pyfunc) + + x = np.array(12) + self._test(pyfunc, cfunc, x, (), 34) + + x1 = np.array(b'123') + x2 = np.array(b'123') + y1 = pyfunc(x1, (), b'34') + y2 = cfunc(x2, (), b'34') + self.assertPreciseEqual(x1, x2) + self.assertPreciseEqual(y1, y2) + + x1 = np.array(['123']) + x2 = np.array(['123']) + y1 = pyfunc(x1, 0, '34') + y2 = cfunc(x2, 0, '34') + self.assertPreciseEqual(x1, x2) + self.assertPreciseEqual(y1, y2) + + def test_setitem2(self): + pyfunc = setitem2 + cfunc = jit(nopython=True)(pyfunc) + + x1 = np.array(['123', 'ABC']) + x2 = np.array(['123', 'ABC']) + y1 = pyfunc(x1, 0, x1, 1) + y2 = cfunc(x2, 0, x2, 1) + self.assertPreciseEqual(x1, x2) + self.assertPreciseEqual(y1, y2) + + x1 = np.array([b'123', b'ABC']) + x2 = np.array([b'123', b'ABC']) + y1 = pyfunc(x1, 0, x1, 1) + y2 = cfunc(x2, 0, x2, 1) + self.assertPreciseEqual(x1, x2) + self.assertPreciseEqual(y1, y2) + + x1 = np.array('123') + x2 = np.array('123') + z1 = np.array('ABC') + z2 = np.array('ABC') + y1 = pyfunc(x1, (), z1, ()) + y2 = cfunc(x2, (), z2, ()) + self.assertPreciseEqual(x1, x2) + self.assertPreciseEqual(y1, y2) + + x1 = np.array(123) + x2 = np.array(123) + z1 = 456, + z2 = 456, + y1 = pyfunc(x1, (), z1, 0) + y2 = cfunc(x2, (), z2, 0) + self.assertPreciseEqual(x1, x2) + self.assertPreciseEqual(y1, y2) + + # bytes + x1 = np.array(b'123') + x2 = np.array(b'123') + z1 = b'ABC', + z2 = b'ABC', + y1 = pyfunc(x1, (), z1, 0) + y2 = cfunc(x2, (), z2, 0) + self.assertPreciseEqual(x1, x2) + self.assertPreciseEqual(y1, y2) + + # UTF-8 + x1 = np.array('123') + x2 = np.array('123') + z1 = 'ABC', + z2 = 'ABC', + y1 = pyfunc(x1, (), z1, 0) + y2 = cfunc(x2, (), z2, 0) + 
self.assertPreciseEqual(x1, x2) + self.assertPreciseEqual(y1, y2) + + # UTF-16 + x1 = np.array('123') + x2 = np.array('123') + z1 = 'AB\u01e9', + z2 = 'AB\u01e9', + y1 = pyfunc(x1, (), z1, 0) + y2 = cfunc(x2, (), z2, 0) + self.assertPreciseEqual(x1, x2) + self.assertPreciseEqual(y1, y2) + + # UTF-32 + x1 = np.array('123') + x2 = np.array('123') + z1 = 'AB\U00108a0e', + z2 = 'AB\U00108a0e', + y1 = pyfunc(x1, (), z1, 0) + y2 = cfunc(x2, (), z2, 0) + self.assertPreciseEqual(x1, x2) + self.assertPreciseEqual(y1, y2) + + # UTF-8, assign longer value (truncates as in numpy) + x1 = np.array('123') + x2 = np.array('123') + z1 = 'ABCD', + z2 = 'ABCD', + y1 = pyfunc(x1, (), z1, 0) + y2 = cfunc(x2, (), z2, 0) + self.assertPreciseEqual(x1, x2) + self.assertPreciseEqual(y1, y2) + + # UTF-8, assign shorter value + x1 = np.array('123') + x2 = np.array('123') + z1 = 'AB', + z2 = 'AB', + y1 = pyfunc(x1, (), z1, 0) + y2 = cfunc(x2, (), z2, 0) + self.assertPreciseEqual(x1, x2) + self.assertPreciseEqual(y1, y2) + + # bytes, assign longer value (truncates as in numpy) + x1 = np.array(b'123') + x2 = np.array(b'123') + z1 = b'ABCD', + z2 = b'ABCD', + y1 = pyfunc(x1, (), z1, 0) + y2 = cfunc(x2, (), z2, 0) + self.assertPreciseEqual(x1, x2) + self.assertPreciseEqual(y1, y2) + + # bytes, assign shorter value + x1 = np.array(b'123') + x2 = np.array(b'123') + z1 = b'AB', + z2 = b'AB', + y1 = pyfunc(x1, (), z1, 0) + y2 = cfunc(x2, (), z2, 0) + self.assertPreciseEqual(x1, x2) + self.assertPreciseEqual(y1, y2) + + def test_setitem_literal(self): + pyfunc = setitem_literal + cfunc = jit(nopython=True)(pyfunc) + + x1 = np.array('ABC') + x2 = np.array('ABC') + y1 = pyfunc(x1, ()) + y2 = cfunc(x2, ()) + self.assertPreciseEqual(x1, x2) + self.assertPreciseEqual(y1, y2) + + x1 = np.array(['ABC', '5678']) + x2 = np.array(['ABC', '5678']) + y1 = pyfunc(x1, 0) + y2 = cfunc(x2, 0) + self.assertPreciseEqual(x1, x2) + self.assertPreciseEqual(y1, y2) + + x1 = np.array(['ABC', '5678']) + x2 = np.array(['ABC', 
'5678']) + y1 = pyfunc(x1, 1) + y2 = cfunc(x2, 1) + self.assertPreciseEqual(x1, x2) + self.assertPreciseEqual(y1, y2) + + def test_return_len(self): + pyfunc = return_len + cfunc = jit(nopython=True)(pyfunc) + self._test(pyfunc, cfunc, np.array(''), ()) + self._test(pyfunc, cfunc, np.array(b''), ()) + self._test(pyfunc, cfunc, np.array(b'12'), ()) + self._test(pyfunc, cfunc, np.array('12'), ()) + self._test(pyfunc, cfunc, np.array([b'12', b'3']), 0) + self._test(pyfunc, cfunc, np.array(['12', '3']), 0) + self._test(pyfunc, cfunc, np.array([b'12', b'3']), 1) + self._test(pyfunc, cfunc, np.array(['12', '3']), 1) + + def test_return_bool(self): + pyfunc = return_bool + cfunc = jit(nopython=True)(pyfunc) + self._test(pyfunc, cfunc, np.array(''), ()) + self._test(pyfunc, cfunc, np.array(b''), ()) + self._test(pyfunc, cfunc, np.array(b'12'), ()) + self._test(pyfunc, cfunc, np.array('12'), ()) + self._test(pyfunc, cfunc, np.array([b'12', b'']), 0) + self._test(pyfunc, cfunc, np.array(['12', '']), 0) + self._test(pyfunc, cfunc, np.array([b'12', b'']), 1) + self._test(pyfunc, cfunc, np.array(['12', '']), 1) + + def _test_op_getitem(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + self._test(pyfunc, cfunc, np.array([1, 2]), 0, 1) + self._test(pyfunc, cfunc, '12', 0, 1) + self._test(pyfunc, cfunc, b'12', 0, 1) + self._test(pyfunc, cfunc, np.array(b'12'), (), ()) + self._test(pyfunc, cfunc, np.array('1234'), (), ()) + + self._test(pyfunc, cfunc, np.array([b'1', b'2']), 0, 0) + self._test(pyfunc, cfunc, np.array([b'1', b'2']), 0, 1) + self._test(pyfunc, cfunc, np.array([b'12', b'3']), 0, 0) + self._test(pyfunc, cfunc, np.array([b'12', b'3']), 1, 1) + self._test(pyfunc, cfunc, np.array([b'12', b'3']), 0, 1) + self._test(pyfunc, cfunc, np.array([b'12', b'3']), 1, 0) + + self._test(pyfunc, cfunc, np.array(['1', '2']), 0, 0) + self._test(pyfunc, cfunc, np.array(['1', '2']), 0, 1) + self._test(pyfunc, cfunc, np.array(['12', '3']), 0, 0) + self._test(pyfunc, cfunc, 
np.array(['12', '3']), 1, 1) + self._test(pyfunc, cfunc, np.array(['12', '3']), 0, 1) + self._test(pyfunc, cfunc, np.array(['12', '3']), 1, 0) + + def test_equal_getitem(self): + self._test_op_getitem(equal_getitem) + + def test_notequal_getitem(self): + self._test_op_getitem(notequal_getitem) + + def test_lessthan_getitem(self): + self._test_op_getitem(lessthan_getitem) + + def test_greaterthan_getitem(self): + self._test_op_getitem(greaterthan_getitem) + + def test_lessequal_getitem(self): + self._test_op_getitem(lessequal_getitem) + + def test_greaterequal_getitem(self): + self._test_op_getitem(greaterequal_getitem) + + def _test_op_getitem_value(self, pyfunc): + cfunc = jit(nopython=True)(pyfunc) + self._test(pyfunc, cfunc, np.array([1, 2]), 0, 1) + self._test(pyfunc, cfunc, '12', 0, '1') + self._test(pyfunc, cfunc, '12', 1, '3') + self._test(pyfunc, cfunc, np.array('1234'), (), '1234') + self._test(pyfunc, cfunc, np.array(['1234']), 0, '1234') + self._test(pyfunc, cfunc, np.array(['1234']), 0, 'abc') + # fails: No conversion from array(bool, 1d, C) to bool + #self._test(pyfunc, cfunc, b'12', 0, b'1') + self._test(pyfunc, cfunc, np.array(b'12'), (), b'12') + self._test(pyfunc, cfunc, np.array([b'12']), 0, b'12') + self._test(pyfunc, cfunc, np.array([b'12']), 0, b'a') + + def test_equal_getitem_value(self): + self._test_op_getitem_value(equal_getitem_value) + + def test_notequal_getitem_value(self): + self._test_op_getitem_value(notequal_getitem_value) + + def test_contains_getitem2(self): + pyfunc = contains_getitem2 + cfunc = jit(nopython=True)(pyfunc) + + x = np.array('123') + y = np.array('12345') + self._test(pyfunc, cfunc, x, (), y, ()) + self._test(pyfunc, cfunc, y, (), x, ()) + + x = np.array(b'123') + y = np.array(b'12345') + self._test(pyfunc, cfunc, x, (), y, ()) + self._test(pyfunc, cfunc, y, (), x, ()) + + x = ('123',) + y = np.array('12345') + self._test(pyfunc, cfunc, x, 0, y, ()) + self._test(pyfunc, cfunc, y, (), x, 0) + + x = (b'123',) + y = 
np.array(b'12345') + self._test(pyfunc, cfunc, x, 0, y, ()) + self._test(pyfunc, cfunc, y, (), x, 0) + + def test_return_isascii(self): + pyfunc = return_isascii + cfunc = jit(nopython=True)(pyfunc) + self._test(pyfunc, cfunc, np.array('1234'), ()) + self._test(pyfunc, cfunc, np.array(['1234']), 0) + self._test(pyfunc, cfunc, np.array('1234\u00e9'), ()) + self._test(pyfunc, cfunc, np.array(['1234\u00e9']), 0) + + def test_return_isupper(self): + pyfunc = return_isupper + cfunc = jit(nopython=True)(pyfunc) + self._test(pyfunc, cfunc, np.array('abc'), ()) + self._test(pyfunc, cfunc, np.array(['abc']), 0) + + self._test(pyfunc, cfunc, np.array(b'abc'), ()) + self._test(pyfunc, cfunc, np.array([b'abc']), 0) + + def test_return_str(self): + pyfunc = return_str + cfunc = jit(nopython=True)(pyfunc) + self._test(pyfunc, cfunc, np.array('1234'), ()) + self._test(pyfunc, cfunc, np.array(['1234']), 0) + + def test_return_bytes(self): + pyfunc = return_bytes + cfunc = jit(nopython=True)(pyfunc) + self._test(pyfunc, cfunc, np.array(b'1234'), ()) + self._test(pyfunc, cfunc, np.array([b'1234']), 0) + + def test_return_upper(self): + pyfunc = return_upper + cfunc = jit(nopython=True)(pyfunc) + self._test(pyfunc, cfunc, np.array('abc'), ()) + self._test(pyfunc, cfunc, np.array(['abc']), 0) + + self._test(pyfunc, cfunc, np.array(b'abc'), ()) + self._test(pyfunc, cfunc, np.array([b'abc']), 0) + + def test_hash(self): + pyfunc = return_hash + cfunc = jit(nopython=True)(pyfunc) + + hash1 = pyfunc(np.array('123'), ()) + hash2 = hash('123') + hash3 = hash(np.array('123')[()]) + self.assertTrue(hash1 == hash2 == hash3) + + self._test(pyfunc, cfunc, np.array('1234'), ()) + self._test(pyfunc, cfunc, np.array(['1234']), 0) + + self._test(pyfunc, cfunc, np.array('1234\u00e9'), ()) + self._test(pyfunc, cfunc, np.array(['1234u00e9']), 0) + + self._test(pyfunc, cfunc, np.array('1234\U00108a0e'), ()) + self._test(pyfunc, cfunc, np.array(['1234\U00108a0e']), 0) + + self._test(pyfunc, cfunc, 
np.array(b'1234'), ()) + self._test(pyfunc, cfunc, np.array([b'1234']), 0) + + def test_return_find(self): + pyfunc = return_find + cfunc = jit(nopython=True)(pyfunc) + self._test(pyfunc, cfunc, np.array('1234'), (), np.array('23'), ()) + self._test(pyfunc, cfunc, np.array('1234'), (), ('23',), 0) + self._test(pyfunc, cfunc, ('1234',), 0, np.array('23'), ()) + self._test(pyfunc, cfunc, np.array(b'1234'), (), np.array(b'23'), ()) + self._test(pyfunc, cfunc, np.array(b'1234'), (), (b'23',), 0) + self._test(pyfunc, cfunc, (b'1234',), 0, np.array(b'23'), ()) + + def test_return_rfind(self): + pyfunc = return_rfind + cfunc = jit(nopython=True)(pyfunc) + self._test(pyfunc, cfunc, np.array('1234'), (), np.array('23'), ()) + self._test(pyfunc, cfunc, np.array('1234'), (), ('23',), 0) + self._test(pyfunc, cfunc, ('1234',), 0, np.array('23'), ()) + self._test(pyfunc, cfunc, np.array(b'1234'), (), np.array(b'23'), ()) + self._test(pyfunc, cfunc, np.array(b'1234'), (), (b'23',), 0) + self._test(pyfunc, cfunc, (b'1234',), 0, np.array(b'23'), ()) + + def test_return_startswith(self): + pyfunc = return_startswith + cfunc = jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array('1234'), (), np.array('23'), ()) + self._test(pyfunc, cfunc, np.array('1234'), (), ('23',), 0) + self._test(pyfunc, cfunc, ('1234',), 0, np.array('23'), ()) + self._test(pyfunc, cfunc, np.array(b'1234'), (), np.array(b'23'), ()) + self._test(pyfunc, cfunc, np.array(b'1234'), (), (b'23',), 0) + self._test(pyfunc, cfunc, (b'1234',), 0, np.array(b'23'), ()) + + def test_return_endswith(self): + pyfunc = return_endswith + cfunc = jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array('1234'), (), np.array('23'), ()) + self._test(pyfunc, cfunc, np.array('1234'), (), ('23',), 0) + self._test(pyfunc, cfunc, ('1234',), 0, np.array('23'), ()) + self._test(pyfunc, cfunc, np.array(b'1234'), (), np.array(b'23'), ()) + self._test(pyfunc, cfunc, np.array(b'1234'), (), (b'23',), 0) + 
self._test(pyfunc, cfunc, (b'1234',), 0, np.array(b'23'), ()) + + def test_return_split1(self): + pyfunc = return_split1 + cfunc = jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array('12 34'), ()) + self._test(pyfunc, cfunc, np.array(b'1234'), ()) + + def test_return_split2(self): + pyfunc = return_split2 + cfunc = jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array('12 34'), (), np.array(' '), ()) + self._test(pyfunc, cfunc, np.array('12 34'), (), (' ',), 0) + self._test(pyfunc, cfunc, ('12 34',), 0, np.array(' '), ()) + self._test(pyfunc, cfunc, np.array(b'12 34'), (), np.array(b' '), ()) + self._test(pyfunc, cfunc, np.array(b'12 34'), (), (b' ',), 0) + self._test(pyfunc, cfunc, (b'12 34',), 0, np.array(b' '), ()) + + def test_return_split3(self): + pyfunc = return_split3 + cfunc = jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array('1 2 3 4'), (), + np.array(' '), (), 2) + self._test(pyfunc, cfunc, np.array('1 2 3 4'), (), (' ',), 0, 2) + self._test(pyfunc, cfunc, ('1 2 3 4',), 0, np.array(' '), (), 2) + self._test(pyfunc, cfunc, np.array(b'1 2 3 4'), (), + np.array(b' '), (), 2) + self._test(pyfunc, cfunc, np.array(b'1 2 3 4'), (), (b' ',), 0, 2) + self._test(pyfunc, cfunc, (b'1 2 3 4',), 0, np.array(b' '), (), 2) + + def test_return_ljust1(self): + pyfunc = return_ljust1 + cfunc = jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array('1 2 3 4'), (), 40) + self._test(pyfunc, cfunc, np.array(b'1 2 3 4'), (), 40) + + def test_return_ljust2(self): + pyfunc = return_ljust2 + cfunc = jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array('1 2 3 4'), (), 40, + np.array('='), ()) + self._test(pyfunc, cfunc, np.array('1 2 3 4'), (), 40, ('=',), 0) + self._test(pyfunc, cfunc, ('1 2 3 4',), 0, 40, np.array('='), ()) + self._test(pyfunc, cfunc, np.array(b'1 2 3 4'), (), 40, + np.array(b'='), ()) + self._test(pyfunc, cfunc, np.array(b'1 2 3 4'), (), 40, (b'=',), 0) + self._test(pyfunc, cfunc, (b'1 2 3 4',), 
0, 40, np.array(b'='), ()) + + def test_return_rjust1(self): + pyfunc = return_rjust1 + cfunc = jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array('1 2 3 4'), (), 40) + self._test(pyfunc, cfunc, np.array(b'1 2 3 4'), (), 40) + + def test_return_rjust2(self): + pyfunc = return_rjust2 + cfunc = jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array('1 2 3 4'), (), 40, + np.array('='), ()) + self._test(pyfunc, cfunc, np.array('1 2 3 4'), (), 40, ('=',), 0) + self._test(pyfunc, cfunc, ('1 2 3 4',), 0, 40, np.array('='), ()) + self._test(pyfunc, cfunc, np.array(b'1 2 3 4'), (), 40, + np.array(b'='), ()) + self._test(pyfunc, cfunc, np.array(b'1 2 3 4'), (), 40, (b'=',), 0) + self._test(pyfunc, cfunc, (b'1 2 3 4',), 0, 40, np.array(b'='), ()) + + def test_return_center1(self): + pyfunc = return_center1 + cfunc = jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array('1 2 3 4'), (), 40) + self._test(pyfunc, cfunc, np.array(b'1 2 3 4'), (), 40) + + def test_return_center2(self): + pyfunc = return_center2 + cfunc = jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array('1 2 3 4'), (), 40, + np.array('='), ()) + self._test(pyfunc, cfunc, np.array('1 2 3 4'), (), 40, ('=',), 0) + self._test(pyfunc, cfunc, ('1 2 3 4',), 0, 40, np.array('='), ()) + self._test(pyfunc, cfunc, np.array(b'1 2 3 4'), (), 40, + np.array(b'='), ()) + self._test(pyfunc, cfunc, np.array(b'1 2 3 4'), (), 40, (b'=',), 0) + self._test(pyfunc, cfunc, (b'1 2 3 4',), 0, 40, np.array(b'='), ()) + + def test_return_join(self): + pyfunc = return_join + cfunc = jit(nopython=True)(pyfunc) + self._test(pyfunc, cfunc, np.array(','), (), np.array('abc'), (), + np.array('123'), ()) + self._test(pyfunc, cfunc, np.array(','), (), np.array('abc'), (), + ('123',), 0) + self._test(pyfunc, cfunc, (',',), 0, np.array('abc'), (), + np.array('123'), ()) + self._test(pyfunc, cfunc, (',',), 0, np.array('abc'), (), + ('123',), 0) + self._test(pyfunc, cfunc, np.array(b','), (), 
np.array(b'abc'), (), + np.array(b'123'), ()) + self._test(pyfunc, cfunc, np.array(b','), (), np.array(b'abc'), (), + (b'123',), 0) + self._test(pyfunc, cfunc, (b',',), 0, np.array(b'abc'), (), + np.array(b'123'), ()) + self._test(pyfunc, cfunc, (b',',), 0, np.array(b'abc'), (), + (b'123',), 0) + + def test_return_zfill(self): + pyfunc = return_zfill + cfunc = jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array('1 2 3 4'), (), 40) + self._test(pyfunc, cfunc, np.array(b'1 2 3 4'), (), 40) + + def test_return_lstrip1(self): + pyfunc = return_lstrip1 + cfunc = jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array(' 123 '), ()) + self._test(pyfunc, cfunc, np.array(b' 123 '), ()) + + def test_return_lstrip2(self): + pyfunc = return_lstrip2 + cfunc = jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array(' 123 '), (), np.array(' '), ()) + self._test(pyfunc, cfunc, np.array(' 123 '), (), (' ',), 0) + self._test(pyfunc, cfunc, (' 123 ',), 0, np.array(' '), ()) + + self._test(pyfunc, cfunc, np.array(b' 123 '), (), np.array(b' '), ()) + self._test(pyfunc, cfunc, np.array(b' 123 '), (), (b' ',), 0) + self._test(pyfunc, cfunc, (b' 123 ',), 0, np.array(b' '), ()) + + def test_return_rstrip1(self): + pyfunc = return_rstrip1 + cfunc = jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array(' 123 '), ()) + self._test(pyfunc, cfunc, np.array(b' 123 '), ()) + + def test_return_rstrip2(self): + pyfunc = return_rstrip2 + cfunc = jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array(' 123 '), (), np.array(' '), ()) + self._test(pyfunc, cfunc, np.array(' 123 '), (), (' ',), 0) + self._test(pyfunc, cfunc, (' 123 ',), 0, np.array(' '), ()) + + self._test(pyfunc, cfunc, np.array(b' 123 '), (), np.array(b' '), ()) + self._test(pyfunc, cfunc, np.array(b' 123 '), (), (b' ',), 0) + self._test(pyfunc, cfunc, (b' 123 ',), 0, np.array(b' '), ()) + + def test_return_strip1(self): + pyfunc = return_strip1 + cfunc = 
jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array(' 123 '), ()) + self._test(pyfunc, cfunc, np.array(b' 123 '), ()) + + def test_return_strip2(self): + pyfunc = return_strip2 + cfunc = jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array(' 123 '), (), np.array(' '), ()) + self._test(pyfunc, cfunc, np.array(' 123 '), (), (' ',), 0) + self._test(pyfunc, cfunc, (' 123 ',), 0, np.array(' '), ()) + + self._test(pyfunc, cfunc, np.array(b' 123 '), (), np.array(b' '), ()) + self._test(pyfunc, cfunc, np.array(b' 123 '), (), (b' ',), 0) + self._test(pyfunc, cfunc, (b' 123 ',), 0, np.array(b' '), ()) + + def test_return_add(self): + pyfunc = return_add + cfunc = jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array('ab'), (), np.array('cd'), ()) + self._test(pyfunc, cfunc, np.array('ab'), (), ('cd',), 0) + self._test(pyfunc, cfunc, ('ab',), 0, np.array('cd'), ()) + + self._test(pyfunc, cfunc, np.array(b'ab'), (), np.array(b'cd'), ()) + self._test(pyfunc, cfunc, np.array(b'ab'), (), (b'cd',), 0) + self._test(pyfunc, cfunc, (b'ab',), 0, np.array(b'cd'), ()) + + def test_return_iadd(self): + pyfunc = return_iadd + cfunc = jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array('ab'), (), np.array('cd'), ()) + self._test(pyfunc, cfunc, np.array('ab'), (), ('cd',), 0) + expected = pyfunc(['ab'], 0, np.array('cd'), ()) + result = pyfunc(['ab'], 0, np.array('cd'), ()) + self.assertPreciseEqual(result, expected) + + self._test(pyfunc, cfunc, np.array(b'ab'), (), np.array(b'cd'), ()) + self._test(pyfunc, cfunc, np.array(b'ab'), (), (b'cd',), 0) + expected = pyfunc([b'ab'], 0, np.array(b'cd'), ()) + result = pyfunc([b'ab'], 0, np.array(b'cd'), ()) + self.assertPreciseEqual(result, expected) + + def test_return_mul(self): + pyfunc = return_mul + cfunc = jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array('ab'), (), (5,), 0) + self._test(pyfunc, cfunc, (5,), 0, np.array('ab'), ()) + self._test(pyfunc, cfunc, 
np.array(b'ab'), (), (5,), 0) + self._test(pyfunc, cfunc, (5,), 0, np.array(b'ab'), ()) + + def test_return_not(self): + pyfunc = return_not + cfunc = jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array('ab'), ()) + self._test(pyfunc, cfunc, np.array(b'ab'), ()) + self._test(pyfunc, cfunc, (b'ab',), 0) + + self._test(pyfunc, cfunc, np.array(''), ()) + self._test(pyfunc, cfunc, np.array(b''), ()) + self._test(pyfunc, cfunc, (b'',), 0) + + def test_join(self): + pyfunc = join_string_array + cfunc = jit(nopython=True)(pyfunc) + + self._test(pyfunc, cfunc, np.array(["hi", "there"])) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_unicode_names.py b/venv/lib/python3.10/site-packages/numba/tests/test_unicode_names.py new file mode 100644 index 0000000000000000000000000000000000000000..0ed8fb756418ee993989ccded14f9b3cedb2521d --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_unicode_names.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- + + +from numba import njit, cfunc +from numba.tests.support import TestCase, unittest +from numba.core import cgutils + +unicode_name1 = u""" +def unicode_name1(ಠ_ರೃ, ಠਊಠ): + return (ಠ_ರೃ) + (ಠਊಠ) +""" + +unicode_name2 = u""" +def Ծ_Ծ(ಠ_ರೃ, ಠਊಠ): + return (ಠ_ರೃ) + (ಠਊಠ) +""" + + +class TestUnicodeNames(TestCase): + def make_testcase(self, src, fname): + glb = {} + exec(src, glb) + fn = glb[fname] + return fn + + def test_unicode_name1(self): + fn = self.make_testcase(unicode_name1, 'unicode_name1') + cfn = njit(fn) + self.assertEqual(cfn(1, 2), 3) + + def test_unicode_name2(self): + fn = self.make_testcase(unicode_name2, 'Ծ_Ծ') + cfn = njit(fn) + self.assertEqual(cfn(1, 2), 3) + + def test_cfunc(self): + fn = self.make_testcase(unicode_name2, 'Ծ_Ծ') + cfn = cfunc("int32(int32, int32)")(fn) + self.assertEqual(cfn.ctypes(1, 2), 3) + + +class TestUnicodeUtils(TestCase): + def test_normalize_ir_text(self): + # non-unicode input + out = 
cgutils.normalize_ir_text('abc') + # str returned + self.assertIsInstance(out, str) + # try encoding to latin + out.encode('latin1') + + def test_normalize_ir_text_unicode(self): + # unicode input + out = cgutils.normalize_ir_text(unicode_name2) + # str returned + self.assertIsInstance(out, str) + # try encoding to latin + out.encode('latin1') + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_unpack_sequence.py b/venv/lib/python3.10/site-packages/numba/tests/test_unpack_sequence.py new file mode 100644 index 0000000000000000000000000000000000000000..55db569682664f4844cd289cc317994e83ad03ba --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_unpack_sequence.py @@ -0,0 +1,234 @@ +import numpy as np + +import unittest +from numba import jit, njit +from numba.core import errors, types +from numba import typeof +from numba.tests.support import TestCase, MemoryLeakMixin +from numba.tests.support import no_pyobj_flags as nullary_no_pyobj_flags +from numba.tests.support import force_pyobj_flags as nullary_force_pyobj_flags + +force_pyobj_flags = {'forceobj': True} +no_pyobj_flags = {'nopython': True} + + +def unpack_list(l): + a, b, c = l + return (a, b, c) + + +def unpack_shape(a): + x, y, z = a.shape + return x + y + z + + +def unpack_range(): + a, b, c = range(3) + return a + b + c + + +def unpack_range_too_small(): + a, b, c = range(2) + return a + b + c + + +def unpack_range_too_large(): + a, b, c = range(4) + return a + b + c + + +def unpack_tuple(): + a, b, c = (1, 2, 3) + return a + b + c + + +def unpack_tuple_too_small(): + a, b, c = (1, 2) + return a + b + c + + +def unpack_tuple_too_large(): + a, b, c = (1, 2, 3, 4) + return a + b + c + + +def unpack_heterogeneous_tuple_too_small(): + a, b, c = (1, 2.5j) + return a + b + c + + +def unpack_heterogeneous_tuple_too_large(): + a, b, c = (1, 2.5, 3j, 4) + return a + b + c + + +def unpack_heterogeneous_tuple(): + a, b, c = (1, 2.5, 3j) 
+ return a + b + c + + +def unpack_nested_heterogeneous_tuple(): + a, (b, c) = (1, (2.5, 3j)) + return a + b + c + + +def unpack_arbitrary(seq): + a, b = seq + return b, a + + +def unpack_nrt(): + a = np.zeros(1) + b = np.zeros(2) + tup = b, a + alpha, beta = tup + return alpha, beta + + +def chained_unpack_assign1(x, y): + # Used to fail in object mode (issue #580) + a = (b, c) = (x, y) + (d, e) = a + return d + e + b + c + + +def conditional_swap(x, y): + # Used to produce invalid code (issue #977) + if x > 0: + x, y = y, x + return x, y + + +class TestUnpack(MemoryLeakMixin, TestCase): + + def check_nullary_npm(self, pyfunc): + cfunc = njit(pyfunc) + self.assertPreciseEqual(cfunc(), pyfunc()) + + def check_nullary_objmode(self, pyfunc): + cfunc = jit(forceobj=True)(pyfunc) + self.assertPreciseEqual(cfunc(), pyfunc()) + + def test_unpack_list(self): + pyfunc = unpack_list + cfunc = jit(forceobj=True)(pyfunc) + l = [1, 2, 3] + self.assertEqual(cfunc(l), pyfunc(l)) + + def test_unpack_shape(self): + pyfunc = unpack_shape + cfunc = jit((types.Array(dtype=types.int32, ndim=3, layout='C'),), + forceobj=True)(pyfunc) + a = np.zeros(shape=(1, 2, 3)).astype(np.int32) + self.assertPreciseEqual(cfunc(a), pyfunc(a)) + + def test_unpack_shape_npm(self): + pyfunc = unpack_shape + cfunc = njit((types.Array(dtype=types.int32, ndim=3, layout='C'),), + )(pyfunc) + a = np.zeros(shape=(1, 2, 3)).astype(np.int32) + self.assertPreciseEqual(cfunc(a), pyfunc(a)) + + def test_unpack_range(self): + self.check_nullary_objmode(unpack_range) + + def test_unpack_range_npm(self): + self.check_nullary_npm(unpack_range) + + def test_unpack_tuple(self): + self.check_nullary_objmode(unpack_tuple) + + def test_unpack_tuple_npm(self): + self.check_nullary_npm(unpack_tuple) + + def test_unpack_heterogeneous_tuple(self): + self.check_nullary_objmode(unpack_heterogeneous_tuple) + + def test_unpack_heterogeneous_tuple_npm(self): + self.check_nullary_npm(unpack_heterogeneous_tuple) + + def 
test_unpack_nested_heterogeneous_tuple(self): + self.check_nullary_objmode(unpack_nested_heterogeneous_tuple) + + def test_unpack_nested_heterogeneous_tuple_npm(self): + self.check_nullary_npm(unpack_nested_heterogeneous_tuple) + + def test_chained_unpack_assign(self, flags=force_pyobj_flags): + pyfunc = chained_unpack_assign1 + cfunc = jit((types.int32, types.int32), **flags)(pyfunc) + args = (4, 5) + self.assertPreciseEqual(cfunc(*args), pyfunc(*args)) + + def test_chained_unpack_assign_npm(self): + self.test_chained_unpack_assign(flags=no_pyobj_flags) + + def check_unpack_error(self, pyfunc, flags=force_pyobj_flags, + exc=ValueError): + with self.assertRaises(exc): + cfunc = jit((), **flags)(pyfunc) + cfunc() + + def test_unpack_tuple_too_small(self): + self.check_unpack_error(unpack_tuple_too_small) + self.check_unpack_error(unpack_heterogeneous_tuple_too_small) + + def test_unpack_tuple_too_small_npm(self): + self.check_unpack_error(unpack_tuple_too_small, no_pyobj_flags, + errors.TypingError) + self.check_unpack_error(unpack_heterogeneous_tuple_too_small, + no_pyobj_flags, errors.TypingError) + + def test_unpack_tuple_too_large(self): + self.check_unpack_error(unpack_tuple_too_large) + self.check_unpack_error(unpack_heterogeneous_tuple_too_large) + + def test_unpack_tuple_too_large_npm(self): + self.check_unpack_error(unpack_tuple_too_large, no_pyobj_flags, + errors.TypingError) + self.check_unpack_error(unpack_heterogeneous_tuple_too_large, + no_pyobj_flags, errors.TypingError) + + def test_unpack_range_too_small(self): + self.check_unpack_error(unpack_range_too_small) + + def test_unpack_range_too_small_npm(self): + self.check_unpack_error(unpack_range_too_small, no_pyobj_flags) + + def test_unpack_range_too_large(self): + self.check_unpack_error(unpack_range_too_large) + + def test_unpack_range_too_large_npm(self): + self.check_unpack_error(unpack_range_too_large, no_pyobj_flags) + + def check_conditional_swap(self, flags=force_pyobj_flags): + cfunc = 
jit((types.int32, types.int32), **flags)(conditional_swap) + self.assertPreciseEqual(cfunc(4, 5), (5, 4)) + self.assertPreciseEqual(cfunc(0, 5), (0, 5)) + + def test_conditional_swap(self): + self.check_conditional_swap() + + def test_conditional_swap_npm(self): + self.check_conditional_swap(no_pyobj_flags) + + def test_unpack_tuple_of_arrays(self): + tup = tuple(np.zeros(i + 1) for i in range(2)) + tupty = typeof(tup) + pyfunc = unpack_arbitrary + cfunc = njit((tupty,))(pyfunc) + self.assertPreciseEqual(cfunc(tup), pyfunc(tup)) + + def test_unpack_nrt(self): + pyfunc = unpack_nrt + cfunc = njit((),)(pyfunc) + self.assertPreciseEqual(cfunc(), pyfunc()) + + def test_invalid_unpack(self): + pyfunc = unpack_arbitrary + with self.assertRaises(errors.TypingError) as raises: + njit((types.int32,))(pyfunc) + self.assertIn("failed to unpack int32", str(raises.exception)) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_unpickle_without_module.py b/venv/lib/python3.10/site-packages/numba/tests/test_unpickle_without_module.py new file mode 100644 index 0000000000000000000000000000000000000000..ddbfbd7512d75dabe9bfce41f57fbb81fa65d2b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_unpickle_without_module.py @@ -0,0 +1,49 @@ +import unittest +import pickle +import sys +import tempfile +from pathlib import Path + + +class TestUnpickleDeletedModule(unittest.TestCase): + def test_loading_pickle_with_no_module(self): + """Create a module that uses Numba, import a function from it. + Then delete the module and pickle the function. The function + should load from the pickle without a problem. + + Note - This is a simplified version of how Numba might be used + on a distributed system using e.g. dask distributed. With the + pickle being sent to the worker but not the original module. 
+ """ + + # Source code for temporary module we will make + source = "\n".join( + [ + "from numba import vectorize", + "@vectorize(['float64(float64)'])", + "def inc1(x):", + " return x + 1", + ] + ) + + # Create a temporary directory and add it to path. + modname = "tmp_module" + with tempfile.TemporaryDirectory() as tmp_dir: + sys.path.append(tmp_dir) + + # Create tmp_module.py in there with our source code above. + filename = Path(f"{tmp_dir}/{modname}.py") + f = open(filename, "a") + f.write(source) + f.close() + + # Import the temporary module before file is deleted + from tmp_module import inc1 + + # Remove from imported libraries + del sys.modules[modname] + + # Pickle function and assert that it loads correctly + pkl = pickle.dumps(inc1) + f = pickle.loads(pkl) + self.assertEqual(f(2), 3) diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_unsafe_intrinsics.py b/venv/lib/python3.10/site-packages/numba/tests/test_unsafe_intrinsics.py new file mode 100644 index 0000000000000000000000000000000000000000..4de35f2c209ff12130e82a68afc409f54881ecbb --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_unsafe_intrinsics.py @@ -0,0 +1,232 @@ +import random +import numpy as np + +from numba.tests.support import TestCase, captured_stdout +from numba import njit, literally +from numba.core import types +from numba.cpython.unsafe.tuple import tuple_setitem, build_full_slice_tuple +from numba.np.unsafe.ndarray import to_fixed_tuple, empty_inferred +from numba.core.unsafe.bytes import memcpy_region +from numba.core.unsafe.refcount import dump_refcount +from numba.cpython.unsafe.numbers import trailing_zeros, leading_zeros +from numba.core.errors import TypingError + + +class TestTupleIntrinsic(TestCase): + """Tests for numba.unsafe.tuple + """ + def test_tuple_setitem(self): + @njit + def foo(tup, idxs, vals): + out_tup = tup + for i, v in zip(idxs, vals): + out_tup = tuple_setitem(out_tup, i, v) + return tup, out_tup + + random.seed(123) + for 
_ in range(20): + # Random data + n = random.randint(1, 10) + tup = tuple([random.randint(0, n) for i in range(n)]) + vals = tuple([random.randint(10, 20) for i in range(n)]) + idxs = list(range(len(vals))) + random.shuffle(idxs) + idxs = tuple(idxs) + # Expect + expect_tup = tuple(tup) + expect_out = np.asarray(expect_tup) + expect_out[np.asarray(idxs)] = vals + # Got + got_tup, got_out = foo(tup, idxs, vals) + # Check + self.assertEqual(got_tup, expect_tup) + self.assertEqual(got_out, tuple(expect_out)) + + def test_slice_tuple(self): + @njit + def full_slice_array(a, n): + # Since numba slices can't be boxed at the moment + return a[build_full_slice_tuple(literally(n))] + + for n in range(1, 3): + a = np.random.random(np.arange(n) + 1) + for i in range(1, n + 1): + np.testing.assert_array_equal(a, full_slice_array(a, i)) + with self.assertRaises(TypingError): + # numpy would throw an IndexError here + full_slice_array(a, n + 1) + + +class TestNdarrayIntrinsic(TestCase): + """Tests for numba.unsafe.ndarray + """ + def test_to_fixed_tuple(self): + const = 3 + + @njit + def foo(array): + a = to_fixed_tuple(array, length=1) + b = to_fixed_tuple(array, 2) + c = to_fixed_tuple(array, const) + d = to_fixed_tuple(array, 0) + return a, b, c, d + + np.random.seed(123) + for _ in range(10): + # Random data + arr = np.random.random(3) + # Run + a, b, c, d = foo(arr) + # Check + self.assertEqual(a, tuple(arr[:1])) + self.assertEqual(b, tuple(arr[:2])) + self.assertEqual(c, tuple(arr[:3])) + self.assertEqual(d, ()) + + # Check error with ndim!=1 + with self.assertRaises(TypingError) as raises: + foo(np.random.random((1, 2))) + self.assertIn("Not supported on array.ndim=2", + str(raises.exception)) + + # Check error with non-constant length + @njit + def tuple_with_length(array, length): + return to_fixed_tuple(array, length) + + with self.assertRaises(TypingError) as raises: + tuple_with_length(np.random.random(3), 1) + expectmsg = "*length* argument must be a constant" + 
self.assertIn(expectmsg, str(raises.exception)) + + def test_issue_3586_variant1(self): + @njit + def func(): + S = empty_inferred((10,)) + a = 1.1 + for i in range(len(S)): + S[i] = a + 2 + return S + + got = func() + expect = np.asarray([3.1] * 10) + np.testing.assert_array_equal(got, expect) + + def test_issue_3586_variant2(self): + @njit + def func(): + S = empty_inferred((10,)) + a = 1.1 + for i in range(S.size): + S[i] = a + 2 + return S + + got = func() + expect = np.asarray([3.1] * 10) + np.testing.assert_array_equal(got, expect) + + +class TestBytesIntrinsic(TestCase): + """Tests for numba.unsafe.bytes + """ + def test_memcpy_region(self): + @njit + def foo(dst, dst_index, src, src_index, nbytes): + # last arg is assume 1 byte alignment + memcpy_region(dst.ctypes.data, dst_index, + src.ctypes.data, src_index, nbytes, 1) + + d = np.zeros(10, dtype=np.int8) + s = np.arange(10, dtype=np.int8) + + # copy s[1:6] to d[4:9] + foo(d, 4, s, 1, 5) + + expected = [0, 0, 0, 0, 1, 2, 3, 4, 5, 0] + np.testing.assert_array_equal(d, expected) + + +class TestRefCount(TestCase): + def test_dump_refcount(self): + @njit + def use_dump_refcount(): + a = np.ones(10) + b = (a, a) + dump_refcount(a) + dump_refcount(b) + + # Capture output to sys.stdout + with captured_stdout() as stream: + use_dump_refcount() + + output = stream.getvalue() + # Check that it printed + pat = "dump refct of {}" + aryty = types.float64[::1] + tupty = types.Tuple.from_types([aryty] * 2) + self.assertIn(pat.format(aryty), output) + self.assertIn(pat.format(tupty), output) + + +class TestZeroCounts(TestCase): + def test_zero_count(self): + lz = njit(lambda x: leading_zeros(x)) + tz = njit(lambda x: trailing_zeros(x)) + + evens = [2, 42, 126, 128] + + for T in types.unsigned_domain: + self.assertTrue(tz(T(0)) == lz(T(0)) == T.bitwidth) + for i in range(T.bitwidth): + val = T(2 ** i) + self.assertEqual(lz(val) + tz(val) + 1, T.bitwidth) + for n in evens: + self.assertGreater(tz(T(n)), 0) + 
self.assertEqual(tz(T(n + 1)), 0) + + for T in types.signed_domain: + self.assertTrue(tz(T(0)) == lz(T(0)) == T.bitwidth) + for i in range(T.bitwidth - 1): + val = T(2 ** i) + self.assertEqual(lz(val) + tz(val) + 1, T.bitwidth) + self.assertEqual(lz(-val), 0) + self.assertEqual(tz(val), tz(-val)) + for n in evens: + if not T.minval <= n <= T.maxval: + continue + self.assertGreater(tz(T(n)), 0) + self.assertEqual(tz(T(n + 1)), 0) + + def check_error_msg(self, func): + cfunc = njit(lambda *x: func(*x)) + func_name = func._name + + unsupported_types = filter( + lambda x: not isinstance(x, types.Integer), types.number_domain + ) + for typ in sorted(unsupported_types, key=str): + with self.assertRaises(TypingError) as e: + cfunc(typ(2)) + self.assertIn( + "{} is only defined for integers, but value passed was '{}'." + .format(func_name, typ), + str(e.exception), + ) + + # Testing w/ too many/few arguments + def check(args, string): + with self.assertRaises((TypingError, TypeError)) as e: + cfunc(*args) + self.assertIn( + "{}() ".format(func_name), + str(e.exception) + ) + + check((1, 2), "takes 2 positional arguments but 3 were given") + check((), "missing 1 required positional argument") + + def test_trailing_zeros_error(self): + self.check_error_msg(trailing_zeros) + + def test_leading_zeros_error(self): + self.check_error_msg(leading_zeros) diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_usecases.py b/venv/lib/python3.10/site-packages/numba/tests/test_usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..bf58dabe9835c9ee579f751cf8a9478418daf7c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_usecases.py @@ -0,0 +1,192 @@ +import itertools +import unittest +import numpy as np + +from numba import jit, njit +from numba.core import types +from numba.tests import usecases +from numba.tests.support import TestCase + + +class TestUsecases(TestCase): + # NOTE: All these test cases are run in subprocesses to 
achieve total + # isolation. + + @TestCase.run_test_in_subprocess + def test_andor(self): + pyfunc = usecases.andor + cfunc = njit((types.int32, types.int32))(pyfunc) + + # Argument boundaries + xs = -1, 0, 1, 9, 10, 11 + ys = -1, 0, 1, 9, 10, 11 + + for args in itertools.product(xs, ys): + self.assertEqual(pyfunc(*args), cfunc(*args), "args %s" % (args,)) + + @TestCase.run_test_in_subprocess + def test_sum1d(self): + pyfunc = usecases.sum1d + cfunc = njit((types.int32, types.int32))(pyfunc) + + ss = -1, 0, 1, 100, 200 + es = -1, 0, 1, 100, 200 + + for args in itertools.product(ss, es): + self.assertEqual(pyfunc(*args), cfunc(*args), args) + + @TestCase.run_test_in_subprocess + def test_sum1d_pyobj(self): + pyfunc = usecases.sum1d + cfunc = jit((types.int32, types.int32), forceobj=True)(pyfunc) + + ss = -1, 0, 1, 100, 200 + es = -1, 0, 1, 100, 200 + + for args in itertools.product(ss, es): + self.assertEqual(pyfunc(*args), cfunc(*args), args) + + @TestCase.run_test_in_subprocess + def test_sum2d(self): + pyfunc = usecases.sum2d + cfunc = njit((types.int32, types.int32))(pyfunc) + + ss = -1, 0, 1, 100, 200 + es = -1, 0, 1, 100, 200 + + for args in itertools.product(ss, es): + self.assertEqual(pyfunc(*args), cfunc(*args), args) + + @TestCase.run_test_in_subprocess + def test_while_count(self): + pyfunc = usecases.while_count + cfunc = njit((types.int32, types.int32))(pyfunc) + + ss = -1, 0, 1, 100, 200 + es = -1, 0, 1, 100, 200 + + for args in itertools.product(ss, es): + self.assertEqual(pyfunc(*args), cfunc(*args), args) + + @TestCase.run_test_in_subprocess + def test_copy_arrays(self): + pyfunc = usecases.copy_arrays + arraytype = types.Array(types.int32, 1, 'A') + cfunc = njit((arraytype, arraytype))(pyfunc) + + nda = 0, 1, 10, 100 + + for nd in nda: + a = np.arange(nd, dtype='int32') + b = np.empty_like(a) + args = a, b + + cfunc(*args) + self.assertPreciseEqual(a, b, msg=str(args)) + + @TestCase.run_test_in_subprocess + def test_copy_arrays2d(self): + pyfunc = 
usecases.copy_arrays2d + arraytype = types.Array(types.int32, 2, 'A') + cfunc = njit((arraytype, arraytype))(pyfunc) + + nda = (0, 0), (1, 1), (2, 5), (4, 25) + + for nd in nda: + d1, d2 = nd + a = np.arange(d1 * d2, dtype='int32').reshape(d1, d2) + b = np.empty_like(a) + args = a, b + + cfunc(*args) + self.assertPreciseEqual(a, b, msg=str(args)) + + @TestCase.run_test_in_subprocess + def test_string_concat(self): + pyfunc = usecases.string_concat + cfunc = jit((types.int32, types.int32), forceobj=True)(pyfunc) + + xs = -1, 0, 1 + ys = -1, 0, 1 + + for x, y in itertools.product(xs, ys): + args = x, y + self.assertEqual(pyfunc(*args), cfunc(*args), args) + + @TestCase.run_test_in_subprocess + def test_string_len(self): + pyfunc = usecases.string_len + cfunc = jit((types.pyobject,), forceobj=True)(pyfunc) + + test_str = '123456' + self.assertEqual(pyfunc(test_str), cfunc(test_str)) + test_str = '1' + self.assertEqual(pyfunc(test_str), cfunc(test_str)) + test_str = '' + self.assertEqual(pyfunc(test_str), cfunc(test_str)) + + @TestCase.run_test_in_subprocess + def test_string_slicing(self): + pyfunc = usecases.string_slicing + cfunc = jit((types.pyobject,) * 3, forceobj=True)(pyfunc) + + test_str = '123456' + self.assertEqual(pyfunc(test_str, 0, 3), cfunc(test_str, 0, 3)) + self.assertEqual(pyfunc(test_str, 1, 5), cfunc(test_str, 1, 5)) + self.assertEqual(pyfunc(test_str, 2, 3), cfunc(test_str, 2, 3)) + + @TestCase.run_test_in_subprocess + def test_string_conversion(self): + pyfunc = usecases.string_conversion + + cfunc = jit((types.int32,), forceobj=True)(pyfunc) + self.assertEqual(pyfunc(1), cfunc(1)) + + cfunc = jit((types.float32,), forceobj=True)(pyfunc) + self.assertEqual(pyfunc(1.1), cfunc(1.1)) + + @TestCase.run_test_in_subprocess + def test_string_comparisons(self): + import operator + pyfunc = usecases.string_comparison + cfunc = jit((types.pyobject, types.pyobject, types.pyobject), + forceobj=True)(pyfunc) + + test_str1 = '123' + test_str2 = '123' + op = 
operator.eq + self.assertEqual(pyfunc(test_str1, test_str2, op), + cfunc(test_str1, test_str2, op)) + + test_str1 = '123' + test_str2 = '456' + op = operator.eq + self.assertEqual(pyfunc(test_str1, test_str2, op), + cfunc(test_str1, test_str2, op)) + + test_str1 = '123' + test_str2 = '123' + op = operator.ne + self.assertEqual(pyfunc(test_str1, test_str2, op), + cfunc(test_str1, test_str2, op)) + + test_str1 = '123' + test_str2 = '456' + op = operator.ne + self.assertEqual(pyfunc(test_str1, test_str2, op), + cfunc(test_str1, test_str2, op)) + + @TestCase.run_test_in_subprocess + def test_blackscholes_cnd(self): + pyfunc = usecases.blackscholes_cnd + cfunc = njit((types.float32,))(pyfunc) + + ds = -0.5, 0, 0.5 + + for d in ds: + args = (d,) + self.assertEqual(pyfunc(*args), cfunc(*args), args) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_vectorization.py b/venv/lib/python3.10/site-packages/numba/tests/test_vectorization.py new file mode 100644 index 0000000000000000000000000000000000000000..924c8ca61969694b69d11bd960b264abfd3afa1d --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_vectorization.py @@ -0,0 +1,87 @@ +import platform +import numpy as np +from numba import types +import unittest +from numba import njit +from numba.core import config +from numba.tests.support import TestCase + +_DEBUG = False +if _DEBUG: + from llvmlite import binding as llvm + # Prints debug info from the LLVMs vectorizer + llvm.set_option("", "--debug-only=loop-vectorize") + + +_skylake_env = { + "NUMBA_CPU_NAME": "skylake-avx512", + "NUMBA_CPU_FEATURES": "", +} + + +@unittest.skipIf(platform.machine() != 'x86_64', 'x86_64 only test') +class TestVectorization(TestCase): + """ + Tests to assert that code which should vectorize does indeed vectorize + """ + def gen_ir(self, func, args_tuple, fastmath=False): + self.assertEqual(config.CPU_NAME, "skylake-avx512") + self.assertEqual(config.CPU_FEATURES, 
"") + + jitted = njit(args_tuple, fastmath=fastmath)(func) + return jitted.inspect_llvm(args_tuple) + + @TestCase.run_test_in_subprocess(envvars=_skylake_env) + def test_nditer_loop(self): + # see https://github.com/numba/numba/issues/5033 + def do_sum(x): + acc = 0 + for v in np.nditer(x): + acc += v.item() + return acc + + llvm_ir = self.gen_ir(do_sum, (types.float64[::1],), fastmath=True) + self.assertIn("vector.body", llvm_ir) + self.assertIn("llvm.loop.isvectorized", llvm_ir) + + # SLP is off by default due to miscompilations, see #8705. Put this into a + # subprocess to isolate any potential issues. + @TestCase.run_test_in_subprocess( + envvars={'NUMBA_SLP_VECTORIZE': '1', **_skylake_env}, + ) + def test_slp(self): + # Sample translated from: + # https://www.llvm.org/docs/Vectorizers.html#the-slp-vectorizer + + def foo(a1, a2, b1, b2, A): + A[0] = a1 * (a1 + b1) + A[1] = a2 * (a2 + b2) + A[2] = a1 * (a1 + b1) + A[3] = a2 * (a2 + b2) + + ty = types.float64 + llvm_ir = self.gen_ir(foo, ((ty,) * 4 + (ty[::1],)), fastmath=True) + self.assertIn("2 x double", llvm_ir) + + @TestCase.run_test_in_subprocess(envvars=_skylake_env) + def test_instcombine_effect(self): + # Without instcombine running ahead of refprune, the IR has refops that + # are trivially prunable (same BB) but the arguments are obfuscated + # through aliases etc. The follow case triggers this situation as the + # typed.List has a structproxy call for computing `len` and getting the + # base pointer for use in iteration. 
+ + def sum_sqrt_list(lst): + acc = 0.0 + for item in lst: + acc += np.sqrt(item) + return acc + + llvm_ir = self.gen_ir(sum_sqrt_list, (types.ListType(types.float64),), + fastmath=True) + self.assertIn("vector.body", llvm_ir) + self.assertIn("llvm.loop.isvectorized", llvm_ir) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_vectorization_type_inference.py b/venv/lib/python3.10/site-packages/numba/tests/test_vectorization_type_inference.py new file mode 100644 index 0000000000000000000000000000000000000000..c183bf602015966a6b75cd4fcbb7114f602435c2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_vectorization_type_inference.py @@ -0,0 +1,41 @@ +from numba import vectorize, jit, bool_, double, int_, float32, typeof, int8 +import unittest +import numpy as np + + +def add(a, b): + return a + b + + +def func(dtypeA, dtypeB): + A = np.arange(10, dtype=dtypeA) + B = np.arange(10, dtype=dtypeB) + return typeof(vector_add(A, B)) + + +class TestVectTypeInfer(unittest.TestCase): + + def test_type_inference(self): + """This is testing numpy ufunc dispatch machinery + """ + global vector_add + vector_add = vectorize([ + bool_(double, int_), + double(double, double), + float32(double, float32), + ])(add) + + def numba_type_equal(a, b): + self.assertEqual(a.dtype, b.dtype) + self.assertEqual(a.ndim, b.ndim) + + numba_type_equal(func(np.dtype(np.float64), np.dtype('i')), bool_[:]) + numba_type_equal(func(np.dtype(np.float64), np.dtype(np.float64)), + double[:]) + # This is because the double(double, double) matches first + numba_type_equal(func(np.dtype(np.float64), np.dtype(np.float32)), + double[:]) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_warnings.py b/venv/lib/python3.10/site-packages/numba/tests/test_warnings.py new file mode 100644 index 
0000000000000000000000000000000000000000..92ea575f35c5742007f59abe48f269dc740daa1b --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_warnings.py @@ -0,0 +1,187 @@ +import os +import subprocess +import sys +import warnings +import numpy as np + +import unittest +from numba import jit +from numba.core.errors import ( + NumbaWarning, + deprecated, + NumbaDeprecationWarning, + NumbaPendingDeprecationWarning, +) +from numba.core import errors +from numba.tests.support import ignore_internal_warnings + + +class TestBuiltins(unittest.TestCase): + + def check_objmode_deprecation_warning(self, w): + # Object mode fall-back is slated for deprecation, check the warning + msg = ("Fall-back from the nopython compilation path to the object " + "mode compilation path has been detected") + self.assertEqual(w.category, NumbaDeprecationWarning) + self.assertIn(msg, str(w.message)) + + def check_nopython_kwarg_missing_warning(self, w): + # nopython default is scheduled to change when objmode fall-back is + # removed, check warning. 
+ msg = ("The \'nopython\' keyword argument was not supplied") + self.assertEqual(w.category, NumbaDeprecationWarning) + self.assertIn(msg, str(w.message)) + + def test_return_type_warning_with_nrt(self): + """ + Rerun test_return_type_warning with nrt + """ + y = np.ones(4, dtype=np.float32) + + def return_external_array(): + return y + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always', NumbaWarning) + ignore_internal_warnings() + + cfunc = jit(nopython=True)(return_external_array) + cfunc() + # No more warning + self.assertEqual(len(w), 0) + + def test_no_warning_with_forceobj(self): + def add(x, y): + a = [] # noqa dead + return x + y + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always', NumbaWarning) + ignore_internal_warnings() + + cfunc = jit(add, forceobj=True) + cfunc(1, 2) + + self.assertEqual(len(w), 0) + + def test_deprecated(self): + @deprecated('foo') + def bar(): + pass + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + ignore_internal_warnings() + bar() + + self.assertEqual(len(w), 1) + self.assertEqual(w[0].category, DeprecationWarning) + self.assertIn('bar', str(w[0].message)) + self.assertIn('foo', str(w[0].message)) + + def test_warnings_fixer(self): + # For some context, see #4083 + + wfix = errors.WarningsFixer(errors.NumbaWarning) + with wfix.catch_warnings('foo', 10): + warnings.warn(errors.NumbaWarning('same')) + warnings.warn(errors.NumbaDeprecationWarning('same')) + ignore_internal_warnings() + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + ignore_internal_warnings() + wfix.flush() + + self.assertEqual(len(w), 2) + # the order of these will be backwards to the above, the + # WarningsFixer flush method sorts with a key based on str + # comparison + self.assertEqual(w[0].category, NumbaDeprecationWarning) + self.assertEqual(w[1].category, NumbaWarning) + self.assertIn('same', str(w[0].message)) + 
self.assertIn('same', str(w[1].message)) + + def test_disable_performance_warnings(self): + + not_found_ret_code = 55 + found_ret_code = 99 + expected = "'parallel=True' was specified but no transformation" + + # NOTE: the error_usecases is needed as the NumbaPerformanceWarning's + # for parallel=True failing to parallelise do not appear for functions + # defined by string eval/exec etc. + parallel_code = """if 1: + import warnings + from numba.tests.error_usecases import foo + import numba + from numba.tests.support import ignore_internal_warnings + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + ignore_internal_warnings() + foo() + for x in w: + if x.category == numba.errors.NumbaPerformanceWarning: + if "%s" in str(x.message): + exit(%s) + exit(%s) + """ % (expected, found_ret_code, not_found_ret_code) + + # run in the standard env, warning should raise + popen = subprocess.Popen([sys.executable, "-c", parallel_code]) + out, err = popen.communicate() + self.assertEqual(popen.returncode, found_ret_code) + + # run in an env with performance warnings disabled, should not warn + env = dict(os.environ) + env['NUMBA_DISABLE_PERFORMANCE_WARNINGS'] = "1" + popen = subprocess.Popen([sys.executable, "-c", parallel_code], env=env) + out, err = popen.communicate() + self.assertEqual(popen.returncode, not_found_ret_code) + + def test_filter_deprecation_warnings(self): + # Filter on base classes of deprecation warnings should apply to Numba's + # deprecation warnings + with warnings.catch_warnings(): + warnings.simplefilter('error') + warnings.simplefilter('ignore', category=DeprecationWarning) + warnings.simplefilter('ignore', category=PendingDeprecationWarning) + warnings.warn(DeprecationWarning("this is ignored")) + warnings.warn(PendingDeprecationWarning("this is ignored")) + warnings.warn(NumbaDeprecationWarning("this is ignored")) + warnings.warn(NumbaPendingDeprecationWarning("this is ignored")) + with 
self.assertRaises(NumbaWarning): + warnings.warn(NumbaWarning("this is not ignored")) + + def test_filter_ignore_numba_deprecation_only(self): + # Make a filter that ignores Numba's deprecation warnings but raises on + # other deprecation warnings + with warnings.catch_warnings(): + warnings.simplefilter('error', category=DeprecationWarning) + warnings.simplefilter('error', category=PendingDeprecationWarning) + warnings.simplefilter('ignore', category=NumbaDeprecationWarning) + warnings.simplefilter('ignore', + category=NumbaPendingDeprecationWarning) + + with self.assertRaises(DeprecationWarning): + warnings.warn(DeprecationWarning("this is not ignored")) + with self.assertRaises(PendingDeprecationWarning): + warnings.warn(PendingDeprecationWarning("this is not ignored")) + + warnings.warn(NumbaDeprecationWarning("this is ignored")) + warnings.warn(NumbaPendingDeprecationWarning("this is ignored")) + + # now make it so that Numba deprecation warnings are raising + warnings.simplefilter('error', category=NumbaDeprecationWarning) + warnings.simplefilter('error', + category=NumbaPendingDeprecationWarning) + + with self.assertRaises(DeprecationWarning): + warnings.warn(NumbaDeprecationWarning("this is not ignored")) + with self.assertRaises(PendingDeprecationWarning): + warnings.warn(NumbaPendingDeprecationWarning( + "this is not ignored")) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/test_withlifting.py b/venv/lib/python3.10/site-packages/numba/tests/test_withlifting.py new file mode 100644 index 0000000000000000000000000000000000000000..dda31c1ebccdaf1e28d8ccc0ebaa33c6a562cb82 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/test_withlifting.py @@ -0,0 +1,1217 @@ +import copy +import warnings +import numpy as np + +import numba +from numba.core.transforms import find_setupwiths, with_lifting +from numba.core.withcontexts import bypass_context, call_context, objmode_context +from 
numba.core.bytecode import FunctionIdentity, ByteCode +from numba.core.interpreter import Interpreter +from numba.core import errors +from numba.core.registry import cpu_target +from numba.core.compiler import compile_ir, DEFAULT_FLAGS +from numba import njit, typeof, objmode, types +from numba.core.extending import overload +from numba.tests.support import (MemoryLeak, TestCase, captured_stdout, + skip_unless_scipy, linux_only, + strace_supported, strace, + expected_failure_py311, + expected_failure_py312, + expected_failure_py313) +from numba.core.utils import PYVERSION +from numba.experimental import jitclass +import unittest + + +def get_func_ir(func): + func_id = FunctionIdentity.from_function(func) + bc = ByteCode(func_id=func_id) + interp = Interpreter(func_id) + func_ir = interp.interpret(bc) + return func_ir + + +def lift1(): + print("A") + with bypass_context: + print("B") + b() + print("C") + + +def lift2(): + x = 1 + print("A", x) + x = 1 + with bypass_context: + print("B", x) + x += 100 + b() + x += 1 + with bypass_context: + print("C", x) + b() + x += 10 + x += 1 + print("D", x) + + +def lift3(): + x = 1 + y = 100 + print("A", x, y) + with bypass_context: + print("B") + b() + x += 100 + with bypass_context: + print("C") + y += 100000 + b() + x += 1 + y += 1 + print("D", x, y) + + +def lift4(): + x = 0 + print("A", x) + x += 10 + with bypass_context: + print("B") + b() + x += 1 + for i in range(10): + with bypass_context: + print("C") + b() + x += i + with bypass_context: + print("D") + b() + if x: + x *= 10 + x += 1 + print("E", x) + + +def lift5(): + print("A") + + +def liftcall1(): + x = 1 + print("A", x) + with call_context: + x += 1 + print("B", x) + return x + + +def liftcall2(): + x = 1 + print("A", x) + with call_context: + x += 1 + print("B", x) + with call_context: + x += 10 + print("C", x) + return x + + +def liftcall3(): + x = 1 + print("A", x) + with call_context: + if x > 0: + x += 1 + print("B", x) + with call_context: + for i in 
range(10): + x += i + print("C", x) + return x + + +def liftcall4(): + with call_context: + with call_context: + pass + + +def liftcall5(): + for i in range(10): + with call_context: + print(i) + if i == 5: + print("A") + break + return i + + +def lift_undefiend(): + with undefined_global_var: + pass + + +bogus_contextmanager = object() + + +def lift_invalid(): + with bogus_contextmanager: + pass + + +gv_type = types.intp + + +class TestWithFinding(TestCase): + def check_num_of_with(self, func, expect_count): + the_ir = get_func_ir(func) + ct = len(find_setupwiths(the_ir)[0]) + self.assertEqual(ct, expect_count) + + def test_lift1(self): + self.check_num_of_with(lift1, expect_count=1) + + def test_lift2(self): + self.check_num_of_with(lift2, expect_count=2) + + def test_lift3(self): + self.check_num_of_with(lift3, expect_count=1) + + def test_lift4(self): + self.check_num_of_with(lift4, expect_count=2) + + def test_lift5(self): + self.check_num_of_with(lift5, expect_count=0) + + +class BaseTestWithLifting(TestCase): + def setUp(self): + super(BaseTestWithLifting, self).setUp() + self.typingctx = cpu_target.typing_context + self.targetctx = cpu_target.target_context + self.flags = DEFAULT_FLAGS + + def check_extracted_with(self, func, expect_count, expected_stdout): + the_ir = get_func_ir(func) + new_ir, extracted = with_lifting( + the_ir, self.typingctx, self.targetctx, self.flags, + locals={}, + ) + self.assertEqual(len(extracted), expect_count) + cres = self.compile_ir(new_ir) + + with captured_stdout() as out: + cres.entry_point() + + self.assertEqual(out.getvalue(), expected_stdout) + + def compile_ir(self, the_ir, args=(), return_type=None): + typingctx = self.typingctx + targetctx = self.targetctx + flags = self.flags + return compile_ir(typingctx, targetctx, the_ir, args, + return_type, flags, locals={}) + + +class TestLiftByPass(BaseTestWithLifting): + + def test_lift1(self): + self.check_extracted_with(lift1, expect_count=1, + expected_stdout="A\nC\n") + + 
def test_lift2(self): + self.check_extracted_with(lift2, expect_count=2, + expected_stdout="A 1\nD 3\n") + + def test_lift3(self): + self.check_extracted_with(lift3, expect_count=1, + expected_stdout="A 1 100\nD 2 101\n") + + def test_lift4(self): + self.check_extracted_with(lift4, expect_count=2, + expected_stdout="A 0\nE 11\n") + + def test_lift5(self): + self.check_extracted_with(lift5, expect_count=0, + expected_stdout="A\n") + + +class TestLiftCall(BaseTestWithLifting): + + def check_same_semantic(self, func): + """Ensure same semantic with non-jitted code + """ + jitted = njit(func) + with captured_stdout() as got: + jitted() + + with captured_stdout() as expect: + func() + + self.assertEqual(got.getvalue(), expect.getvalue()) + + def test_liftcall1(self): + self.check_extracted_with(liftcall1, expect_count=1, + expected_stdout="A 1\nB 2\n") + self.check_same_semantic(liftcall1) + + def test_liftcall2(self): + self.check_extracted_with(liftcall2, expect_count=2, + expected_stdout="A 1\nB 2\nC 12\n") + self.check_same_semantic(liftcall2) + + def test_liftcall3(self): + self.check_extracted_with(liftcall3, expect_count=2, + expected_stdout="A 1\nB 2\nC 47\n") + self.check_same_semantic(liftcall3) + + def test_liftcall4(self): + accept = (errors.TypingError, errors.NumbaRuntimeError, + errors.NumbaValueError, errors.CompilerError) + with self.assertRaises(accept) as raises: + njit(liftcall4)() + # Known error. We only support one context manager per function + # for body that are lifted. 
+ msg = ("compiler re-entrant to the same function signature") + self.assertIn(msg, str(raises.exception)) + + @expected_failure_py311 + @expected_failure_py312 + @expected_failure_py313 + def test_liftcall5(self): + self.check_extracted_with(liftcall5, expect_count=1, + expected_stdout="0\n1\n2\n3\n4\n5\nA\n") + self.check_same_semantic(liftcall5) + + +def expected_failure_for_list_arg(fn): + def core(self, *args, **kwargs): + with self.assertRaises(errors.TypingError) as raises: + fn(self, *args, **kwargs) + self.assertIn('Does not support list type', + str(raises.exception)) + return core + + +def expected_failure_for_function_arg(fn): + def core(self, *args, **kwargs): + with self.assertRaises(errors.TypingError) as raises: + fn(self, *args, **kwargs) + self.assertIn('Does not support function type', + str(raises.exception)) + return core + + +class TestLiftObj(MemoryLeak, TestCase): + + def setUp(self): + warnings.simplefilter("error", errors.NumbaWarning) + + def tearDown(self): + warnings.resetwarnings() + + def assert_equal_return_and_stdout(self, pyfunc, *args): + py_args = copy.deepcopy(args) + c_args = copy.deepcopy(args) + cfunc = njit(pyfunc) + + with captured_stdout() as stream: + expect_res = pyfunc(*py_args) + expect_out = stream.getvalue() + + # avoid compiling during stdout-capturing for easier print-debugging + cfunc.compile(tuple(map(typeof, c_args))) + with captured_stdout() as stream: + got_res = cfunc(*c_args) + got_out = stream.getvalue() + + self.assertEqual(expect_out, got_out) + self.assertPreciseEqual(expect_res, got_res) + + def test_lift_objmode_basic(self): + def bar(ival): + print("ival =", {'ival': ival // 2}) + + def foo(ival): + ival += 1 + with objmode_context: + bar(ival) + return ival + 1 + + def foo_nonglobal(ival): + ival += 1 + with numba.objmode: + bar(ival) + return ival + 1 + + self.assert_equal_return_and_stdout(foo, 123) + self.assert_equal_return_and_stdout(foo_nonglobal, 123) + + def test_lift_objmode_array_in(self): 
+ def bar(arr): + print({'arr': arr // 2}) + # arr is modified. the effect is visible outside. + arr *= 2 + + def foo(nelem): + arr = np.arange(nelem).astype(np.int64) + with objmode_context: + # arr is modified inplace inside bar() + bar(arr) + return arr + 1 + + nelem = 10 + self.assert_equal_return_and_stdout(foo, nelem) + + def test_lift_objmode_define_new_unused(self): + def bar(y): + print(y) + + def foo(x): + with objmode_context(): + y = 2 + x # defined but unused outside + a = np.arange(y) # defined but unused outside + bar(a) + return x + + arg = 123 + self.assert_equal_return_and_stdout(foo, arg) + + def test_lift_objmode_return_simple(self): + def inverse(x): + print(x) + return 1 / x + + def foo(x): + with objmode_context(y="float64"): + y = inverse(x) + return x, y + + def foo_nonglobal(x): + with numba.objmode(y="float64"): + y = inverse(x) + return x, y + + arg = 123 + self.assert_equal_return_and_stdout(foo, arg) + self.assert_equal_return_and_stdout(foo_nonglobal, arg) + + def test_lift_objmode_return_array(self): + def inverse(x): + print(x) + return 1 / x + + def foo(x): + with objmode_context(y="float64[:]", z="int64"): + y = inverse(x) + z = int(y[0]) + return x, y, z + + arg = np.arange(1, 10, dtype=np.float64) + self.assert_equal_return_and_stdout(foo, arg) + + @expected_failure_for_list_arg + def test_lift_objmode_using_list(self): + def foo(x): + with objmode_context(y="float64[:]"): + print(x) + x[0] = 4 + print(x) + y = [1, 2, 3] + x + y = np.asarray([1 / i for i in y]) + return x, y + + arg = [1, 2, 3] + self.assert_equal_return_and_stdout(foo, arg) + + def test_lift_objmode_var_redef(self): + def foo(x): + for x in range(x): + pass + if x: + x += 1 + with objmode_context(x="intp"): + print(x) + x -= 1 + print(x) + for i in range(x): + x += i + print(x) + return x + + arg = 123 + self.assert_equal_return_and_stdout(foo, arg) + + @expected_failure_for_list_arg + def test_case01_mutate_list_ahead_of_ctx(self): + def foo(x, z): + x[2] = z 
+ + with objmode_context(): + # should print [1, 2, 15] but prints [1, 2, 3] + print(x) + + with objmode_context(): + x[2] = 2 * z + # should print [1, 2, 30] but prints [1, 2, 15] + print(x) + + return x + + self.assert_equal_return_and_stdout(foo, [1, 2, 3], 15) + + def test_case02_mutate_array_ahead_of_ctx(self): + def foo(x, z): + x[2] = z + + with objmode_context(): + # should print [1, 2, 15] + print(x) + + with objmode_context(): + x[2] = 2 * z + # should print [1, 2, 30] + print(x) + + return x + + x = np.array([1, 2, 3]) + self.assert_equal_return_and_stdout(foo, x, 15) + + @expected_failure_for_list_arg + def test_case03_create_and_mutate(self): + def foo(x): + with objmode_context(y='List(int64)'): + y = [1, 2, 3] + with objmode_context(): + y[2] = 10 + return y + self.assert_equal_return_and_stdout(foo, 1) + + def test_case04_bogus_variable_type_info(self): + + def foo(x): + # should specifying nonsense type info be considered valid? + with objmode_context(k="float64[:]"): + print(x) + return x + + x = np.array([1, 2, 3]) + cfoo = njit(foo) + with self.assertRaises(errors.TypingError) as raises: + cfoo(x) + self.assertIn( + "Invalid type annotation on non-outgoing variables", + str(raises.exception), + ) + + def test_case05_bogus_type_info(self): + def foo(x): + # should specifying the wrong type info be considered valid? + # z is complex. + # Note: for now, we will coerce for scalar and raise for array + with objmode_context(z="float64[:]"): + z = x + 1.j + return z + + x = np.array([1, 2, 3]) + cfoo = njit(foo) + with self.assertRaises(TypeError) as raises: + got = cfoo(x) + self.assertIn( + ("can't unbox array from PyObject into native value." + " The object maybe of a different type"), + str(raises.exception), + ) + + def test_case06_double_objmode(self): + def foo(x): + # would nested ctx in the same scope ever make sense? Is this + # pattern useful? 
+ with objmode_context(): + #with npmmode_context(): not implemented yet + with objmode_context(): + print(x) + return x + + with self.assertRaises(errors.TypingError) as raises: + njit(foo)(123) + # Check that an error occurred in with-lifting in objmode + pat = ("During: resolving callee type: " + r"type\(ObjModeLiftedWith\(<.*>\)\)") + self.assertRegex(str(raises.exception), pat) + + def test_case07_mystery_key_error(self): + # this raises a key error + def foo(x): + with objmode_context(): + t = {'a': x} + u = 3 + return x, t, u + x = np.array([1, 2, 3]) + cfoo = njit(foo) + + with self.assertRaises(errors.TypingError) as raises: + cfoo(x) + + exstr = str(raises.exception) + self.assertIn("Missing type annotation on outgoing variable(s): " + "['t', 'u']", + exstr) + self.assertIn("Example code: with objmode" + "(t='')", + exstr) + + def test_case08_raise_from_external(self): + # this segfaults, expect its because the dict needs to raise as '2' is + # not in the keys until a later loop (looking for `d['0']` works fine). + d = dict() + + def foo(x): + for i in range(len(x)): + with objmode_context(): + k = str(i) + v = x[i] + d[k] = v + print(d['2']) + return x + + x = np.array([1, 2, 3]) + cfoo = njit(foo) + with self.assertRaises(KeyError) as raises: + cfoo(x) + self.assertEqual(str(raises.exception), "'2'") + + def test_case09_explicit_raise(self): + def foo(x): + with objmode_context(): + raise ValueError() + return x + + x = np.array([1, 2, 3]) + cfoo = njit(foo) + with self.assertRaises(errors.CompilerError) as raises: + cfoo(x) + self.assertIn( + ('unsupported control flow due to raise statements inside ' + 'with block'), + str(raises.exception), + ) + + @expected_failure_for_list_arg + def test_case10_mutate_across_contexts(self): + # This shouldn't work due to using List as input. 
+ def foo(x): + with objmode_context(y='List(int64)'): + y = [1, 2, 3] + with objmode_context(): + y[2] = 10 + return y + + x = np.array([1, 2, 3]) + self.assert_equal_return_and_stdout(foo, x) + + def test_case10_mutate_array_across_contexts(self): + # Sub-case of case-10. + def foo(x): + with objmode_context(y='int64[:]'): + y = np.asarray([1, 2, 3], dtype='int64') + with objmode_context(): + # Note: `y` is not an output. + y[2] = 10 + return y + + x = np.array([1, 2, 3]) + self.assert_equal_return_and_stdout(foo, x) + + def test_case11_define_function_in_context(self): + # should this work? no, global name 'bar' is not defined + def foo(x): + with objmode_context(): + def bar(y): + return y + 1 + return x + + x = np.array([1, 2, 3]) + cfoo = njit(foo) + with self.assertRaises(NameError) as raises: + cfoo(x) + self.assertIn( + "global name 'bar' is not defined", + str(raises.exception), + ) + + def test_case12_njit_inside_a_objmode_ctx(self): + # TODO: is this still the cases? + # this works locally but not inside this test, probably due to the way + # compilation is being done + def bar(y): + return y + 1 + + def foo(x): + with objmode_context(y='int64[:]'): + y = njit(bar)(x).astype('int64') + return x + y + + x = np.array([1, 2, 3]) + self.assert_equal_return_and_stdout(foo, x) + + def test_case14_return_direct_from_objmode_ctx(self): + def foo(x): + with objmode_context(x='int64[:]'): + x += 1 + return x + + result = foo(np.array([1, 2, 3])) + np.testing.assert_array_equal(np.array([2, 3, 4]), result) + + # No easy way to handle this yet. 
+ @unittest.expectedFailure + def test_case15_close_over_objmode_ctx(self): + # Fails with Unsupported constraint encountered: enter_with $phi8.1 + def foo(x): + j = 10 + + def bar(x): + with objmode_context(x='int64[:]'): + print(x) + return x + j + return bar(x) + 2 + x = np.array([1, 2, 3]) + self.assert_equal_return_and_stdout(foo, x) + + @skip_unless_scipy + def test_case16_scipy_call_in_objmode_ctx(self): + from scipy import sparse as sp + + def foo(x): + with objmode_context(k='int64'): + print(x) + spx = sp.csr_matrix(x) + # the np.int64 call is pointless, works around: + # https://github.com/scipy/scipy/issues/10206 + # which hit the SciPy 1.3 release. + k = np.int64(spx[0, 0]) + return k + x = np.array([1, 2, 3]) + self.assert_equal_return_and_stdout(foo, x) + + def test_case17_print_own_bytecode(self): + import dis + + def foo(x): + with objmode_context(): + dis.dis(foo) + x = np.array([1, 2, 3]) + self.assert_equal_return_and_stdout(foo, x) + + @expected_failure_for_function_arg + def test_case18_njitfunc_passed_to_objmode_ctx(self): + def foo(func, x): + with objmode_context(): + func(x[0]) + + x = np.array([1, 2, 3]) + fn = njit(lambda z: z + 5) + self.assert_equal_return_and_stdout(foo, fn, x) + + @expected_failure_py311 + @expected_failure_py312 + @expected_failure_py313 + def test_case19_recursion(self): + def foo(x): + with objmode_context(): + if x == 0: + return 7 + ret = foo(x - 1) + return ret + with self.assertRaises((errors.TypingError, errors.CompilerError)) as raises: + cfoo = njit(foo) + cfoo(np.array([1, 2, 3])) + msg = "Untyped global name 'foo'" + self.assertIn(msg, str(raises.exception)) + + @unittest.expectedFailure + def test_case20_rng_works_ok(self): + def foo(x): + np.random.seed(0) + y = np.random.rand() + with objmode_context(z="float64"): + # It's known that the random state does not sync + z = np.random.rand() + return x + z + y + + x = np.array([1, 2, 3]) + self.assert_equal_return_and_stdout(foo, x) + + def 
test_case21_rng_seed_works_ok(self): + def foo(x): + np.random.seed(0) + y = np.random.rand() + with objmode_context(z="float64"): + # Similar to test_case20_rng_works_ok but call seed + np.random.seed(0) + z = np.random.rand() + return x + z + y + + x = np.array([1, 2, 3]) + self.assert_equal_return_and_stdout(foo, x) + + def test_example01(self): + # Example from _ObjModeContextType.__doc__ + def bar(x): + return np.asarray(list(reversed(x.tolist()))) + + @njit + def foo(): + x = np.arange(5) + with objmode(y='intp[:]'): # annotate return type + # this region is executed by object-mode. + y = x + bar(x) + return y + + self.assertPreciseEqual(foo(), foo.py_func()) + self.assertIs(objmode, objmode_context) + + def test_objmode_in_overload(self): + def foo(s): + pass + + @overload(foo) + def foo_overload(s): + def impl(s): + with objmode(out='intp'): + out = s + 3 + return out + return impl + + @numba.njit + def f(): + return foo(1) + + self.assertEqual(f(), 1 + 3) + + def test_objmode_gv_variable(self): + @njit + def global_var(): + with objmode(val=gv_type): + val = 12.3 + return val + + ret = global_var() + # the result is truncated because of the intp return-type + self.assertIsInstance(ret, int) + self.assertEqual(ret, 12) + + def test_objmode_gv_variable_error(self): + @njit + def global_var(): + with objmode(val=gv_type2): + val = 123 + return val + + with self.assertRaisesRegex( + errors.CompilerError, + ("Error handling objmode argument 'val'. 
" + r"Global 'gv_type2' is not defined.") + ): + global_var() + + def test_objmode_gv_mod_attr(self): + @njit + def modattr1(): + with objmode(val=types.intp): + val = 12.3 + return val + + @njit + def modattr2(): + with objmode(val=numba.types.intp): + val = 12.3 + return val + + for fn in (modattr1, modattr2): + with self.subTest(fn=str(fn)): + ret = fn() + # the result is truncated because of the intp return-type + self.assertIsInstance(ret, int) + self.assertEqual(ret, 12) + + def test_objmode_gv_mod_attr_error(self): + @njit + def moderror(): + with objmode(val=types.THIS_DOES_NOT_EXIST): + val = 12.3 + return val + with self.assertRaisesRegex( + errors.CompilerError, + ("Error handling objmode argument 'val'. " + "Getattr cannot be resolved at compile-time"), + ): + moderror() + + def test_objmode_gv_mod_attr_error_multiple(self): + @njit + def moderror(): + with objmode(v1=types.intp, v2=types.THIS_DOES_NOT_EXIST, + v3=types.float32): + v1 = 12.3 + v2 = 12.3 + v3 = 12.3 + return val + with self.assertRaisesRegex( + errors.CompilerError, + ("Error handling objmode argument 'v2'. " + "Getattr cannot be resolved at compile-time"), + ): + moderror() + + def test_objmode_closure_type_in_overload(self): + def foo(): + pass + + @overload(foo) + def foo_overload(): + shrubbery = types.float64[:] + def impl(): + with objmode(out=shrubbery): + out = np.arange(10).astype(np.float64) + return out + return impl + + @njit + def bar(): + return foo() + + self.assertPreciseEqual(bar(), np.arange(10).astype(np.float64)) + + def test_objmode_closure_type_in_overload_error(self): + def foo(): + pass + + @overload(foo) + def foo_overload(): + shrubbery = types.float64[:] + def impl(): + with objmode(out=shrubbery): + out = np.arange(10).astype(np.float64) + return out + # Remove closure var. 
+ # Otherwise, it will "shrubbery" will be a global + del shrubbery + return impl + + @njit + def bar(): + return foo() + + with self.assertRaisesRegex( + errors.TypingError, + ("Error handling objmode argument 'out'. " + "Freevar 'shrubbery' is not defined"), + ): + bar() + + def test_objmode_invalid_use(self): + @njit + def moderror(): + with objmode(bad=1 + 1): + out = 1 + return val + with self.assertRaisesRegex( + errors.CompilerError, + ("Error handling objmode argument 'bad'. " + "The value must be a compile-time constant either as " + "a non-local variable or a getattr expression that " + "refers to a Numba type."), + ): + moderror() + + def test_objmode_multi_type_args(self): + array_ty = types.int32[:] + @njit + def foo(): + # t1 is a string + # t2 is a global type + # t3 is a non-local/freevar + with objmode(t1="float64", t2=gv_type, t3=array_ty): + t1 = 793856.5 + t2 = t1 # to observe truncation + t3 = np.arange(5).astype(np.int32) + return t1, t2, t3 + + t1, t2, t3 = foo() + self.assertPreciseEqual(t1, 793856.5) + self.assertPreciseEqual(t2, 793856) + self.assertPreciseEqual(t3, np.arange(5).astype(np.int32)) + + def test_objmode_jitclass(self): + spec = [ + ('value', types.int32), # a simple scalar field + ('array', types.float32[:]), # an array field + ] + + @jitclass(spec) + class Bag(object): + def __init__(self, value): + self.value = value + self.array = np.zeros(value, dtype=np.float32) + + @property + def size(self): + return self.array.size + + def increment(self, val): + for i in range(self.size): + self.array[i] += val + return self.array + + @staticmethod + def add(x, y): + return x + y + + n = 21 + mybag = Bag(n) + + def foo(): + pass + + @overload(foo) + def foo_overload(): + shrubbery = mybag._numba_type_ + def impl(): + with objmode(out=shrubbery): + out = Bag(123) + out.increment(3) + return out + return impl + + @njit + def bar(): + return foo() + + z = bar() + self.assertIsInstance(z, Bag) + self.assertEqual(z.add(2, 3), 2 + 3) + 
exp_array = np.zeros(123, dtype=np.float32) + 3 + self.assertPreciseEqual(z.array, exp_array) + + + @staticmethod + def case_objmode_cache(x): + with objmode(output='float64'): + output = x / 10 + return output + + def test_objmode_reflected_list(self): + ret_type = typeof([1, 2, 3, 4, 5]) + @njit + def test2(): + with objmode(out=ret_type): + out = [1, 2, 3, 4, 5] + return out + + with self.assertRaises(errors.CompilerError) as raises: + test2() + self.assertRegex( + str(raises.exception), + (r"Objmode context failed. " + r"Argument 'out' is declared as an unsupported type: " + r"reflected list\(int(32|64)\). " + r"Reflected types are not supported."), + ) + + def test_objmode_reflected_set(self): + ret_type = typeof({1, 2, 3, 4, 5}) + @njit + def test2(): + with objmode(result=ret_type): + result = {1, 2, 3, 4, 5} + return result + + with self.assertRaises(errors.CompilerError) as raises: + test2() + self.assertRegex( + str(raises.exception), + (r"Objmode context failed. " + r"Argument 'result' is declared as an unsupported type: " + r"reflected set\(int(32|64)\). " + r"Reflected types are not supported."), + ) + + def test_objmode_typed_dict(self): + ret_type = types.DictType(types.unicode_type, types.int64) + @njit + def test4(): + with objmode(res=ret_type): + res = {'A': 1, 'B': 2} + return res + + with self.assertRaises(TypeError) as raises: + test4() + self.assertIn( + ("can't unbox a " + "as a "), + str(raises.exception), + ) + + def test_objmode_typed_list(self): + ret_type = types.ListType(types.int64) + @njit + def test4(): + with objmode(res=ret_type): + res = [1, 2] + return res + + with self.assertRaises(TypeError) as raises: + test4() + self.assertRegex( + str(raises.exception), + (r"can't unbox a " + r"as a ()?"), + ) + + def test_objmode_use_of_view(self): + # See issue #7158, npm functionality should only be validated if in + # npm. 
+ @njit + def foo(x): + with numba.objmode(y="int64[::1]"): + y = x.view("int64") + return y + + a = np.ones(1, np.int64).view('float64') + expected = foo.py_func(a) + got = foo(a) + self.assertPreciseEqual(expected, got) + + +def case_inner_pyfunc(x): + return x / 10 + + +def case_objmode_cache(x): + with objmode(output='float64'): + output = case_inner_pyfunc(x) + return output + + +class TestLiftObjCaching(MemoryLeak, TestCase): + # Warnings in this test class are converted to errors + + def setUp(self): + warnings.simplefilter("error", errors.NumbaWarning) + + def tearDown(self): + warnings.resetwarnings() + + def check(self, py_func): + first = njit(cache=True)(py_func) + self.assertEqual(first(123), 12.3) + + second = njit(cache=True)(py_func) + self.assertFalse(second._cache_hits) + self.assertEqual(second(123), 12.3) + self.assertTrue(second._cache_hits) + + def test_objmode_caching_basic(self): + def pyfunc(x): + with objmode(output='float64'): + output = x / 10 + return output + + self.check(pyfunc) + + def test_objmode_caching_call_closure_bad(self): + def other_pyfunc(x): + return x / 10 + + def pyfunc(x): + with objmode(output='float64'): + output = other_pyfunc(x) + return output + + self.check(pyfunc) + + def test_objmode_caching_call_closure_good(self): + self.check(case_objmode_cache) + + +class TestBogusContext(BaseTestWithLifting): + def test_undefined_global(self): + the_ir = get_func_ir(lift_undefiend) + + with self.assertRaises(errors.CompilerError) as raises: + with_lifting( + the_ir, self.typingctx, self.targetctx, self.flags, locals={}, + ) + self.assertIn( + "Undefined variable used as context manager", + str(raises.exception), + ) + + def test_invalid(self): + the_ir = get_func_ir(lift_invalid) + + with self.assertRaises(errors.CompilerError) as raises: + with_lifting( + the_ir, self.typingctx, self.targetctx, self.flags, locals={}, + ) + self.assertIn( + "Unsupported context manager in use", + str(raises.exception), + ) + + def 
test_with_as_fails_gracefully(self): + @njit + def foo(): + with open('') as f: + pass + + with self.assertRaises(errors.UnsupportedBytecodeError) as raises: + foo() + + excstr = str(raises.exception) + msg = ("The 'with (context manager) as (variable):' construct is not " + "supported.") + self.assertIn(msg, excstr) + + +class TestMisc(TestCase): + # Tests for miscellaneous objmode issues. Run serially. + + _numba_parallel_test_ = False + + @linux_only + @TestCase.run_test_in_subprocess + def test_no_fork_in_compilation(self): + # Checks that there is no fork/clone/execve during compilation, see + # issue #7881. This needs running in a subprocess as the offending fork + # call that triggered #7881 occurs on the first call to uuid1 as it's + # part if the initialisation process for that function (gets hardware + # address of machine). + + if not strace_supported(): + # Needs strace support. + self.skipTest("strace support missing") + + def force_compile(): + @njit('void()') # force compilation + def f(): + with numba.objmode(): + pass + + # capture these syscalls: + syscalls = ['fork', 'clone', 'execve'] + + # check that compilation does not trigger fork, clone or execve + strace_data = strace(force_compile, syscalls) + self.assertFalse(strace_data) + + +if __name__ == '__main__': + unittest.main() diff --git a/venv/lib/python3.10/site-packages/numba/tests/threading_backend_usecases.py b/venv/lib/python3.10/site-packages/numba/tests/threading_backend_usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..3188cfb9b0067de39927346cf6cb6fac011504c1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/threading_backend_usecases.py @@ -0,0 +1,29 @@ +import signal +import sys +from numba import njit +import numpy as np + + +def sigterm_handler(signum, frame): + raise RuntimeError("Caught SIGTERM") + + +@njit(parallel=True) +def busy_func_inner(a, b): + c = a + b * np.sqrt(a) + np.sqrt(b) + d = np.sqrt(a + b * np.sqrt(a) + np.sqrt(b)) 
+ return c + d + + +def busy_func(a, b, q=None): + sys.stdout.flush() + sys.stderr.flush() + signal.signal(signal.SIGTERM, sigterm_handler) + try: + z = busy_func_inner(a, b) + sys.stdout.flush() + sys.stderr.flush() + return z + except Exception as e: + if q is not None: + q.put(e) diff --git a/venv/lib/python3.10/site-packages/numba/tests/typedlist_usecases.py b/venv/lib/python3.10/site-packages/numba/tests/typedlist_usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..249d3c66f3e30057246152c01ba69b70c13ae230 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/typedlist_usecases.py @@ -0,0 +1,14 @@ +from numba import int32 +from numba.typed import List + + +# global typed-list for testing purposes +global_typed_list = List.empty_list(int32) +for i in (1, 2, 3): + global_typed_list.append(int32(i)) + + +def catch_global(): + x = List() + for i in global_typed_list: + x.append(i) diff --git a/venv/lib/python3.10/site-packages/numba/tests/usecases.py b/venv/lib/python3.10/site-packages/numba/tests/usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..7bdc3119b5dc1d875e79435828a8c616c96efab3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/tests/usecases.py @@ -0,0 +1,93 @@ +import math +import numpy as np +from numba import jit + +_GLOBAL_STR = "abc" + +def sum1d(s, e): + c = 0 + for i in range(s, e): + c += i + return c + + +def sum2d(s, e): + c = 0 + for i in range(s, e): + for j in range(s, e): + c += i * j + return c + + +def while_count(s, e): + i = s + c = 0 + while i < e: + c += i + i += 1 + return c + + +def copy_arrays(a, b): + for i in range(a.shape[0]): + b[i] = a[i] + + +def copy_arrays2d(a, b): + for i in range(a.shape[0]): + for j in range(a.shape[1]): + b[i, j] = a[i, j] + + +def redefine1(): + x = 0 + for i in range(5): + x += 1 + x = 0. 
+ x + for i in range(5): + x += 1 + return x + + +def andor(x, y): + return (x > 0 and x < 10) or (y > 0 and y < 10) + +andornopython = jit(nopython=True)(andor) + + +def string_concat(x, y): + a = "whatzup" + return a + str(x + y) + + +def string_len(s): + return len(s) + + +def string_slicing(s, start, stop): + return s[start:stop] + + +def string_conversion(x): + # the test that calls this has always relied on objmode fallback so force it + object() + return str(x) + + +def string_comparison(s1, s2, op): + return op(s1, s2) + + +def blackscholes_cnd(d): + A1 = 0.31938153 + A2 = -0.356563782 + A3 = 1.781477937 + A4 = -1.821255978 + A5 = 1.330274429 + RSQRT2PI = 0.39894228040143267793994605993438 + K = 1.0 / (1.0 + 0.2316419 * math.fabs(d)) + ret_val = (RSQRT2PI * math.exp(-0.5 * d * d) * + (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5)))))) + if d > 0: + ret_val = 1.0 - ret_val + return ret_val diff --git a/venv/lib/python3.10/site-packages/numba/typed/__init__.py b/venv/lib/python3.10/site-packages/numba/typed/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..62004deb0a7c8868993b48ac4597a24433ce449c --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/typed/__init__.py @@ -0,0 +1,20 @@ +import importlib + + +_delayed_symbols = { + "Dict": ".typeddict", + "List": ".typedlist", +} + + +def __getattr__(name): + # Uses PEP-562 but requires python>3.6 + if name in _delayed_symbols: + modpath = _delayed_symbols[name] + mod = importlib.import_module(modpath, __name__) + return getattr(mod, name) + else: + try: + return importlib.import_module(f".{name}", __name__) + except ModuleNotFoundError: + raise AttributeError diff --git a/venv/lib/python3.10/site-packages/numba/typed/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/typed/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..552f87fc17775a6947299e4aa4c6e4fca17b4d10 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/numba/typed/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/typed/__pycache__/dictimpl.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/typed/__pycache__/dictimpl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d2034fe7b22c7e47e41b7fad92ddfc43dfc265a Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/typed/__pycache__/dictimpl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/typed/__pycache__/dictobject.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/typed/__pycache__/dictobject.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..430c0040cb996867d437aa2f97cc9e4905c99754 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/typed/__pycache__/dictobject.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/typed/__pycache__/listobject.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/typed/__pycache__/listobject.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6dd0f85d05eb33d5df828f87417161572df71eb2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/typed/__pycache__/listobject.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/typed/__pycache__/typeddict.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/typed/__pycache__/typeddict.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5aa331c8e82c3b18e3eb189edfd9b376149dbbe Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/typed/__pycache__/typeddict.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/typed/__pycache__/typedlist.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/typed/__pycache__/typedlist.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..451c2fd8240f9ba7798c6b559f86d46e45149b6f Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/typed/__pycache__/typedlist.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/typed/__pycache__/typedobjectutils.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/typed/__pycache__/typedobjectutils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..101184e680046991beb263de2e92063ea75eba98 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/typed/__pycache__/typedobjectutils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numba/typed/dictimpl.py b/venv/lib/python3.10/site-packages/numba/typed/dictimpl.py new file mode 100644 index 0000000000000000000000000000000000000000..fc35e0dc1bcea3c98db2b4f130620caca351096f --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/typed/dictimpl.py @@ -0,0 +1,43 @@ +""" +This file implements the lowering for `dict()` +""" +from numba.core import types +from numba.core.imputils import lower_builtin + + +_message_dict_support = """ +Unsupported use of `dict()` with keyword argument(s). \ +The only supported uses are `dict()` or `dict(*iterable)`. +""".strip() + + +@lower_builtin(dict, types.IterableType) +def dict_constructor(context, builder, sig, args): + from numba.typed import Dict + + dicttype = sig.return_type + kt, vt = dicttype.key_type, dicttype.value_type + + def dict_impl(iterable): + res = Dict.empty(kt, vt) + for k, v in iterable: + res[k] = v + return res + + return context.compile_internal(builder, dict_impl, sig, args) + + +@lower_builtin(dict) +def impl_dict(context, builder, sig, args): + """ + The `dict()` implementation simply forwards the work to `Dict.empty()`. 
+ """ + from numba.typed import Dict + + dicttype = sig.return_type + kt, vt = dicttype.key_type, dicttype.value_type + + def call_ctor(): + return Dict.empty(kt, vt) + + return context.compile_internal(builder, call_ctor, sig, args) diff --git a/venv/lib/python3.10/site-packages/numba/typed/dictobject.py b/venv/lib/python3.10/site-packages/numba/typed/dictobject.py new file mode 100644 index 0000000000000000000000000000000000000000..60d9db6e0e1df5fdd5853f6d773b70b7042c46b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/typed/dictobject.py @@ -0,0 +1,1367 @@ +""" +Compiler-side implementation of the dictionary. +""" +import ctypes +import operator +from enum import IntEnum + +from llvmlite import ir + +from numba import _helperlib + +from numba.core.extending import ( + overload, + overload_method, + overload_attribute, + intrinsic, + register_model, + models, + lower_builtin, + lower_cast, + make_attribute_wrapper, +) +from numba.core.imputils import iternext_impl, impl_ret_untracked +from numba.core import types, cgutils +from numba.core.types import ( + DictType, + DictItemsIterableType, + DictKeysIterableType, + DictValuesIterableType, + DictIteratorType, + Type, +) +from numba.core.imputils import impl_ret_borrowed, RefType +from numba.core.errors import TypingError, LoweringError, NumbaTypeError +from numba.core import typing +from numba.typed.typedobjectutils import (_as_bytes, _cast, _nonoptional, + _sentry_safe_cast_default, + _get_incref_decref, + _get_equal, _container_get_data,) + +ll_dict_type = cgutils.voidptr_t +ll_dictiter_type = cgutils.voidptr_t +ll_voidptr_type = cgutils.voidptr_t +ll_status = cgutils.int32_t +ll_ssize_t = cgutils.intp_t +ll_hash = ll_ssize_t +ll_bytes = cgutils.voidptr_t + + +_meminfo_dictptr = types.MemInfoPointer(types.voidptr) + + +# The following enums must match _dictobject.c + +class DKIX(IntEnum): + """Special return value of dict lookup. 
+ """ + EMPTY = -1 + + +class Status(IntEnum): + """Status code for other dict operations. + """ + OK = 0 + OK_REPLACED = 1 + ERR_NO_MEMORY = -1 + ERR_DICT_MUTATED = -2 + ERR_ITER_EXHAUSTED = -3 + ERR_DICT_EMPTY = -4 + ERR_CMP_FAILED = -5 + + +def new_dict(key, value, n_keys=0): + """Construct a new dict with enough space for *n_keys* without a resize. + + Parameters + ---------- + key, value : TypeRef + Key type and value type of the new dict. + n_keys : int, default 0 + The number of keys to insert without needing a resize. + A value of 0 creates a dict with minimum size. + """ + # With JIT disabled, ignore all arguments and return a Python dict. + return dict() + + +@register_model(DictType) +class DictModel(models.StructModel): + def __init__(self, dmm, fe_type): + members = [ + ('meminfo', _meminfo_dictptr), + ('data', types.voidptr), # ptr to the C dict + ] + super(DictModel, self).__init__(dmm, fe_type, members) + + +@register_model(DictItemsIterableType) +@register_model(DictKeysIterableType) +@register_model(DictValuesIterableType) +@register_model(DictIteratorType) +class DictIterModel(models.StructModel): + def __init__(self, dmm, fe_type): + members = [ + ('parent', fe_type.parent), # reference to the dict + ('state', types.voidptr), # iterator state in C code + ] + super(DictIterModel, self).__init__(dmm, fe_type, members) + + +# Make _parent available to make len simple +make_attribute_wrapper(DictItemsIterableType, "parent", "_parent") +make_attribute_wrapper(DictKeysIterableType, "parent", "_parent") +make_attribute_wrapper(DictValuesIterableType, "parent", "_parent") + + +def _raise_if_error(context, builder, status, msg): + """Raise an internal error depending on the value of *status* + """ + ok_status = status.type(int(Status.OK)) + with builder.if_then(builder.icmp_signed('!=', status, ok_status)): + context.call_conv.return_user_exc(builder, RuntimeError, (msg,)) + + +@intrinsic +def _as_meminfo(typingctx, dctobj): + """Returns the 
MemInfoPointer of a dictionary. + """ + if not isinstance(dctobj, types.DictType): + raise TypingError('expected *dctobj* to be a DictType') + + def codegen(context, builder, sig, args): + [td] = sig.args + [d] = args + # Incref + context.nrt.incref(builder, td, d) + ctor = cgutils.create_struct_proxy(td) + dstruct = ctor(context, builder, value=d) + # Returns the plain MemInfo + return dstruct.meminfo + + sig = _meminfo_dictptr(dctobj) + return sig, codegen + + +@intrinsic +def _from_meminfo(typingctx, mi, dicttyperef): + """Recreate a dictionary from a MemInfoPointer + """ + if mi != _meminfo_dictptr: + raise TypingError('expected a MemInfoPointer for dict.') + dicttype = dicttyperef.instance_type + if not isinstance(dicttype, DictType): + raise TypingError('expected a {}'.format(DictType)) + + def codegen(context, builder, sig, args): + [tmi, tdref] = sig.args + td = tdref.instance_type + [mi, _] = args + + ctor = cgutils.create_struct_proxy(td) + dstruct = ctor(context, builder) + + data_pointer = context.nrt.meminfo_data(builder, mi) + data_pointer = builder.bitcast(data_pointer, ll_dict_type.as_pointer()) + + dstruct.data = builder.load(data_pointer) + dstruct.meminfo = mi + + return impl_ret_borrowed( + context, + builder, + dicttype, + dstruct._getvalue(), + ) + + sig = dicttype(mi, dicttyperef) + return sig, codegen + + +def _call_dict_free(context, builder, ptr): + """Call numba_dict_free(ptr) + """ + fnty = ir.FunctionType( + ir.VoidType(), + [ll_dict_type], + ) + free = cgutils.get_or_insert_function(builder.module, fnty, + 'numba_dict_free') + builder.call(free, [ptr]) + + +def _imp_dtor(context, module): + """Define the dtor for dictionary + """ + llvoidptr = context.get_value_type(types.voidptr) + llsize = context.get_value_type(types.uintp) + fnty = ir.FunctionType( + ir.VoidType(), + [llvoidptr, llsize, llvoidptr], + ) + fname = '_numba_dict_dtor' + fn = cgutils.get_or_insert_function(module, fnty, fname) + + if fn.is_declaration: + # Set linkage + 
fn.linkage = 'linkonce_odr' + # Define + builder = ir.IRBuilder(fn.append_basic_block()) + dp = builder.bitcast(fn.args[0], ll_dict_type.as_pointer()) + d = builder.load(dp) + _call_dict_free(context, builder, d) + builder.ret_void() + + return fn + + +@intrinsic +def _dict_new_sized(typingctx, n_keys, keyty, valty): + """Wrap numba_dict_new_sized. + + Allocate a new dictionary object with enough space to hold + *n_keys* keys without needing a resize. + + Parameters + ---------- + keyty, valty: Type + Type of the key and value, respectively. + n_keys: int + The number of keys to insert without needing a resize. + A value of 0 creates a dict with minimum size. + """ + resty = types.voidptr + sig = resty(n_keys, keyty, valty) + + def codegen(context, builder, sig, args): + n_keys = builder.bitcast(args[0], ll_ssize_t) + + # Determine sizeof key and value types + ll_key = context.get_data_type(keyty.instance_type) + ll_val = context.get_data_type(valty.instance_type) + sz_key = context.get_abi_sizeof(ll_key) + sz_val = context.get_abi_sizeof(ll_val) + + refdp = cgutils.alloca_once(builder, ll_dict_type, zfill=True) + + argtys = [ll_dict_type.as_pointer(), ll_ssize_t, ll_ssize_t, ll_ssize_t] + fnty = ir.FunctionType(ll_status, argtys) + fn = ir.Function(builder.module, fnty, 'numba_dict_new_sized') + + args = [refdp, n_keys, ll_ssize_t(sz_key), ll_ssize_t(sz_val)] + status = builder.call(fn, args) + + allocated_failed_msg = "Failed to allocate dictionary" + _raise_if_error(context, builder, status, msg=allocated_failed_msg) + + dp = builder.load(refdp) + return dp + + return sig, codegen + + +@intrinsic +def _dict_set_method_table(typingctx, dp, keyty, valty): + """Wrap numba_dict_set_method_table + """ + resty = types.void + sig = resty(dp, keyty, valty) + + def codegen(context, builder, sig, args): + vtablety = ir.LiteralStructType([ + ll_voidptr_type, # equal + ll_voidptr_type, # key incref + ll_voidptr_type, # key decref + ll_voidptr_type, # val incref + 
ll_voidptr_type, # val decref + ]) + setmethod_fnty = ir.FunctionType( + ir.VoidType(), + [ll_dict_type, vtablety.as_pointer()] + ) + setmethod_fn = ir.Function( + builder.module, + setmethod_fnty, + name='numba_dict_set_method_table', + ) + dp = args[0] + vtable = cgutils.alloca_once(builder, vtablety, zfill=True) + + # install key incref/decref + key_equal_ptr = cgutils.gep_inbounds(builder, vtable, 0, 0) + key_incref_ptr = cgutils.gep_inbounds(builder, vtable, 0, 1) + key_decref_ptr = cgutils.gep_inbounds(builder, vtable, 0, 2) + val_incref_ptr = cgutils.gep_inbounds(builder, vtable, 0, 3) + val_decref_ptr = cgutils.gep_inbounds(builder, vtable, 0, 4) + + dm_key = context.data_model_manager[keyty.instance_type] + if dm_key.contains_nrt_meminfo(): + equal = _get_equal(context, builder.module, dm_key, 'dict_key') + key_incref, key_decref = _get_incref_decref( + context, builder.module, dm_key, 'dict_key' + ) + builder.store( + builder.bitcast(equal, key_equal_ptr.type.pointee), + key_equal_ptr, + ) + builder.store( + builder.bitcast(key_incref, key_incref_ptr.type.pointee), + key_incref_ptr, + ) + builder.store( + builder.bitcast(key_decref, key_decref_ptr.type.pointee), + key_decref_ptr, + ) + + dm_val = context.data_model_manager[valty.instance_type] + if dm_val.contains_nrt_meminfo(): + val_incref, val_decref = _get_incref_decref( + context, builder.module, dm_val, 'dict_value' + ) + builder.store( + builder.bitcast(val_incref, val_incref_ptr.type.pointee), + val_incref_ptr, + ) + builder.store( + builder.bitcast(val_decref, val_decref_ptr.type.pointee), + val_decref_ptr, + ) + + builder.call(setmethod_fn, [dp, vtable]) + + return sig, codegen + + +@intrinsic +def _dict_insert(typingctx, d, key, hashval, val): + """Wrap numba_dict_insert + """ + resty = types.int32 + sig = resty(d, d.key_type, types.intp, d.value_type) + + def codegen(context, builder, sig, args): + fnty = ir.FunctionType( + ll_status, + [ll_dict_type, ll_bytes, ll_hash, ll_bytes, ll_bytes], + 
) + [d, key, hashval, val] = args + [td, tkey, thashval, tval] = sig.args + fn = cgutils.get_or_insert_function(builder.module, fnty, + 'numba_dict_insert') + + dm_key = context.data_model_manager[tkey] + dm_val = context.data_model_manager[tval] + + data_key = dm_key.as_data(builder, key) + data_val = dm_val.as_data(builder, val) + + ptr_key = cgutils.alloca_once_value(builder, data_key) + cgutils.memset_padding(builder, ptr_key) + + ptr_val = cgutils.alloca_once_value(builder, data_val) + # TODO: the ptr_oldval is not used. needed for refct + ptr_oldval = cgutils.alloca_once(builder, data_val.type) + + dp = _container_get_data(context, builder, td, d) + status = builder.call( + fn, + [ + dp, + _as_bytes(builder, ptr_key), + hashval, + _as_bytes(builder, ptr_val), + _as_bytes(builder, ptr_oldval), + ], + ) + return status + + return sig, codegen + + +@intrinsic +def _dict_length(typingctx, d): + """Wrap numba_dict_length + + Returns the length of the dictionary. + """ + resty = types.intp + sig = resty(d) + + def codegen(context, builder, sig, args): + fnty = ir.FunctionType( + ll_ssize_t, + [ll_dict_type], + ) + fn = cgutils.get_or_insert_function(builder.module, fnty, + 'numba_dict_length') + [d] = args + [td] = sig.args + dp = _container_get_data(context, builder, td, d) + n = builder.call(fn, [dp]) + return n + + return sig, codegen + + +@intrinsic +def _dict_dump(typingctx, d): + """Dump the dictionary keys and values. + Wraps numba_dict_dump for debugging. 
+ """ + resty = types.void + sig = resty(d) + + def codegen(context, builder, sig, args): + fnty = ir.FunctionType( + ir.VoidType(), + [ll_dict_type], + ) + [td] = sig.args + [d] = args + dp = _container_get_data(context, builder, td, d) + fn = cgutils.get_or_insert_function(builder.module, fnty, + 'numba_dict_dump') + + builder.call(fn, [dp]) + + return sig, codegen + + +@intrinsic +def _dict_lookup(typingctx, d, key, hashval): + """Wrap numba_dict_lookup + + Returns 2-tuple of (intp, ?value_type) + """ + resty = types.Tuple([types.intp, types.Optional(d.value_type)]) + sig = resty(d, key, hashval) + + def codegen(context, builder, sig, args): + fnty = ir.FunctionType( + ll_ssize_t, + [ll_dict_type, ll_bytes, ll_hash, ll_bytes], + ) + [td, tkey, thashval] = sig.args + [d, key, hashval] = args + fn = cgutils.get_or_insert_function(builder.module, fnty, + 'numba_dict_lookup') + + dm_key = context.data_model_manager[tkey] + dm_val = context.data_model_manager[td.value_type] + + data_key = dm_key.as_data(builder, key) + ptr_key = cgutils.alloca_once_value(builder, data_key) + cgutils.memset_padding(builder, ptr_key) + + ll_val = context.get_data_type(td.value_type) + ptr_val = cgutils.alloca_once(builder, ll_val) + + dp = _container_get_data(context, builder, td, d) + ix = builder.call( + fn, + [ + dp, + _as_bytes(builder, ptr_key), + hashval, + _as_bytes(builder, ptr_val), + ], + ) + # Load value if output is available + found = builder.icmp_signed('>', ix, ix.type(int(DKIX.EMPTY))) + + out = context.make_optional_none(builder, td.value_type) + pout = cgutils.alloca_once_value(builder, out) + + with builder.if_then(found): + val = dm_val.load_from_data_pointer(builder, ptr_val) + context.nrt.incref(builder, td.value_type, val) + loaded = context.make_optional_value(builder, td.value_type, val) + builder.store(loaded, pout) + + out = builder.load(pout) + return context.make_tuple(builder, resty, [ix, out]) + + return sig, codegen + + +@intrinsic +def 
_dict_popitem(typingctx, d): + """Wrap numba_dict_popitem + """ + + keyvalty = types.Tuple([d.key_type, d.value_type]) + resty = types.Tuple([types.int32, types.Optional(keyvalty)]) + sig = resty(d) + + def codegen(context, builder, sig, args): + fnty = ir.FunctionType( + ll_status, + [ll_dict_type, ll_bytes, ll_bytes], + ) + [d] = args + [td] = sig.args + fn = cgutils.get_or_insert_function(builder.module, fnty, + 'numba_dict_popitem') + + dm_key = context.data_model_manager[td.key_type] + dm_val = context.data_model_manager[td.value_type] + + ptr_key = cgutils.alloca_once(builder, dm_key.get_data_type()) + ptr_val = cgutils.alloca_once(builder, dm_val.get_data_type()) + + dp = _container_get_data(context, builder, td, d) + status = builder.call( + fn, + [ + dp, + _as_bytes(builder, ptr_key), + _as_bytes(builder, ptr_val), + ], + ) + out = context.make_optional_none(builder, keyvalty) + pout = cgutils.alloca_once_value(builder, out) + + cond = builder.icmp_signed('==', status, status.type(int(Status.OK))) + with builder.if_then(cond): + key = dm_key.load_from_data_pointer(builder, ptr_key) + val = dm_val.load_from_data_pointer(builder, ptr_val) + keyval = context.make_tuple(builder, keyvalty, [key, val]) + optkeyval = context.make_optional_value(builder, keyvalty, keyval) + builder.store(optkeyval, pout) + + out = builder.load(pout) + return cgutils.pack_struct(builder, [status, out]) + + return sig, codegen + + +@intrinsic +def _dict_delitem(typingctx, d, hk, ix): + """Wrap numba_dict_delitem + """ + resty = types.int32 + sig = resty(d, hk, types.intp) + + def codegen(context, builder, sig, args): + fnty = ir.FunctionType( + ll_status, + [ll_dict_type, ll_hash, ll_ssize_t], + ) + [d, hk, ix] = args + [td, thk, tix] = sig.args + + fn = cgutils.get_or_insert_function(builder.module, fnty, + 'numba_dict_delitem') + + dp = _container_get_data(context, builder, td, d) + status = builder.call(fn, [dp, hk, ix]) + return status + + return sig, codegen + + +def 
_iterator_codegen(resty): + """The common codegen for iterator intrinsics. + + Populates the iterator struct and increfs. + """ + + def codegen(context, builder, sig, args): + [d] = args + [td] = sig.args + iterhelper = context.make_helper(builder, resty) + iterhelper.parent = d + iterhelper.state = iterhelper.state.type(None) + return impl_ret_borrowed( + context, + builder, + resty, + iterhelper._getvalue(), + ) + + return codegen + + +@intrinsic +def _dict_items(typingctx, d): + """Get dictionary iterator for .items()""" + resty = types.DictItemsIterableType(d) + sig = resty(d) + codegen = _iterator_codegen(resty) + return sig, codegen + + +@intrinsic +def _dict_keys(typingctx, d): + """Get dictionary iterator for .keys()""" + resty = types.DictKeysIterableType(d) + sig = resty(d) + codegen = _iterator_codegen(resty) + return sig, codegen + + +@intrinsic +def _dict_values(typingctx, d): + """Get dictionary iterator for .values()""" + resty = types.DictValuesIterableType(d) + sig = resty(d) + codegen = _iterator_codegen(resty) + return sig, codegen + + +@intrinsic +def _make_dict(typingctx, keyty, valty, ptr): + """Make a dictionary struct with the given *ptr* + + Parameters + ---------- + keyty, valty: Type + Type of the key and value, respectively. + ptr : llvm pointer value + Points to the dictionary object. 
+ """ + dict_ty = types.DictType(keyty.instance_type, valty.instance_type) + + def codegen(context, builder, signature, args): + [_, _, ptr] = args + ctor = cgutils.create_struct_proxy(dict_ty) + dstruct = ctor(context, builder) + dstruct.data = ptr + + alloc_size = context.get_abi_sizeof( + context.get_value_type(types.voidptr), + ) + dtor = _imp_dtor(context, builder.module) + meminfo = context.nrt.meminfo_alloc_dtor( + builder, + context.get_constant(types.uintp, alloc_size), + dtor, + ) + + data_pointer = context.nrt.meminfo_data(builder, meminfo) + data_pointer = builder.bitcast(data_pointer, ll_dict_type.as_pointer()) + builder.store(ptr, data_pointer) + + dstruct.meminfo = meminfo + + return dstruct._getvalue() + + sig = dict_ty(keyty, valty, ptr) + return sig, codegen + + +@overload(new_dict) +def impl_new_dict(key, value, n_keys=0): + """Creates a new dictionary with *key* and *value* as the type + of the dictionary key and value, respectively. *n_keys* is the + number of keys to insert without requiring a resize, where a + value of 0 creates a dictionary with minimum size. 
+ """ + if any([ + not isinstance(key, Type), + not isinstance(value, Type), + ]): + raise NumbaTypeError("expecting *key* and *value* to be a Numba Type") + + keyty, valty = key, value + + def imp(key, value, n_keys=0): + if n_keys < 0: + raise RuntimeError("expecting *n_keys* to be >= 0") + dp = _dict_new_sized(n_keys, keyty, valty) + _dict_set_method_table(dp, keyty, valty) + d = _make_dict(keyty, valty, dp) + return d + + return imp + + +@overload(len) +def impl_len(d): + """len(dict) + """ + if not isinstance(d, types.DictType): + return + + def impl(d): + return _dict_length(d) + + return impl + + +@overload(len) +def impl_len_iters(d): + """len(dict.keys()), len(dict.values()), len(dict.items()) + """ + if not isinstance(d, (DictKeysIterableType, + DictValuesIterableType, DictItemsIterableType)): + return + + def impl(d): + return _dict_length(d._parent) + + return impl + + +@overload_method(types.DictType, '__setitem__') +@overload(operator.setitem) +def impl_setitem(d, key, value): + if not isinstance(d, types.DictType): + return + + keyty, valty = d.key_type, d.value_type + + def impl(d, key, value): + castedkey = _cast(key, keyty) + castedval = _cast(value, valty) + status = _dict_insert(d, castedkey, hash(castedkey), castedval) + if status == Status.OK: + return + elif status == Status.OK_REPLACED: + # replaced + # XXX handle refcount + return + elif status == Status.ERR_CMP_FAILED: + raise ValueError('key comparison failed') + else: + raise RuntimeError('dict.__setitem__ failed unexpectedly') + + if d.is_precise(): + # Handle the precise case. + return impl + else: + # Handle the imprecise case. + d = d.refine(key, value) + # Re-bind the key type and value type to match the arguments. + keyty, valty = d.key_type, d.value_type + # Create the signature that we wanted this impl to have. 
+ sig = typing.signature(types.void, d, keyty, valty) + return sig, impl + + +@overload_method(types.DictType, 'get') +def impl_get(dct, key, default=None): + if not isinstance(dct, types.DictType): + return + keyty = dct.key_type + valty = dct.value_type + _sentry_safe_cast_default(default, valty) + + def impl(dct, key, default=None): + castedkey = _cast(key, keyty) + ix, val = _dict_lookup(dct, castedkey, hash(castedkey)) + if ix > DKIX.EMPTY: + return val + return default + + return impl + + +@overload_attribute(types.DictType, '__hash__') +def impl_hash(dct): + if not isinstance(dct, types.DictType): + return + return lambda dct: None + + +@overload(operator.getitem) +def impl_getitem(d, key): + if not isinstance(d, types.DictType): + return + + keyty = d.key_type + + def impl(d, key): + castedkey = _cast(key, keyty) + ix, val = _dict_lookup(d, castedkey, hash(castedkey)) + if ix == DKIX.EMPTY: + raise KeyError() + elif ix < DKIX.EMPTY: + raise AssertionError("internal dict error during lookup") + else: + return _nonoptional(val) + + return impl + + +@overload_method(types.DictType, 'popitem') +def impl_popitem(d): + if not isinstance(d, types.DictType): + return + + def impl(d): + status, keyval = _dict_popitem(d) + if status == Status.OK: + return _nonoptional(keyval) + elif status == Status.ERR_DICT_EMPTY: + raise KeyError() + else: + raise AssertionError('internal dict error during popitem') + + return impl + + +@overload_method(types.DictType, 'pop') +def impl_pop(dct, key, default=None): + if not isinstance(dct, types.DictType): + return + + keyty = dct.key_type + valty = dct.value_type + should_raise = isinstance(default, types.Omitted) + _sentry_safe_cast_default(default, valty) + + def impl(dct, key, default=None): + castedkey = _cast(key, keyty) + hashed = hash(castedkey) + ix, val = _dict_lookup(dct, castedkey, hashed) + if ix == DKIX.EMPTY: + if should_raise: + raise KeyError() + else: + return default + elif ix < DKIX.EMPTY: + raise 
AssertionError("internal dict error during lookup") + else: + status = _dict_delitem(dct, hashed, ix) + if status != Status.OK: + raise AssertionError("internal dict error during delitem") + return val + + return impl + + +@overload(operator.delitem) +def impl_delitem(d, k): + if not isinstance(d, types.DictType): + return + + def impl(d, k): + d.pop(k) + return impl + + +@overload(operator.contains) +def impl_contains(d, k): + if not isinstance(d, types.DictType): + return + + keyty = d.key_type + + def impl(d, k): + k = _cast(k, keyty) + ix, val = _dict_lookup(d, k, hash(k)) + return ix > DKIX.EMPTY + return impl + + +@overload_method(types.DictType, 'clear') +def impl_clear(d): + if not isinstance(d, types.DictType): + return + + def impl(d): + while len(d): + d.popitem() + + return impl + + +@overload_method(types.DictType, 'copy') +def impl_copy(d): + if not isinstance(d, types.DictType): + return + + key_type, val_type = d.key_type, d.value_type + + def impl(d): + newd = new_dict(key_type, val_type, n_keys=len(d)) + for k, v in d.items(): + newd[k] = v + return newd + + return impl + + +@overload_method(types.DictType, 'setdefault') +def impl_setdefault(dct, key, default=None): + if not isinstance(dct, types.DictType): + return + + def impl(dct, key, default=None): + if key not in dct: + dct[key] = default + return dct[key] + + return impl + + +@overload_method(types.DictType, 'items') +def impl_items(d): + if not isinstance(d, types.DictType): + return + + def impl(d): + it = _dict_items(d) + return it + + return impl + + +@overload_method(types.DictType, 'keys') +def impl_keys(d): + if not isinstance(d, types.DictType): + return + + def impl(d): + return _dict_keys(d) + + return impl + + +@overload_method(types.DictType, 'values') +def impl_values(d): + if not isinstance(d, types.DictType): + return + + def impl(d): + return _dict_values(d) + + return impl + + +@overload_method(types.DictType, 'update') +def ol_dict_update(d, other): + if not isinstance(d, 
types.DictType): + return + if not isinstance(other, types.DictType): + return + + def impl(d, other): + for k, v in other.items(): + d[k] = v + return impl + + +@overload(operator.eq) +def impl_equal(da, db): + if not isinstance(da, types.DictType): + return + if not isinstance(db, types.DictType): + # If RHS is not a dictionary, always returns False + def impl_type_mismatch(da, db): + return False + return impl_type_mismatch + + otherkeyty = db.key_type + + def impl_type_matched(da, db): + if len(da) != len(db): + return False + for ka, va in da.items(): + # Cast key from LHS to the key-type of RHS + kb = _cast(ka, otherkeyty) + ix, vb = _dict_lookup(db, kb, hash(kb)) + if ix <= DKIX.EMPTY: + # Quit early if the key is not found + return False + if va != vb: + # Quit early if the values do not match + return False + return True + + return impl_type_matched + + +@overload(operator.ne) +def impl_not_equal(da, db): + if not isinstance(da, types.DictType): + return + + def impl(da, db): + return not (da == db) + + return impl + + +@lower_builtin('getiter', types.DictItemsIterableType) +@lower_builtin('getiter', types.DictKeysIterableType) +@lower_builtin('getiter', types.DictValuesIterableType) +def impl_iterable_getiter(context, builder, sig, args): + """Implement iter() for .keys(), .values(), .items() + """ + iterablety = sig.args[0] + it = context.make_helper(builder, iterablety.iterator_type, args[0]) + + fnty = ir.FunctionType( + ir.VoidType(), + [ll_dictiter_type, ll_dict_type], + ) + + fn = cgutils.get_or_insert_function(builder.module, fnty, + 'numba_dict_iter') + + proto = ctypes.CFUNCTYPE(ctypes.c_size_t) + dictiter_sizeof = proto(_helperlib.c_helpers['dict_iter_sizeof']) + state_type = ir.ArrayType(ir.IntType(8), dictiter_sizeof()) + + pstate = cgutils.alloca_once(builder, state_type, zfill=True) + it.state = _as_bytes(builder, pstate) + + dp = _container_get_data(context, builder, iterablety.parent, it.parent) + builder.call(fn, [it.state, dp]) + return 
impl_ret_borrowed( + context, + builder, + sig.return_type, + it._getvalue(), + ) + + +@lower_builtin('getiter', types.DictType) +def impl_dict_getiter(context, builder, sig, args): + """Implement iter(Dict). Semantically equivalent to dict.keys() + """ + [td] = sig.args + [d] = args + iterablety = types.DictKeysIterableType(td) + it = context.make_helper(builder, iterablety.iterator_type) + + fnty = ir.FunctionType( + ir.VoidType(), + [ll_dictiter_type, ll_dict_type], + ) + + fn = cgutils.get_or_insert_function(builder.module, fnty, 'numba_dict_iter') + + proto = ctypes.CFUNCTYPE(ctypes.c_size_t) + dictiter_sizeof = proto(_helperlib.c_helpers['dict_iter_sizeof']) + state_type = ir.ArrayType(ir.IntType(8), dictiter_sizeof()) + + pstate = cgutils.alloca_once(builder, state_type, zfill=True) + it.state = _as_bytes(builder, pstate) + it.parent = d + + dp = _container_get_data(context, builder, iterablety.parent, args[0]) + builder.call(fn, [it.state, dp]) + return impl_ret_borrowed( + context, + builder, + sig.return_type, + it._getvalue(), + ) + + +@lower_builtin('iternext', types.DictIteratorType) +@iternext_impl(RefType.BORROWED) +def impl_iterator_iternext(context, builder, sig, args, result): + iter_type = sig.args[0] + it = context.make_helper(builder, iter_type, args[0]) + + p2p_bytes = ll_bytes.as_pointer() + + iternext_fnty = ir.FunctionType( + ll_status, + [ll_bytes, p2p_bytes, p2p_bytes] + ) + iternext = cgutils.get_or_insert_function( + builder.module, iternext_fnty, 'numba_dict_iter_next', + ) + key_raw_ptr = cgutils.alloca_once(builder, ll_bytes) + val_raw_ptr = cgutils.alloca_once(builder, ll_bytes) + + status = builder.call(iternext, (it.state, key_raw_ptr, val_raw_ptr)) + # TODO: no handling of error state i.e. 
mutated dictionary + # all errors are treated as exhausted iterator + is_valid = builder.icmp_unsigned('==', status, status.type(0)) + result.set_valid(is_valid) + + with builder.if_then(is_valid): + yield_type = iter_type.yield_type + key_ty, val_ty = iter_type.parent.keyvalue_type + + dm_key = context.data_model_manager[key_ty] + dm_val = context.data_model_manager[val_ty] + + key_ptr = builder.bitcast( + builder.load(key_raw_ptr), + dm_key.get_data_type().as_pointer(), + ) + val_ptr = builder.bitcast( + builder.load(val_raw_ptr), + dm_val.get_data_type().as_pointer(), + ) + + key = dm_key.load_from_data_pointer(builder, key_ptr) + val = dm_val.load_from_data_pointer(builder, val_ptr) + + # All dict iterators use this common implementation. + # Their differences are resolved here. + if isinstance(iter_type.iterable, DictItemsIterableType): + # .items() + tup = context.make_tuple(builder, yield_type, [key, val]) + result.yield_(tup) + elif isinstance(iter_type.iterable, DictKeysIterableType): + # .keys() + result.yield_(key) + elif isinstance(iter_type.iterable, DictValuesIterableType): + # .values() + result.yield_(val) + else: + # unreachable + raise AssertionError('unknown type: {}'.format(iter_type.iterable)) + + +def build_map(context, builder, dict_type, item_types, items): + + if isinstance(dict_type, types.LiteralStrKeyDict): + unliteral_tys = [x for x in + dict_type.literal_value.values()] + nbty = types.NamedTuple(unliteral_tys, + dict_type.tuple_ty) + values = [x[1] for x in items] + # replace with make_tuple call? + tup = context.get_constant_undef(nbty) + literal_tys = [x for x in dict_type.literal_value.values()] + + # this is to deal with repeated keys + value_index = dict_type.value_index + if value_index is None: + # 1:1 map keys:values + value_indexer = range(len(values)) + else: + # 1:>1 map keys:values, e.g. 
{'a':1, 'a': 'foo'} + value_indexer = value_index.values() + + for i, ix in enumerate(value_indexer): + val = values[ix] + casted = context.cast(builder, val, literal_tys[i], + unliteral_tys[i]) + tup = builder.insert_value(tup, casted, i) + d = tup + context.nrt.incref(builder, nbty, d) + + else: + from numba.typed import Dict + + dt = types.DictType(dict_type.key_type, dict_type.value_type) + kt, vt = dict_type.key_type, dict_type.value_type + sig = typing.signature(dt) + + def make_dict(): + return Dict.empty(kt, vt) + + d = context.compile_internal(builder, make_dict, sig, ()) + + if items: + for (kt, vt), (k, v) in zip(item_types, items): + sig = typing.signature(types.void, dt, kt, vt) + args = d, k, v + + def put(d, k, v): + d[k] = v + + context.compile_internal(builder, put, sig, args) + + return d + + +# ------------------------------------------------------------------------------ +# Literal dictionaries +# ------------------------------------------------------------------------------ + +@intrinsic +def _mixed_values_to_tuple(tyctx, d): + keys = [x for x in d.literal_value.keys()] + literal_tys = [x for x in d.literal_value.values()] + + def impl(cgctx, builder, sig, args): + lld, = args + impl = cgctx.get_function('static_getitem', + types.none(d, types.literal('dummy'))) + items = [] + for k in range(len(keys)): + item = impl(builder, (lld, k),) + casted = cgctx.cast(builder, item, literal_tys[k], d.types[k]) + items.append(casted) + cgctx.nrt.incref(builder, d.types[k], item) + ret = cgctx.make_tuple(builder, sig.return_type, items) + return ret + sig = types.Tuple(d.types)(d) + return sig, impl + + +@overload_method(types.LiteralStrKeyDict, 'values') +def literalstrkeydict_impl_values(d): + # This requires faking a values() iterator simply as a tuple, creating a + # type specialising iterator would be possible but horrendous and end up + # down the "versioned" loop body route. 
+ if not isinstance(d, types.LiteralStrKeyDict): + return + + def impl(d): + return _mixed_values_to_tuple(d) + return impl + + +@overload_method(types.LiteralStrKeyDict, 'keys') +def literalstrkeydict_impl_keys(d): + if not isinstance(d, types.LiteralStrKeyDict): + return + # create a key iterator by specialising a DictType instance with the + # literal keys and returning that + t = tuple([x.literal_value for x in d.literal_value.keys()]) + + def impl(d): + d = dict() + for x in t: + d[x] = 0 + return d.keys() + return impl + + +# have to lower_builtin as this inherits from tuple and literal, both of which +# provide a match, hence ambiguous before proper resolution gets a chance +@lower_builtin(operator.eq, types.LiteralStrKeyDict, types.LiteralStrKeyDict) +def literalstrkeydict_impl_equals(context, builder, sig, args): + tu, tv = sig.args + u, v = args + pred = tu.literal_value == tv.literal_value + res = context.get_constant(types.boolean, pred) + return impl_ret_untracked(context, builder, sig.return_type, res) + + +@overload(operator.getitem) +@overload_method(types.LiteralStrKeyDict, 'get') +def literalstrkeydict_impl_get(dct, *args): + if not isinstance(dct, types.LiteralStrKeyDict): + return + msg = ("Cannot get{item}() on a literal dictionary, return type cannot be " + "statically determined") + raise TypingError(msg) + + +@overload_method(types.LiteralStrKeyDict, 'copy') +def literalstrkeydict_impl_copy(d): + if not isinstance(d, types.LiteralStrKeyDict): + return + + def impl(d): + return d + return impl + + +@intrinsic +def _str_items_mixed_values_to_tuple(tyctx, d): + keys = [x for x in d.literal_value.keys()] + literal_tys = [x for x in d.literal_value.values()] + + def impl(cgctx, builder, sig, args): + + lld, = args + impl = cgctx.get_function('static_getitem', + types.none(d, types.literal('dummy'))) + items = [] + from numba.cpython.unicode import make_string_from_constant + for k in range(len(keys)): + item = impl(builder, (lld, k),) + casted = 
cgctx.cast(builder, item, literal_tys[k], d.types[k]) + cgctx.nrt.incref(builder, d.types[k], item) + keydata = make_string_from_constant(cgctx, builder, + types.unicode_type, + keys[k].literal_value) + pair = cgctx.make_tuple(builder, + types.Tuple([types.unicode_type, + d.types[k]]), (keydata, casted)) + items.append(pair) + ret = cgctx.make_tuple(builder, sig.return_type, items) + return ret + kvs = [types.Tuple((types.unicode_type, x)) for x in d.types] + sig = types.Tuple(kvs)(d) + return sig, impl + + +@overload_method(types.LiteralStrKeyDict, 'items') +def literalstrkeydict_impl_items(d): + if not isinstance(d, types.LiteralStrKeyDict): + return + + def impl(d): + return _str_items_mixed_values_to_tuple(d) + return impl + + +@overload(operator.contains) +def literalstrkeydict_impl_contains(d, k): + if not isinstance(d, types.LiteralStrKeyDict): + return + + def impl(d, k): + for key in d.keys(): + if k == key: + return True + return False + return impl + + +@overload(len) +def literalstrkeydict_impl_len(d): + if not isinstance(d, types.LiteralStrKeyDict): + return + l = d.count + return lambda d: l + + +@overload(operator.setitem) +def literalstrkeydict_banned_impl_setitem(d, key, value): + if not isinstance(d, types.LiteralStrKeyDict): + return + raise TypingError("Cannot mutate a literal dictionary") + + +@overload(operator.delitem) +def literalstrkeydict_banned_impl_delitem(d, k): + if not isinstance(d, types.LiteralStrKeyDict): + return + raise TypingError("Cannot mutate a literal dictionary") + + +@overload_method(types.LiteralStrKeyDict, 'popitem') +@overload_method(types.LiteralStrKeyDict, 'pop') +@overload_method(types.LiteralStrKeyDict, 'clear') +@overload_method(types.LiteralStrKeyDict, 'setdefault') +@overload_method(types.LiteralStrKeyDict, 'update') +def literalstrkeydict_banned_impl_mutators(d, *args): + if not isinstance(d, types.LiteralStrKeyDict): + return + raise TypingError("Cannot mutate a literal dictionary") + + 
+@lower_cast(types.LiteralStrKeyDict, types.LiteralStrKeyDict) +def cast_LiteralStrKeyDict_LiteralStrKeyDict(context, builder, fromty, toty, + val): + # should have been picked up by typing + for (k1, v1), (k2, v2) in zip(fromty.literal_value.items(), + toty.literal_value.items()): + # these checks are just guards, typing should have picked up any + # problems + if k1 != k2: # keys must be same + msg = "LiteralDictionary keys are not the same {} != {}" + raise LoweringError(msg.format(k1, k2)) + # values must be same ty + if context.typing_context.unify_pairs(v1, v2) is None: + msg = "LiteralDictionary values cannot by unified, have {} and {}" + raise LoweringError(msg.format(v1, v2)) + else: + fromty = types.Tuple(fromty.types) + toty = types.Tuple(toty.types) + olditems = cgutils.unpack_tuple(builder, val, len(fromty)) + items = [context.cast(builder, v, f, t) + for v, f, t in zip(olditems, fromty, toty)] + return context.make_tuple(builder, toty, items) + + +@lower_cast(types.DictType, types.DictType) +def cast_DictType_DictType(context, builder, fromty, toty, val): + # should have been picked up by typing + return val diff --git a/venv/lib/python3.10/site-packages/numba/typed/listobject.py b/venv/lib/python3.10/site-packages/numba/typed/listobject.py new file mode 100644 index 0000000000000000000000000000000000000000..ce5b49cc65ff95a127db9f00ab142554c8b55a27 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/typed/listobject.py @@ -0,0 +1,1543 @@ +""" +Compiler-side implementation of the Numba typed-list. 
+""" +import operator +from enum import IntEnum + +from llvmlite import ir + +from numba.core.extending import ( + overload, + overload_method, + overload_attribute, + register_jitable, + intrinsic, + register_model, + models, + lower_builtin, +) +from numba.core.imputils import iternext_impl +from numba.core import types, cgutils, config +from numba.core.types import ( + ListType, + ListTypeIterableType, + ListTypeIteratorType, + Type, + NoneType, +) +from numba.core.imputils import impl_ret_borrowed, RefType +from numba.core.errors import TypingError, NumbaTypeError +from numba.core import typing +from numba.typed.typedobjectutils import (_as_bytes, _cast, _nonoptional, + _get_incref_decref, + _container_get_data, + _container_get_meminfo,) +from numba.cpython import listobj + +ll_list_type = cgutils.voidptr_t +ll_listiter_type = cgutils.voidptr_t +ll_voidptr_type = cgutils.voidptr_t +ll_status = cgutils.int32_t +ll_ssize_t = cgutils.intp_t +ll_bytes = cgutils.voidptr_t + + +_meminfo_listptr = types.MemInfoPointer(types.voidptr) + +if config.USE_LEGACY_TYPE_SYSTEM: + INDEXTY = types.intp + + index_types = types.integer_domain +else: + INDEXTY = types.py_int + + index_types = types.py_integer_domain + +DEFAULT_ALLOCATED = 0 + + +@register_model(ListType) +class ListModel(models.StructModel): + def __init__(self, dmm, fe_type): + members = [ + ('meminfo', _meminfo_listptr), + ('data', types.voidptr), # ptr to the C list + ] + super(ListModel, self).__init__(dmm, fe_type, members) + + +@register_model(ListTypeIterableType) +@register_model(ListTypeIteratorType) +class ListIterModel(models.StructModel): + def __init__(self, dmm, fe_type): + members = [ + ('size', types.intp), # the size of the iteration space + ('parent', fe_type.parent), # the parent list + ('index', types.EphemeralPointer(types.intp)), # current index + ] + super(ListIterModel, self).__init__(dmm, fe_type, members) + + +class ListStatus(IntEnum): + """Status code for other list operations. 
+ """ + LIST_OK = 0, + LIST_ERR_INDEX = -1 + LIST_ERR_NO_MEMORY = -2 + LIST_ERR_MUTATED = -3 + LIST_ERR_ITER_EXHAUSTED = -4 + LIST_ERR_IMMUTABLE = -5 + + +class ErrorHandler(object): + """ErrorHandler for calling codegen functions from this file. + + Stores the state needed to raise an exception from nopython mode. + """ + + def __init__(self, context): + self.context = context + + def __call__(self, builder, status, msg): + ok_status = status.type(int(ListStatus.LIST_OK)) + with builder.if_then(builder.icmp_signed('!=', status, ok_status), + likely=True): + self.context.call_conv.return_user_exc( + builder, RuntimeError, (msg,)) + + +def _check_for_none_typed(lst, method): + if isinstance(lst.dtype, NoneType): + raise TypingError("method support for List[None] is limited, " + "not supported: '{}'.".format(method)) + + +@intrinsic +def _as_meminfo(typingctx, lstobj): + """Returns the MemInfoPointer of a list. + """ + if not isinstance(lstobj, types.ListType): + raise TypingError('expected *lstobj* to be a ListType') + + def codegen(context, builder, sig, args): + [tl] = sig.args + [l] = args + # Incref + context.nrt.incref(builder, tl, l) + ctor = cgutils.create_struct_proxy(tl) + lstruct = ctor(context, builder, value=l) + # Returns the plain MemInfo + return lstruct.meminfo + + sig = _meminfo_listptr(lstobj) + return sig, codegen + + +@intrinsic +def _from_meminfo(typingctx, mi, listtyperef): + """Recreate a list from a MemInfoPointer + """ + if mi != _meminfo_listptr: + raise TypingError('expected a MemInfoPointer for list.') + listtype = listtyperef.instance_type + if not isinstance(listtype, ListType): + raise TypingError('expected a {}'.format(ListType)) + + def codegen(context, builder, sig, args): + [tmi, tdref] = sig.args + td = tdref.instance_type + [mi, _] = args + + ctor = cgutils.create_struct_proxy(td) + dstruct = ctor(context, builder) + + data_pointer = context.nrt.meminfo_data(builder, mi) + data_pointer = builder.bitcast(data_pointer, 
ll_list_type.as_pointer()) + + dstruct.data = builder.load(data_pointer) + dstruct.meminfo = mi + + return impl_ret_borrowed( + context, + builder, + listtype, + dstruct._getvalue(), + ) + + sig = listtype(mi, listtyperef) + return sig, codegen + + +def _list_codegen_set_method_table(context, builder, lp, itemty): + vtablety = ir.LiteralStructType([ + ll_voidptr_type, # item incref + ll_voidptr_type, # item decref + ]) + setmethod_fnty = ir.FunctionType( + ir.VoidType(), + [ll_list_type, vtablety.as_pointer()] + ) + + setmethod_fn = cgutils.get_or_insert_function( + builder.module, + setmethod_fnty, + 'numba_list_set_method_table') + vtable = cgutils.alloca_once(builder, vtablety, zfill=True) + + # install item incref/decref + item_incref_ptr = cgutils.gep_inbounds(builder, vtable, 0, 0) + item_decref_ptr = cgutils.gep_inbounds(builder, vtable, 0, 1) + + dm_item = context.data_model_manager[itemty] + if dm_item.contains_nrt_meminfo(): + item_incref, item_decref = _get_incref_decref( + context, builder.module, dm_item, "list" + ) + builder.store( + builder.bitcast(item_incref, item_incref_ptr.type.pointee), + item_incref_ptr, + ) + builder.store( + builder.bitcast(item_decref, item_decref_ptr.type.pointee), + item_decref_ptr, + ) + + builder.call(setmethod_fn, [lp, vtable]) + + +@intrinsic +def _list_set_method_table(typingctx, lp, itemty): + """Wrap numba_list_set_method_table + """ + resty = types.void + sig = resty(lp, itemty) + + def codegen(context, builder, sig, args): + _list_codegen_set_method_table( + context, builder, args[0], itemty.instance_type) + + return sig, codegen + + +@lower_builtin(operator.is_, types.ListType, types.ListType) +def list_is(context, builder, sig, args): + a_meminfo = _container_get_meminfo(context, builder, sig.args[0], args[0]) + b_meminfo = _container_get_meminfo(context, builder, sig.args[1], args[1]) + ma = builder.ptrtoint(a_meminfo, cgutils.intp_t) + mb = builder.ptrtoint(b_meminfo, cgutils.intp_t) + return 
builder.icmp_signed('==', ma, mb) + + +def _call_list_free(context, builder, ptr): + """Call numba_list_free(ptr) + """ + fnty = ir.FunctionType( + ir.VoidType(), + [ll_list_type], + ) + free = cgutils.get_or_insert_function(builder.module, fnty, + 'numba_list_free') + builder.call(free, [ptr]) + + +# FIXME: this needs a careful review +def _imp_dtor(context, module): + """Define the dtor for list + """ + llvoidptr = context.get_value_type(types.voidptr) + llsize = context.get_value_type(types.uintp) + fnty = ir.FunctionType( + ir.VoidType(), + [llvoidptr, llsize, llvoidptr], + ) + fname = '_numba_list_dtor' + fn = cgutils.get_or_insert_function(module, fnty, fname) + + if fn.is_declaration: + # Set linkage + fn.linkage = 'linkonce_odr' + # Define + builder = ir.IRBuilder(fn.append_basic_block()) + lp = builder.bitcast(fn.args[0], ll_list_type.as_pointer()) + l = builder.load(lp) + _call_list_free(context, builder, l) + builder.ret_void() + + return fn + + +def new_list(item, allocated=DEFAULT_ALLOCATED): + """Construct a new list. (Not implemented in the interpreter yet) + + Parameters + ---------- + item: TypeRef + Item type of the new list. + allocated: int + number of items to pre-allocate + + """ + # With JIT disabled, ignore all arguments and return a Python list. + return list() + + +def _add_meminfo(context, builder, lstruct): + alloc_size = context.get_abi_sizeof( + context.get_value_type(types.voidptr), + ) + dtor = _imp_dtor(context, builder.module) + meminfo = context.nrt.meminfo_alloc_dtor( + builder, + context.get_constant(types.uintp, alloc_size), + dtor, + ) + + data_pointer = context.nrt.meminfo_data(builder, meminfo) + data_pointer = builder.bitcast(data_pointer, ll_list_type.as_pointer()) + builder.store(lstruct.data, data_pointer) + lstruct.meminfo = meminfo + + +@intrinsic +def _make_list(typingctx, itemty, ptr): + """Make a list struct with the given *ptr* + + Parameters + ---------- + itemty: Type + Type of the item. 
+ ptr : llvm pointer value + Points to the list object. + """ + list_ty = types.ListType(itemty.instance_type) + + def codegen(context, builder, signature, args): + ptr = args[1] + ctor = cgutils.create_struct_proxy(list_ty) + lstruct = ctor(context, builder) + lstruct.data = ptr + _add_meminfo(context, builder, lstruct) + return lstruct._getvalue() + + sig = list_ty(itemty, ptr) + return sig, codegen + + +def _list_new_codegen(context, builder, itemty, new_size, error_handler): + fnty = ir.FunctionType( + ll_status, + [ll_list_type.as_pointer(), ll_ssize_t, ll_ssize_t], + ) + fn = cgutils.get_or_insert_function(builder.module, fnty, 'numba_list_new') + # Determine sizeof item types + ll_item = context.get_data_type(itemty) + sz_item = context.get_abi_sizeof(ll_item) + reflp = cgutils.alloca_once(builder, ll_list_type, zfill=True) + status = builder.call( + fn, + [reflp, ll_ssize_t(sz_item), new_size], + ) + msg = "Failed to allocate list" + error_handler( + builder, + status, + msg, + ) + lp = builder.load(reflp) + return lp + + +@intrinsic +def _list_new(typingctx, itemty, allocated): + """Wrap numba_list_new. + + Allocate a new list object with zero capacity. + + Parameters + ---------- + itemty: Type + Type of the items + allocated: int + number of items to pre-allocate + + """ + resty = types.voidptr + sig = resty(itemty, allocated) + + def codegen(context, builder, sig, args): + error_handler = ErrorHandler(context) + return _list_new_codegen(context, + builder, + itemty.instance_type, + args[1], + error_handler, + ) + + return sig, codegen + + +@overload(new_list) +def impl_new_list(item, allocated=DEFAULT_ALLOCATED): + """Creates a new list. + + Parameters + ---------- + item: Numba type + type of the list item. 
+ allocated: int + number of items to pre-allocate + + """ + if not isinstance(item, Type): + raise NumbaTypeError("expecting *item* to be a Numba Type") + + itemty = item + + def imp(item, allocated=DEFAULT_ALLOCATED): + if allocated < 0: + raise RuntimeError("expecting *allocated* to be >= 0") + lp = _list_new(itemty, allocated) + _list_set_method_table(lp, itemty) + l = _make_list(itemty, lp) + return l + + return imp + + +@overload(len) +def impl_len(l): + """len(list) + """ + if isinstance(l, types.ListType): + def impl(l): + return _list_length(l) + + return impl + + +@intrinsic +def _list_length(typingctx, l): + """Wrap numba_list_length + + Returns the length of the list. + """ + sig = types.intp(l) + + def codegen(context, builder, sig, args): + [tl] = sig.args + [l] = args + fnty = ir.FunctionType( + ll_ssize_t, + [ll_list_type], + ) + fname = 'numba_list_size_address' + fn = cgutils.get_or_insert_function(builder.module, fnty, fname) + fn.attributes.add('alwaysinline') + fn.attributes.add('readonly') + fn.attributes.add('nounwind') + lp = _container_get_data(context, builder, tl, l) + len_addr = builder.call(fn, [lp,],) + ptr = builder.inttoptr(len_addr, cgutils.intp_t.as_pointer()) + return builder.load(ptr) + + return sig, codegen + + +@overload_method(types.ListType, "_allocated") +def impl_allocated(l): + """list._allocated() + """ + if isinstance(l, types.ListType): + def impl(l): + return _list_allocated(l) + + return impl + + +@intrinsic +def _list_allocated(typingctx, l): + """Wrap numba_list_allocated + + Returns the allocation of the list. 
+ """ + resty = types.intp + sig = resty(l) + + def codegen(context, builder, sig, args): + fnty = ir.FunctionType( + ll_ssize_t, + [ll_list_type], + ) + fn = cgutils.get_or_insert_function(builder.module, fnty, + 'numba_list_allocated') + [l] = args + [tl] = sig.args + lp = _container_get_data(context, builder, tl, l) + n = builder.call(fn, [lp]) + return n + + return sig, codegen + + +@overload_method(types.ListType, "_is_mutable") +def impl_is_mutable(l): + """list._is_mutable()""" + if isinstance(l, types.ListType): + def impl(l): + return bool(_list_is_mutable(l)) + + return impl + + +@intrinsic +def _list_is_mutable(typingctx, l): + """Wrap numba_list_is_mutable + + Returns the state of the is_mutable member + """ + resty = types.int32 + sig = resty(l) + + def codegen(context, builder, sig, args): + fnty = ir.FunctionType( + ll_status, + [ll_list_type], + ) + fn = cgutils.get_or_insert_function(builder.module, fnty, + 'numba_list_is_mutable') + [l] = args + [tl] = sig.args + lp = _container_get_data(context, builder, tl, l) + n = builder.call(fn, [lp]) + return n + + return sig, codegen + + +@overload_method(types.ListType, "_make_mutable") +def impl_make_mutable(l): + """list._make_mutable()""" + if isinstance(l, types.ListType): + def impl(l): + _list_set_is_mutable(l, 1) + + return impl + + +@overload_method(types.ListType, "_make_immutable") +def impl_make_immutable(l): + """list._make_immutable()""" + if isinstance(l, types.ListType): + def impl(l): + _list_set_is_mutable(l, 0) + + return impl + + +@intrinsic +def _list_set_is_mutable(typingctx, l, is_mutable): + """Wrap numba_list_set_mutable + + Sets the state of the is_mutable member. 
+ """ + resty = types.void + sig = resty(l, is_mutable) + + def codegen(context, builder, sig, args): + fnty = ir.FunctionType( + ir.VoidType(), + [ll_list_type, cgutils.intp_t], + ) + fn = cgutils.get_or_insert_function(builder.module, fnty, + 'numba_list_set_is_mutable') + [l, i] = args + [tl, ti] = sig.args + lp = _container_get_data(context, builder, tl, l) + builder.call(fn, [lp, i]) + + return sig, codegen + + +@intrinsic +def _list_append(typingctx, l, item): + """Wrap numba_list_append + """ + resty = types.int32 + sig = resty(l, l.item_type) + + def codegen(context, builder, sig, args): + fnty = ir.FunctionType( + ll_status, + [ll_list_type, ll_bytes], + ) + [l, item] = args + [tl, titem] = sig.args + fn = cgutils.get_or_insert_function(builder.module, fnty, + 'numba_list_append') + + dm_item = context.data_model_manager[titem] + + data_item = dm_item.as_data(builder, item) + + ptr_item = cgutils.alloca_once_value(builder, data_item) + + lp = _container_get_data(context, builder, tl, l) + status = builder.call( + fn, + [ + lp, + _as_bytes(builder, ptr_item), + ], + ) + return status + + return sig, codegen + + +@overload_method(types.ListType, 'append') +def impl_append(l, item): + if not isinstance(l, types.ListType): + return + + itemty = l.item_type + + def impl(l, item): + casteditem = _cast(item, itemty) + status = _list_append(l, casteditem) + if status == ListStatus.LIST_OK: + return + elif status == ListStatus.LIST_ERR_IMMUTABLE: + raise ValueError('list is immutable') + elif status == ListStatus.LIST_ERR_NO_MEMORY: + raise MemoryError('Unable to allocate memory to append item') + else: + raise RuntimeError('list.append failed unexpectedly') + + if l.is_precise(): + # Handle the precise case. + return impl + else: + # Handle the imprecise case. + l = l.refine(item) + # Re-bind the item type to match the arguments. + itemty = l.item_type + # Create the signature that we wanted this impl to have. 
+ sig = typing.signature(types.void, l, itemty) + return sig, impl + + +@intrinsic +def fix_index(tyctx, list_ty, index_ty): + sig = types.intp(list_ty, index_ty) + + def codegen(context, builder, sig, args): + [list_ty, index_ty] = sig.args + [ll_list, ll_idx] = args + is_negative = builder.icmp_signed('<', ll_idx, + ir.Constant(ll_idx.type, 0)) + fast_len_sig, length_fn = _list_length._defn(context.typing_context, + list_ty) + length = length_fn(context, builder, fast_len_sig, (ll_list,)) + # length is an intp + # index can be any sort of int + # indexing in general is done with a ssize_t which correlates to an + # intp. In llvmlite sext and trunc are guarded to return the value + # itself if the types are the same, so there's no need to handle the + # "equal widths" case separately. This sexts/truncs the index to the + # length type such that `add` works for the wraparound case. + st = 'sext' if ll_idx.type.width < length.type.width else 'trunc' + op = getattr(builder, st) + fixedup_idx = op(ll_idx, length.type) + wrapped_index = builder.add(fixedup_idx, length) + return builder.select(is_negative, wrapped_index, fixedup_idx) + return sig, codegen + + +@register_jitable +def handle_index(l, index): + """Handle index. + + If the index is negative, convert it. If the index is out of range, raise + an IndexError. + """ + # convert negative indices to positive ones + index = fix_index(l, index) + # check that the index is in range + if index < 0 or index >= len(l): + raise IndexError("list index out of range") + return index + + +@register_jitable +def handle_slice(l, s): + """Handle slice. + + Convert a slice object for a given list into a range object that can be + used to index the list. Many subtle caveats here, especially if the step is + negative. 
+ """ + if len(l) == 0: # ignore slice for empty list + return range(0) + ll, sa, so, se = len(l), s.start, s.stop, s.step + if se > 0: + start = max(ll + sa, 0) if s.start < 0 else min(ll, sa) + stop = max(ll + so, 0) if so < 0 else min(ll, so) + elif se < 0: + start = max(ll + sa, -1) if s.start < 0 else min(ll - 1, sa) + stop = max(ll + so, -1) if so < 0 else min(ll, so) + else: + # should be caught earlier, but isn't, so we raise here + raise ValueError("slice step cannot be zero") + return range(start, stop, s.step) + + +def _gen_getitem(borrowed): + + @intrinsic + def impl(typingctx, l_ty, index_ty): + + is_none = isinstance(l_ty.item_type, types.NoneType) + if is_none: + resty = types.Tuple([types.int32, l_ty.item_type]) + else: + resty = types.Tuple([types.int32, types.Optional(l_ty.item_type)]) + sig = resty(l_ty, index_ty) + + def codegen(context, builder, sig, args): + [tl, tindex] = sig.args + [l, index] = args + fnty = ir.FunctionType( + ll_voidptr_type, + [ll_list_type], + ) + fname = 'numba_list_base_ptr' + fn = cgutils.get_or_insert_function(builder.module, fnty, fname) + fn.attributes.add('alwaysinline') + fn.attributes.add('nounwind') + fn.attributes.add('readonly') + + lp = _container_get_data(context, builder, tl, l) + + base_ptr = builder.call( + fn, + [lp,], + ) + + llty = context.get_data_type(tl.item_type) + casted_base_ptr = builder.bitcast(base_ptr, llty.as_pointer()) + + item_ptr = cgutils.gep(builder, casted_base_ptr, index) + + if is_none: + out = builder.load(item_ptr) + else: + out = context.make_optional_none(builder, tl.item_type) + pout = cgutils.alloca_once_value(builder, out) + + dm_item = context.data_model_manager[tl.item_type] + item = dm_item.load_from_data_pointer(builder, item_ptr) + if not borrowed: + context.nrt.incref(builder, tl.item_type, item) + + if is_none: + loaded = item + else: + loaded = context.make_optional_value(builder, tl.item_type, + item) + builder.store(loaded, pout) + + out = builder.load(pout) + return 
context.make_tuple(builder, resty, [ll_status(0), out]) + + return sig, codegen + return impl + + +_list_getitem = _gen_getitem(False) +_list_getitem_borrowed = _gen_getitem(True) + + +@overload(operator.getitem) +def impl_getitem(l, index): + if not isinstance(l, types.ListType): + return + + indexty = INDEXTY + itemty = l.item_type + IS_NOT_NONE = not isinstance(l.item_type, types.NoneType) + + if index in index_types: + if IS_NOT_NONE: + def integer_non_none_impl(l, index): + castedindex = _cast(index, indexty) + handledindex = handle_index(l, castedindex) + status, item = _list_getitem(l, handledindex) + if status == ListStatus.LIST_OK: + return _nonoptional(item) + else: + raise AssertionError("internal list error during getitem") + return integer_non_none_impl + else: + def integer_none_impl(l, index): + index = handle_index(l, index) + return None + return integer_none_impl + + elif isinstance(index, types.SliceType): + def slice_impl(l, index): + newl = new_list(itemty) + for i in handle_slice(l, index): + newl.append(l[i]) + return newl + + return slice_impl + + else: + raise TypingError("list indices must be integers or slices") + + +@intrinsic +def _list_setitem(typingctx, l, index, item): + """Wrap numba_list_setitem + """ + resty = types.int32 + sig = resty(l, index, item) + + def codegen(context, builder, sig, args): + fnty = ir.FunctionType( + ll_status, + [ll_list_type, ll_ssize_t, ll_bytes], + ) + [l, index, item] = args + [tl, tindex, titem] = sig.args + fn = cgutils.get_or_insert_function(builder.module, fnty, + 'numba_list_setitem') + + dm_item = context.data_model_manager[titem] + data_item = dm_item.as_data(builder, item) + ptr_item = cgutils.alloca_once_value(builder, data_item) + + lp = _container_get_data(context, builder, tl, l) + status = builder.call( + fn, + [ + lp, + index, + _as_bytes(builder, ptr_item), + ], + ) + return status + + return sig, codegen + + +@overload(operator.setitem) +def impl_setitem(l, index, item): + if not 
isinstance(l, types.ListType): + return + + indexty = INDEXTY + itemty = l.item_type + + if index in index_types: + def impl_integer(l, index, item): + index = handle_index(l, index) + castedindex = _cast(index, indexty) + casteditem = _cast(item, itemty) + status = _list_setitem(l, castedindex, casteditem) + if status == ListStatus.LIST_OK: + return + elif status == ListStatus.LIST_ERR_IMMUTABLE: + raise ValueError("list is immutable") + else: + raise AssertionError("internal list error during settitem") + + return impl_integer + + elif isinstance(index, types.SliceType): + if not isinstance(item, types.IterableType): + raise TypingError("can only assign an iterable when using a slice " + "with assignment/setitem") + + def impl_slice(l, index, item): + if not l._is_mutable(): + raise ValueError("list is immutable") + # special case "a[i:j] = a", need to copy first + if l is item: + item = item.copy() + slice_range = handle_slice(l, index) + # non-extended (simple) slices + if slice_range.step == 1: + # replace + if len(item) == len(slice_range): + for i, j in zip(slice_range, item): + l[i] = j + # replace and insert + if len(item) > len(slice_range): + # do the replaces we can + for i, j in zip(slice_range, item[:len(slice_range)]): + l[i] = j + # insert the remaining ones + insert_range = range(slice_range.stop, + slice_range.stop + + len(item) - len(slice_range)) + for i, k in zip(insert_range, item[len(slice_range):]): + # FIXME: This may be slow. Each insert can incur a + # memory copy of one or more items. 
+ l.insert(i, k) + # replace and delete + if len(item) < len(slice_range): + # do the replaces we can + replace_range = range(slice_range.start, + slice_range.start + len(item)) + for i,j in zip(replace_range, item): + l[i] = j + # delete remaining ones + del l[slice_range.start + len(item):slice_range.stop] + # Extended slices + else: + if len(slice_range) != len(item): + raise ValueError("length mismatch for extended slice " + "and sequence") + # extended slice can only replace + for i, j in zip(slice_range, item): + l[i] = j + + return impl_slice + + else: + raise TypingError("list indices must be integers or slices") + + +@overload_method(types.ListType, 'pop') +def impl_pop(l, index=-1): + if not isinstance(l, types.ListType): + return + + _check_for_none_typed(l, 'pop') + + indexty = INDEXTY + + # FIXME: this type check works, but it isn't clear why and if it optimal + if (isinstance(index, int) + or index in index_types + or isinstance(index, types.Omitted)): + def impl(l, index=-1): + if len(l) == 0: + raise IndexError("pop from empty list") + cindex = _cast(handle_index(l, index), indexty) + item = l[cindex] + del l[cindex] + return item + return impl + + else: + raise TypingError("argument for pop must be an integer") + + +@intrinsic +def _list_delitem(typingctx, l, index): + resty = types.int32 + sig = resty(l, index) + + def codegen(context, builder, sig, args): + fnty = ir.FunctionType( + ll_status, + [ll_list_type, ll_ssize_t], + ) + [tl, tindex] = sig.args + [l, index] = args + fn = cgutils.get_or_insert_function(builder.module, fnty, + 'numba_list_delitem') + + lp = _container_get_data(context, builder, tl, l) + status = builder.call(fn, [lp, index]) + return status + + return sig, codegen + + +@intrinsic +def _list_delete_slice(typingctx, l, start, stop, step): + """Wrap numba_list_delete_slice + """ + resty = types.int32 + sig = resty(l, start, stop, step) + + def codegen(context, builder, sig, args): + fnty = ir.FunctionType( + ll_status, + 
[ll_list_type, ll_ssize_t, ll_ssize_t, ll_ssize_t], + ) + [l, start, stop, step] = args + [tl, tstart, tstop, tstep] = sig.args + fn = cgutils.get_or_insert_function(builder.module, fnty, + 'numba_list_delete_slice') + + lp = _container_get_data(context, builder, tl, l) + status = builder.call( + fn, + [ + lp, + start, + stop, + step, + ], + ) + return status + + return sig, codegen + + +@overload(operator.delitem) +def impl_delitem(l, index): + if not isinstance(l, types.ListType): + return + + _check_for_none_typed(l, 'delitem') + + if index in index_types: + def integer_impl(l, index): + cindex = _cast(handle_index(l, index), INDEXTY) + status = _list_delitem(l, cindex) + if status == ListStatus.LIST_OK: + return + elif status == ListStatus.LIST_ERR_IMMUTABLE: + raise ValueError("list is immutable") + else: + raise AssertionError("internal list error during delitem") + return integer_impl + + elif isinstance(index, types.SliceType): + def slice_impl(l, index): + slice_range = handle_slice(l, index) + status = _list_delete_slice( + l, + slice_range.start, + slice_range.stop, + slice_range.step) + if status == ListStatus.LIST_ERR_MUTATED: + raise ValueError("list is immutable") + return slice_impl + + else: + raise TypingError("list indices must be integers or slices") + + +@overload(operator.contains) +def impl_contains(l, item): + if not isinstance(l, types.ListType): + return + + itemty = l.item_type + _check_for_none_typed(l, "__contains__") + + def impl(l, item): + casteditem = _cast(item, itemty) + for i in l: + if i == casteditem: + return True + else: + return False + return impl + + +@overload_method(types.ListType, 'count') +def impl_count(l, item): + if not isinstance(l, types.ListType): + return + + _check_for_none_typed(l, 'count') + + itemty = l.item_type + + def impl(l, item): + casteditem = _cast(item, itemty) + total = 0 + for i in l: + if i == casteditem: + total += 1 + return total + + return impl + + +@overload_method(types.ListType, 'extend') 
+def impl_extend(l, iterable): + if not isinstance(l, types.ListType): + return + if not isinstance(iterable, types.IterableType): + raise TypingError("extend argument must be iterable") + + _check_for_none_typed(l, 'extend') + + def select_impl(): + if isinstance(iterable, types.ListType): + def impl(l, iterable): + if not l._is_mutable(): + raise ValueError("list is immutable") + # guard against l.extend(l) + if l is iterable: + iterable = iterable.copy() + for i in iterable: + l.append(i) + + return impl + else: + def impl(l, iterable): + for i in iterable: + l.append(i) + + return impl + + if l.is_precise(): + # Handle the precise case. + return select_impl() + else: + # Handle the imprecise case, try to 'guess' the underlying type of the + # values in the iterable. + if hasattr(iterable, "dtype"): # tuples and arrays + ty = iterable.dtype + elif hasattr(iterable, "item_type"): # lists + ty = iterable.item_type + elif hasattr(iterable, "yield_type"): # iterators and generators + ty = iterable.yield_type + elif isinstance(iterable, types.UnicodeType): + ty = iterable + else: + raise TypingError("unable to extend list, iterable is missing " + "either *dtype*, *item_type* or *yield_type*.") + l = l.refine(ty) + # Create the signature that we wanted this impl to have + sig = typing.signature(types.void, l, iterable) + return sig, select_impl() + + +@overload_method(types.ListType, 'insert') +def impl_insert(l, index, item): + if not isinstance(l, types.ListType): + return + + _check_for_none_typed(l, 'insert') + # insert can refine + if isinstance(item, NoneType): + raise TypingError("method support for List[None] is limited") + + if index in index_types: + def impl(l, index, item): + # If the index is larger than the size of the list or if the list is + # empty, just append. 
+ if index >= len(l) or len(l) == 0: + l.append(item) + # Else, do the insert dance + else: + # convert negative indices + if index < 0: + # if the index is still negative after conversion, use 0 + index = max(len(l) + index, 0) + # grow the list by one, make room for item to insert + l.append(l[0]) + # reverse iterate over the list and shift all elements + i = len(l) - 1 + while (i > index): + l[i] = l[i - 1] + i -= 1 + # finally, insert the item + l[index] = item + + if l.is_precise(): + # Handle the precise case. + return impl + else: + # Handle the imprecise case + l = l.refine(item) + # Re-bind the item type to match the arguments. + itemty = l.item_type + # Create the signature that we wanted this impl to have. + sig = typing.signature(types.void, l, INDEXTY, itemty) + return sig, impl + else: + raise TypingError("list insert indices must be integers") + + +@overload_method(types.ListType, 'remove') +def impl_remove(l, item): + if not isinstance(l, types.ListType): + return + + _check_for_none_typed(l, 'remove') + + itemty = l.item_type + + def impl(l, item): + casteditem = _cast(item, itemty) + for i, n in enumerate(l): + if casteditem == n: + del l[i] + return + else: + raise ValueError("list.remove(x): x not in list") + + return impl + + +@overload_method(types.ListType, 'clear') +def impl_clear(l): + if not isinstance(l, types.ListType): + return + + def impl(l): + while len(l): + del l[-1] + + return impl + + +@overload_method(types.ListType, 'reverse') +def impl_reverse(l): + if not isinstance(l, types.ListType): + return + + _check_for_none_typed(l, 'reverse') + + def impl(l): + if not l._is_mutable(): + raise ValueError("list is immutable") + front = 0 + back = len(l) - 1 + while front < back: + l[front], l[back] = l[back], l[front] + front += 1 + back -= 1 + + return impl + + +@overload_method(types.ListType, 'copy') +def impl_copy(l): + + _check_for_none_typed(l, 'copy') + + itemty = l.item_type + + if isinstance(l, types.ListType): + def impl(l): + 
newl = new_list(itemty, len(l)) + for i in l: + newl.append(i) + return newl + + return impl + + +@overload_method(types.ListType, 'index') +def impl_index(l, item, start=None, end=None): + if not isinstance(l, types.ListType): + return + + _check_for_none_typed(l, 'index') + + itemty = l.item_type + + def check_arg(arg, name): + if not (arg is None + or arg in index_types + or isinstance(arg, (types.Omitted, types.NoneType))): + raise TypingError("{} argument for index must be an integer" + .format(name)) + check_arg(start, "start") + check_arg(end, "end") + + def impl(l, item, start=None, end=None): + casteditem = _cast(item, itemty) + for i in handle_slice(l, slice(start, end, 1)): + if l[i] == casteditem: + return i + else: + raise ValueError("item not in list") + + return impl + + +@overload_method(types.ListType, "sort") +def ol_list_sort(lst, key=None, reverse=False): + # The following is mostly borrowed from listobj.ol_list_sort + from numba.typed import List + + listobj._sort_check_key(key) + listobj._sort_check_reverse(reverse) + + if cgutils.is_nonelike(key): + KEY = False + sort_f = listobj.sort_forwards + sort_b = listobj.sort_backwards + elif isinstance(key, types.Dispatcher): + KEY = True + sort_f = listobj.arg_sort_forwards + sort_b = listobj.arg_sort_backwards + + def impl(lst, key=None, reverse=False): + if not lst._is_mutable(): + raise ValueError("list is immutable") + if KEY is True: + # There's an unknown refct problem in reflected list. + # Using an explicit loop with typedlist somehow "fixed" it. + _lst = List() + for x in lst: + _lst.append(key(x)) + else: + _lst = lst + if reverse is False or reverse == 0: + tmp = sort_f(_lst) + else: + tmp = sort_b(_lst) + if KEY is True: + # There's an unknown refct problem in reflected list. + # Using an explicit loop with typedlist somehow "fixed" it. 
+ ordered = List() + for i in tmp: + ordered.append(lst[i]) + lst[:] = ordered + return impl + + +@overload_method(types.ListType, "getitem_unchecked") +def ol_getitem_unchecked(lst, index): + if not isinstance(index, types.Integer): + return + + def impl(lst, index): + index = fix_index(lst, index) + castedindex = _cast(index, types.intp) + _, item = _list_getitem(lst, castedindex) + return _nonoptional(item) + return impl + + +@overload_attribute(types.ListType, '__hash__') +def ol_list_hash(lst): + if not isinstance(lst, types.ListType): + return + return lambda lst: None + + +@overload_attribute(types.ListType, '_dtype') +def impl_dtype(l): + if not isinstance(l, types.ListType): + return + dt = l.dtype + + def impl(l): + return dt + + return impl + + +def _equals_helper(this, other, OP): + if not isinstance(this, types.ListType): + return + if not isinstance(other, types.ListType): + return lambda this, other: False + + this_is_none = isinstance(this.dtype, types.NoneType) + other_is_none = isinstance(other.dtype, types.NoneType) + + if this_is_none or other_is_none: + def impl_some_none(this, other): + def equals(this, other): + # Equal if both none-typed and have equal length + return bool(this_is_none == other_is_none + and len(this) == len(other)) + return OP(equals(this, other)) + return impl_some_none + else: + def impl_not_none(this, other): + def equals(this, other): + if len(this) != len(other): + return False + for i in range(len(this)): + if this[i] != other[i]: + return False + else: + return True + return OP(equals(this, other)) + return impl_not_none + + +@overload(operator.eq) +def impl_equals(this, other): + return _equals_helper(this, other, operator.truth) + + +@overload(operator.ne) +def impl_not_equals(this, other): + return _equals_helper(this, other, operator.not_) + + +@register_jitable +def compare_not_none(this, other): + """Oldschool (python 2.x) cmp. 
+ + if this < other return -1 + if this = other return 0 + if this > other return 1 + """ + if len(this) != len(other): + return -1 if len(this) < len(other) else 1 + for i in range(len(this)): + this_item, other_item = this[i], other[i] + if this_item != other_item: + return -1 if this_item < other_item else 1 + else: + return 0 + + +@register_jitable +def compare_some_none(this, other, this_is_none, other_is_none): + """Oldschool (python 2.x) cmp for None typed lists. + + if this < other return -1 + if this = other return 0 + if this > other return 1 + """ + if len(this) != len(other): + return -1 if len(this) < len(other) else 1 + if this_is_none and other_is_none: # both none + return 0 + # to get here there is precisely one none, and if the first is none, by + # induction, the second cannot be + return -1 if this_is_none else 1 + + +def compare_helper(this, other, accepted): + if not isinstance(this, types.ListType): + return + if not isinstance(other, types.ListType): + return lambda this, other: False + + this_is_none = isinstance(this.dtype, types.NoneType) + other_is_none = isinstance(other.dtype, types.NoneType) + + if this_is_none or other_is_none: + def impl(this, other): + return compare_some_none( + this, other, this_is_none, other_is_none) in accepted + else: + def impl(this, other): + return compare_not_none(this, other) in accepted + return impl + + +@overload(operator.lt) +def impl_less_than(this, other): + return compare_helper(this, other, (-1, )) + + +@overload(operator.le) +def impl_less_than_or_equal(this, other): + return compare_helper(this, other, (-1, 0)) + + +@overload(operator.gt) +def impl_greater_than(this, other): + return compare_helper(this, other, (1,)) + + +@overload(operator.ge) +def impl_greater_than_or_equal(this, other): + return compare_helper(this, other, (0, 1)) + + +class ListIterInstance(object): + + def __init__(self, context, builder, iter_type, iter_val): + self._context = context + self._builder = builder + 
self._iter_ty = iter_type + self._list_ty = self._iter_ty.parent + self._iter = context.make_helper(builder, iter_type, iter_val) + + @classmethod + def from_list(cls, context, builder, iter_type, list_val): + self = cls(context, builder, iter_type, None) + index = context.get_constant(types.intp, 0) + self._iter.index = cgutils.alloca_once_value(builder, index) + self._iter.parent = list_val + self._iter.size = cls._size_of_list(context, builder, self._list_ty, + self._iter.parent) + return self + + @classmethod + def _size_of_list(cls, context, builder, list_ty, ll_list): + tyctx = context.typing_context + fnty = tyctx.resolve_value_type(len) + sig = fnty.get_call_type(tyctx, (list_ty,), {}) + impl = context.get_function(fnty, sig) + return impl(builder, (ll_list,)) + + @property + def size(self): + tyctx = self._context.typing_context + fnty = tyctx.resolve_value_type(len) + ty = self._list_ty + sig = fnty.get_call_type(tyctx, (ty,), {}) + impl = self._context.get_function(fnty, sig) + return impl(self._builder, (self._iter.parent,)) + + @property + def value(self): + return self._iter._getvalue() + + def getitem(self, index): + tyctx = self._context.typing_context + ty = self._list_ty + sig, fn = _list_getitem_borrowed._defn(tyctx, ty, types.intp) + + statnitem = fn(self._context, self._builder, sig, (self._iter.parent, + index)) + _, item = cgutils.unpack_tuple(self._builder, statnitem) + retty = sig.return_type[1] + if isinstance(self._list_ty.dtype, types.NoneType): + raw_ty = self._list_ty.dtype + else: + raw_ty = retty.type + raw_item = self._context.cast(self._builder, item, retty, raw_ty) + return raw_item + + @property + def index(self): + return self._builder.load(self._iter.index) + + @index.setter + def index(self, value): + self._builder.store(value, self._iter.index) + + +@lower_builtin('getiter', types.ListType) +def getiter_list(context, builder, sig, args): + inst = ListIterInstance.from_list(context, builder, sig.return_type, + args[0]) + 
return impl_ret_borrowed(context, builder, sig.return_type, inst.value) + + +@lower_builtin('iternext', types.ListTypeIteratorType) +@iternext_impl(RefType.BORROWED) +def iternext_listiter(context, builder, sig, args, result): + inst = ListIterInstance(context, builder, sig.args[0], args[0]) + index = inst.index + + nitems = inst.size # this is current size + init_size = inst._iter.size # this is initial size + + # if the current count is different to the initial count, bail, list is + # being mutated whilst iterated. + is_mutated = builder.icmp_signed('!=', init_size, nitems) + with builder.if_then(is_mutated, likely=False): + context.call_conv.return_user_exc( + builder, RuntimeError, ("list was mutated during iteration",)) + + is_valid = builder.icmp_signed('<', index, nitems) + result.set_valid(is_valid) + with builder.if_then(is_valid): + result.yield_(inst.getitem(index)) + inst.index = builder.add(index, context.get_constant(types.intp, 1)) diff --git a/venv/lib/python3.10/site-packages/numba/typed/py.typed b/venv/lib/python3.10/site-packages/numba/typed/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/numba/typed/typeddict.py b/venv/lib/python3.10/site-packages/numba/typed/typeddict.py new file mode 100644 index 0000000000000000000000000000000000000000..a542063dcb569c123f6d1ba309846edae8f8e98d --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/typed/typeddict.py @@ -0,0 +1,417 @@ +""" +Python wrapper that connects CPython interpreter to the numba dictobject. 
+""" +from collections.abc import MutableMapping, Iterable, Mapping +from numba.core.types import DictType +from numba.core.imputils import numba_typeref_ctor +from numba import njit, typeof +from numba.core import types, errors, config, cgutils +from numba.core.extending import ( + overload, + box, + unbox, + NativeValue, + type_callable, + overload_classmethod, +) +from numba.typed import dictobject +from numba.core.typing import signature + + +@njit +def _make_dict(keyty, valty, n_keys=0): + return dictobject._as_meminfo(dictobject.new_dict(keyty, valty, + n_keys=n_keys)) + + +@njit +def _length(d): + return len(d) + + +@njit +def _setitem(d, key, value): + d[key] = value + + +@njit +def _getitem(d, key): + return d[key] + + +@njit +def _delitem(d, key): + del d[key] + + +@njit +def _contains(d, key): + return key in d + + +@njit +def _get(d, key, default): + return d.get(key, default) + + +@njit +def _setdefault(d, key, default): + return d.setdefault(key, default) + + +@njit +def _iter(d): + return list(d.keys()) + + +@njit +def _popitem(d): + return d.popitem() + + +@njit +def _copy(d): + return d.copy() + + +def _from_meminfo_ptr(ptr, dicttype): + d = Dict(meminfo=ptr, dcttype=dicttype) + return d + + +class Dict(MutableMapping): + """A typed-dictionary usable in Numba compiled functions. + + Implements the MutableMapping interface. + """ + + def __new__(cls, dcttype=None, meminfo=None, n_keys=0): + if config.DISABLE_JIT: + return dict.__new__(dict) + else: + return object.__new__(cls) + + @classmethod + def empty(cls, key_type, value_type, n_keys=0): + """Create a new empty Dict with *key_type* and *value_type* + as the types for the keys and values of the dictionary respectively. + + Optionally, allocate enough memory to hold *n_keys* without requiring + resizes. The default value of 0 returns a dict with minimum size. 
+ """ + if config.DISABLE_JIT: + return dict() + else: + return cls(dcttype=DictType(key_type, value_type), n_keys=n_keys) + + def __init__(self, *args, **kwargs): + """ + For users, the constructor does not take any parameters. + The keyword arguments are for internal use only. + + Parameters + ---------- + dcttype : numba.core.types.DictType; keyword-only + Used internally for the dictionary type. + meminfo : MemInfo; keyword-only + Used internally to pass the MemInfo object when boxing. + """ + if kwargs: + self._dict_type, self._opaque = self._parse_arg(**kwargs) + else: + self._dict_type = None + + if args: + # CPython checks for at most 1 argument + # https://github.com/python/cpython/blob/f215d7cac9a6f9b51ba864e4252686dee4e45d64/Objects/dictobject.c#L2693-L2695 + _len = len(args) + if _len > 1: + raise errors.TypingError("Dict expect at most 1 argument, " + f"got {_len}") + + # check if argument is iterable + arg = args[0] + if not isinstance(arg, Iterable): + msg = (f"'{type(arg)}' object is not iterable. Supported type " + "constructor are Dict() and Dict(iterable)") + raise errors.TypingError(msg) + elif isinstance(arg, Mapping): + raise errors.TypingError("dict(mapping) is not supported") + + for idx, item in enumerate(arg): + if len(item) != 2: + msg = (f"dictionary update sequence element #{idx} has " + f"length {len(item)}; 2 is required") + raise ValueError(msg) + k, v = item + self.__setitem__(k, v) + + def _parse_arg(self, dcttype, meminfo=None, n_keys=0): + if not isinstance(dcttype, DictType): + raise TypeError('*dcttype* must be a DictType') + + if meminfo is not None: + opaque = meminfo + else: + opaque = _make_dict(dcttype.key_type, dcttype.value_type, + n_keys=n_keys) + return dcttype, opaque + + @property + def _numba_type_(self): + if self._dict_type is None: + raise TypeError("invalid operation on untyped dictionary") + return self._dict_type + + @property + def _typed(self): + """Returns True if the dictionary is typed. 
+ """ + return self._dict_type is not None + + def _initialise_dict(self, key, value): + dcttype = types.DictType(typeof(key), typeof(value)) + self._dict_type, self._opaque = self._parse_arg(dcttype) + + def __getitem__(self, key): + if not self._typed: + raise KeyError(key) + else: + return _getitem(self, key) + + def __setitem__(self, key, value): + if not self._typed: + self._initialise_dict(key, value) + return _setitem(self, key, value) + + def __delitem__(self, key): + if not self._typed: + raise KeyError(key) + _delitem(self, key) + + def __iter__(self): + if not self._typed: + return iter(()) + else: + return iter(_iter(self)) + + def __len__(self): + if not self._typed: + return 0 + else: + return _length(self) + + def __contains__(self, key): + if len(self) == 0: + return False + else: + return _contains(self, key) + + def __str__(self): + buf = [] + for k, v in self.items(): + buf.append("{}: {}".format(k, v)) + return '{{{0}}}'.format(', '.join(buf)) + + def __repr__(self): + body = str(self) + prefix = str(self._dict_type) + return "{prefix}({body})".format(prefix=prefix, body=body) + + def get(self, key, default=None): + if not self._typed: + return default + return _get(self, key, default) + + def setdefault(self, key, default=None): + if not self._typed: + if default is not None: + self._initialise_dict(key, default) + return _setdefault(self, key, default) + + def popitem(self): + if len(self) == 0: + raise KeyError('dictionary is empty') + return _popitem(self) + + def copy(self): + return _copy(self) + + +@overload_classmethod(types.DictType, 'empty') +def typeddict_empty(cls, key_type, value_type, n_keys=0): + if cls.instance_type is not DictType: + return + + def impl(cls, key_type, value_type, n_keys=0): + return dictobject.new_dict(key_type, value_type, n_keys=n_keys) + + return impl + + +@box(types.DictType) +def box_dicttype(typ, val, c): + context = c.context + builder = c.builder + + # XXX deduplicate + ctor = 
cgutils.create_struct_proxy(typ) + dstruct = ctor(context, builder, value=val) + # Returns the plain MemInfo + boxed_meminfo = c.box( + types.MemInfoPointer(types.voidptr), + dstruct.meminfo, + ) + + modname = c.context.insert_const_string( + c.builder.module, 'numba.typed.typeddict', + ) + typeddict_mod = c.pyapi.import_module(modname) + fmp_fn = c.pyapi.object_getattr_string(typeddict_mod, '_from_meminfo_ptr') + + dicttype_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ)) + + result_var = builder.alloca(c.pyapi.pyobj) + builder.store(cgutils.get_null_value(c.pyapi.pyobj), result_var) + with builder.if_then(cgutils.is_not_null(builder, dicttype_obj)): + res = c.pyapi.call_function_objargs( + fmp_fn, (boxed_meminfo, dicttype_obj), + ) + c.pyapi.decref(fmp_fn) + c.pyapi.decref(typeddict_mod) + c.pyapi.decref(boxed_meminfo) + builder.store(res, result_var) + return builder.load(result_var) + + +@unbox(types.DictType) +def unbox_dicttype(typ, val, c): + context = c.context + + # Check that `type(val) is Dict` + dict_type = c.pyapi.unserialize(c.pyapi.serialize_object(Dict)) + valtype = c.pyapi.object_type(val) + same_type = c.builder.icmp_unsigned("==", valtype, dict_type) + + with c.builder.if_else(same_type) as (then, orelse): + with then: + miptr = c.pyapi.object_getattr_string(val, '_opaque') + + mip_type = types.MemInfoPointer(types.voidptr) + native = c.unbox(mip_type, miptr) + + mi = native.value + + argtypes = mip_type, typeof(typ) + + def convert(mi, typ): + return dictobject._from_meminfo(mi, typ) + + sig = signature(typ, *argtypes) + nil_typeref = context.get_constant_null(argtypes[1]) + args = (mi, nil_typeref) + is_error, dctobj = c.pyapi.call_jit_code(convert, sig, args) + # decref here because we are stealing a reference. 
+ c.context.nrt.decref(c.builder, typ, dctobj) + + c.pyapi.decref(miptr) + bb_unboxed = c.builder.basic_block + + with orelse: + # Raise error on incorrect type + c.pyapi.err_format( + "PyExc_TypeError", + "can't unbox a %S as a %S", + valtype, dict_type, + ) + bb_else = c.builder.basic_block + + # Phi nodes to gather the output + dctobj_res = c.builder.phi(dctobj.type) + is_error_res = c.builder.phi(is_error.type) + + dctobj_res.add_incoming(dctobj, bb_unboxed) + dctobj_res.add_incoming(dctobj.type(None), bb_else) + + is_error_res.add_incoming(is_error, bb_unboxed) + is_error_res.add_incoming(cgutils.true_bit, bb_else) + + # cleanup + c.pyapi.decref(dict_type) + c.pyapi.decref(valtype) + + return NativeValue(dctobj_res, is_error=is_error_res) + + +@type_callable(DictType) +def typeddict_call(context): + """ + Defines typing logic for ``Dict()`` and ``Dict(iterable)``. + Produces Dict[undefined, undefined] or Dict[key, value] + """ + def typer(arg=None): + if arg is None: + return types.DictType(types.undefined, types.undefined) + elif isinstance(arg, types.DictType): + return arg + elif isinstance(arg, types.Tuple) and len(arg) == 0: # Dict(()) + msg = "non-precise type 'dict(())'" + raise errors.TypingError(msg) + elif isinstance(arg, types.IterableType): + dtype = arg.iterator_type.yield_type + if isinstance(dtype, types.UniTuple): + key = value = dtype.key[0] + return types.DictType(key, value) + elif isinstance(dtype, types.Tuple): + key, value = dtype.key + return types.DictType(key, value) + return typer + + +@overload(numba_typeref_ctor) +def impl_numba_typeref_ctor(cls, *args): + """ + Defines lowering for ``Dict()`` and ``Dict(iterable)``. + + The type-inferred version of the dictionary ctor. + + Parameters + ---------- + cls : TypeRef + Expecting a TypeRef of a precise DictType. + args: tuple + A tuple that contains a single iterable (optional) + + Returns + ------- + impl : function + An implementation suitable for lowering the constructor call. 
+ + See also: `redirect_type_ctor` in numba/cpython/builtins.py + """ + dict_ty = cls.instance_type + if not isinstance(dict_ty, types.DictType): + return # reject + # Ensure the dictionary is precisely typed. + if not dict_ty.is_precise(): + msg = "expecting a precise DictType but got {}".format(dict_ty) + raise errors.LoweringError(msg) + + key_type = types.TypeRef(dict_ty.key_type) + value_type = types.TypeRef(dict_ty.value_type) + + if args: + if isinstance(args[0], types.IterableType): + def impl(cls, *args): + # Instantiate an empty dict and populate it with values from + # the iterable. + d = Dict.empty(key_type, value_type) + for k, v in args[0]: + d[k] = v + return d + else: + def impl(cls, *args): + # Simply call .empty() with the key/value types from *cls* + return Dict.empty(key_type, value_type) + + return impl diff --git a/venv/lib/python3.10/site-packages/numba/typed/typedlist.py b/venv/lib/python3.10/site-packages/numba/typed/typedlist.py new file mode 100644 index 0000000000000000000000000000000000000000..2c90dabeac43c5728ab2242e5ac75b7ad0e266c2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/typed/typedlist.py @@ -0,0 +1,688 @@ +""" +Python wrapper that connects CPython interpreter to the Numba typed-list. + +This is the code that is used when creating typed lists outside of a `@jit` +context and when returning a typed-list from a `@jit` decorated function. It +basically a Python class that has a Numba allocated typed-list under the hood +and uses `@jit` functions to access it. Since it inherits from MutableSequence +it should really quack like the CPython `list`. 
+ +""" +from collections.abc import MutableSequence + +from numba.core.types import ListType +from numba.core.imputils import numba_typeref_ctor +from numba.core.dispatcher import Dispatcher +from numba.core import types, config, cgutils +from numba import njit, typeof +from numba.core.extending import ( + overload, + box, + unbox, + NativeValue, + type_callable, + overload_classmethod, +) +from numba.typed import listobject +from numba.core.errors import TypingError, LoweringError +from numba.core.typing.templates import Signature +import typing as pt + + +Int_or_Slice = pt.Union["pt.SupportsIndex", slice] + + +T_co = pt.TypeVar('T_co', covariant=True) + + +class _Sequence(pt.Protocol[T_co]): + def __getitem__(self, i: int) -> T_co: + ... + + def __len__(self) -> int: + ... + + +DEFAULT_ALLOCATED = listobject.DEFAULT_ALLOCATED + + +@njit +def _make_list(itemty, allocated=DEFAULT_ALLOCATED): + return listobject._as_meminfo(listobject.new_list(itemty, + allocated=allocated)) + + +@njit +def _length(l): + return len(l) + + +@njit +def _allocated(l): + return l._allocated() + + +@njit +def _is_mutable(l): + return l._is_mutable() + + +@njit +def _make_mutable(l): + return l._make_mutable() + + +@njit +def _make_immutable(l): + return l._make_immutable() + + +@njit +def _append(l, item): + l.append(item) + + +@njit +def _setitem(l, i, item): + l[i] = item + + +@njit +def _getitem(l, i): + return l[i] + + +@njit +def _contains(l, item): + return item in l + + +@njit +def _count(l, item): + return l.count(item) + + +@njit +def _pop(l, i): + return l.pop(i) + + +@njit +def _delitem(l, i): + del l[i] + + +@njit +def _extend(l, iterable): + return l.extend(iterable) + + +@njit +def _insert(l, i, item): + l.insert(i, item) + + +@njit +def _remove(l, item): + l.remove(item) + + +@njit +def _clear(l): + l.clear() + + +@njit +def _reverse(l): + l.reverse() + + +@njit +def _copy(l): + return l.copy() + + +@njit +def _eq(t, o): + return t == o + + +@njit +def _ne(t, o): + return 
t != o + + +@njit +def _lt(t, o): + return t < o + + +@njit +def _le(t, o): + return t <= o + + +@njit +def _gt(t, o): + return t > o + + +@njit +def _ge(t, o): + return t >= o + + +@njit +def _index(l, item, start, end): + return l.index(item, start, end) + + +@njit +def _sort(l, key, reverse): + return l.sort(key, reverse) + + +def _from_meminfo_ptr(ptr, listtype): + return List(meminfo=ptr, lsttype=listtype) + + +T = pt.TypeVar('T') +T_or_ListT = pt.Union[T, 'List[T]'] + + +class List(MutableSequence, pt.Generic[T]): + """A typed-list usable in Numba compiled functions. + + Implements the MutableSequence interface. + """ + + _legal_kwargs = ["lsttype", "meminfo", "allocated"] + + def __new__(cls, + *args, + lsttype=None, + meminfo=None, + allocated=DEFAULT_ALLOCATED, + **kwargs): + if config.DISABLE_JIT: + return list(*args, **kwargs) + else: + return object.__new__(cls) + + @classmethod + def empty_list(cls, item_type, allocated=DEFAULT_ALLOCATED): + """Create a new empty List. + + Parameters + ---------- + item_type: Numba type + type of the list item. + allocated: int + number of items to pre-allocate + """ + if config.DISABLE_JIT: + return list() + else: + return cls(lsttype=ListType(item_type), allocated=allocated) + + def __init__(self, *args, **kwargs): + """ + For users, the constructor does not take any parameters. + The keyword arguments are for internal use only. + + Parameters + ---------- + args: iterable + The iterable to initialize the list from + lsttype : numba.core.types.ListType; keyword-only + Used internally for the list type. + meminfo : MemInfo; keyword-only + Used internally to pass the MemInfo object when boxing. 
+ allocated: int; keyword-only + Used internally to pre-allocate space for items + """ + illegal_kwargs = any((kw not in self._legal_kwargs for kw in kwargs)) + if illegal_kwargs or args and kwargs: + raise TypeError("List() takes no keyword arguments") + if kwargs: + self._list_type, self._opaque = self._parse_arg(**kwargs) + else: + self._list_type = None + if args: + if not 0 <= len(args) <= 1: + raise TypeError( + "List() expected at most 1 argument, got {}" + .format(len(args)) + ) + iterable = args[0] + # Special case Numpy scalars or anything that quacks like a + # NumPy Array. + if hasattr(iterable, "ndim") and iterable.ndim == 0: + self.append(iterable.item()) + else: + try: + iter(iterable) + except TypeError: + raise TypeError("List() argument must be iterable") + for i in args[0]: + self.append(i) + + def _parse_arg(self, lsttype, meminfo=None, allocated=DEFAULT_ALLOCATED): + if not isinstance(lsttype, ListType): + raise TypeError('*lsttype* must be a ListType') + + if meminfo is not None: + opaque = meminfo + else: + opaque = _make_list(lsttype.item_type, allocated=allocated) + return lsttype, opaque + + @property + def _numba_type_(self): + if self._list_type is None: + raise TypeError("invalid operation on untyped list") + return self._list_type + + @property + def _typed(self): + """Returns True if the list is typed. 
+ """ + return self._list_type is not None + + @property + def _dtype(self): + if not self._typed: + raise RuntimeError("invalid operation on untyped list") + return self._list_type.dtype + + def _initialise_list(self, item): + lsttype = types.ListType(typeof(item)) + self._list_type, self._opaque = self._parse_arg(lsttype) + + def __len__(self) -> int: + if not self._typed: + return 0 + else: + return _length(self) + + def _allocated(self): + if not self._typed: + return DEFAULT_ALLOCATED + else: + return _allocated(self) + + def _is_mutable(self): + return _is_mutable(self) + + def _make_mutable(self): + return _make_mutable(self) + + def _make_immutable(self): + return _make_immutable(self) + + def __eq__(self, other): + return _eq(self, other) + + def __ne__(self, other): + return _ne(self, other) + + def __lt__(self, other): + return _lt(self, other) + + def __le__(self, other): + return _le(self, other) + + def __gt__(self, other): + return _gt(self, other) + + def __ge__(self, other): + return _ge(self, other) + + def append(self, item: T) -> None: + if not self._typed: + self._initialise_list(item) + _append(self, item) + + # noqa F811 comments required due to github.com/PyCQA/pyflakes/issues/592 + # noqa E704 required to follow overload style of using ... in the same line + @pt.overload # type: ignore[override] + def __setitem__(self, i: int, o: T) -> None: ... # noqa: F811, E704 + @pt.overload + def __setitem__(self, s: slice, o: 'List[T]') -> None: ... # noqa: F811, E704, E501 + + def __setitem__(self, i: Int_or_Slice, item: T_or_ListT) -> None: # noqa: F811, E501 + if not self._typed: + self._initialise_list(item) + _setitem(self, i, item) + + # noqa F811 comments required due to github.com/PyCQA/pyflakes/issues/592 + # noqa E704 required to follow overload style of using ... in the same line + @pt.overload + def __getitem__(self, i: int) -> T: ... # noqa: F811, E704 + @pt.overload + def __getitem__(self, i: slice) -> 'List[T]': ... 
# noqa: F811, E704 + + def __getitem__(self, i: Int_or_Slice) -> T_or_ListT: # noqa: F811 + if not self._typed: + raise IndexError + else: + return _getitem(self, i) + + def __iter__(self) -> pt.Iterator[T]: + for i in range(len(self)): + yield self[i] + + def __contains__(self, item: T) -> bool: # type: ignore[override] + return _contains(self, item) + + def __delitem__(self, i: Int_or_Slice) -> None: + _delitem(self, i) + + def insert(self, i: int, item: T) -> None: + if not self._typed: + self._initialise_list(item) + _insert(self, i, item) + + def count(self, item: T) -> int: + return _count(self, item) + + def pop(self, i: "pt.SupportsIndex" = -1) -> T: + return _pop(self, i) + + def extend(self, iterable: "_Sequence[T]") -> None: #type: ignore[override] + # Empty iterable, do nothing + if len(iterable) == 0: + return None + if not self._typed: + # Need to get the first element of the iterable to initialise the + # type of the list. FIXME: this may be a problem if the iterable + # can not be sliced. + self._initialise_list(iterable[0]) + return _extend(self, iterable) + + def remove(self, item: T) -> None: + return _remove(self, item) + + def clear(self): + return _clear(self) + + def reverse(self): + return _reverse(self) + + def copy(self): + return _copy(self) + + def index(self, item: T, start: pt.Optional[int] = None, + stop: pt.Optional[int] = None) -> int: + return _index(self, item, start, stop) + + def sort(self, key=None, reverse=False): + """Sort the list inplace. + + See also ``list.sort()`` + """ + # If key is not already a dispatcher object, make it so + if callable(key) and not isinstance(key, Dispatcher): + key = njit(key) + return _sort(self, key, reverse) + + def __str__(self): + buf = [] + for x in self: + buf.append("{}".format(x)) + # Check whether the code was invoked from IPython shell + try: + get_ipython + preview = ', '.join(buf[:1000]) + suffix = ', ...' 
if len(buf) > 1000 else '' + return '[{0}{1}]'.format(preview, suffix) + except (NameError, IndexError): + return '[{0}]'.format(', '.join(buf)) + + def __repr__(self): + body = str(self) + prefix = str(self._list_type) if self._typed else "ListType[Undefined]" + return "{prefix}({body})".format(prefix=prefix, body=body) + + +@overload_classmethod(ListType, 'empty_list') +def typedlist_empty(cls, item_type, allocated=DEFAULT_ALLOCATED): + if cls.instance_type is not ListType: + return + + def impl(cls, item_type, allocated=DEFAULT_ALLOCATED): + return listobject.new_list(item_type, allocated=allocated) + + return impl + + +@box(types.ListType) +def box_lsttype(typ, val, c): + context = c.context + builder = c.builder + + # XXX deduplicate + ctor = cgutils.create_struct_proxy(typ) + lstruct = ctor(context, builder, value=val) + # Returns the plain MemInfo + boxed_meminfo = c.box( + types.MemInfoPointer(types.voidptr), + lstruct.meminfo, + ) + + modname = c.context.insert_const_string( + c.builder.module, 'numba.typed.typedlist', + ) + typedlist_mod = c.pyapi.import_module(modname) + fmp_fn = c.pyapi.object_getattr_string(typedlist_mod, '_from_meminfo_ptr') + + lsttype_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ)) + + result_var = builder.alloca(c.pyapi.pyobj) + builder.store(cgutils.get_null_value(c.pyapi.pyobj), result_var) + + with builder.if_then(cgutils.is_not_null(builder, lsttype_obj)): + res = c.pyapi.call_function_objargs( + fmp_fn, (boxed_meminfo, lsttype_obj), + ) + c.pyapi.decref(fmp_fn) + c.pyapi.decref(typedlist_mod) + c.pyapi.decref(boxed_meminfo) + builder.store(res, result_var) + return builder.load(result_var) + + +@unbox(types.ListType) +def unbox_listtype(typ, val, c): + context = c.context + builder = c.builder + + # Check that `type(val) is Dict` + list_type = c.pyapi.unserialize(c.pyapi.serialize_object(List)) + valtype = c.pyapi.object_type(val) + same_type = builder.icmp_unsigned("==", valtype, list_type) + + with 
c.builder.if_else(same_type) as (then, orelse): + with then: + miptr = c.pyapi.object_getattr_string(val, '_opaque') + + native = c.unbox(types.MemInfoPointer(types.voidptr), miptr) + + mi = native.value + ctor = cgutils.create_struct_proxy(typ) + lstruct = ctor(context, builder) + + data_pointer = context.nrt.meminfo_data(builder, mi) + data_pointer = builder.bitcast( + data_pointer, + listobject.ll_list_type.as_pointer(), + ) + + lstruct.data = builder.load(data_pointer) + lstruct.meminfo = mi + + lstobj = lstruct._getvalue() + c.pyapi.decref(miptr) + bb_unboxed = c.builder.basic_block + + with orelse: + # Raise error on incorrect type + c.pyapi.err_format( + "PyExc_TypeError", + "can't unbox a %S as a %S", + valtype, list_type, + ) + bb_else = c.builder.basic_block + + # Phi nodes to gather the output + lstobj_res = c.builder.phi(lstobj.type) + is_error_res = c.builder.phi(cgutils.bool_t) + + lstobj_res.add_incoming(lstobj, bb_unboxed) + lstobj_res.add_incoming(lstobj.type(None), bb_else) + + is_error_res.add_incoming(cgutils.false_bit, bb_unboxed) + is_error_res.add_incoming(cgutils.true_bit, bb_else) + + # cleanup + c.pyapi.decref(list_type) + c.pyapi.decref(valtype) + + return NativeValue(lstobj_res, is_error=is_error_res) + + +# +# The following contains the logic for the type-inferred constructor +# + +def _guess_dtype(iterable): + """Guess the correct dtype of the iterable type. """ + if not isinstance(iterable, types.IterableType): + raise TypingError( + "List() argument must be iterable") + # Special case for nested NumPy arrays. 
+ elif isinstance(iterable, types.Array) and iterable.ndim > 1: + return iterable.copy(ndim=iterable.ndim - 1, layout='A') + elif hasattr(iterable, "dtype"): + return iterable.dtype + elif hasattr(iterable, "yield_type"): + return iterable.yield_type + elif isinstance(iterable, types.UnicodeType): + return iterable + elif isinstance(iterable, types.DictType): + return iterable.key_type + else: + # This should never happen, since the 'dtype' of any iterable + # should have determined above. + raise TypingError( + "List() argument does not have a suitable dtype") + + +@type_callable(ListType) +def typedlist_call(context): + """Defines typing logic for ``List()`` and ``List(iterable)``. + + If no argument is given, the returned typer types a new typed-list with an + undefined item type. If a single argument is given it must be iterable with + a guessable 'dtype'. In this case, the typer types a new typed-list with + the type set to the 'dtype' of the iterable arg. + + Parameters + ---------- + arg : single iterable (optional) + The single optional argument. + + Returns + ------- + typer : function + A typer suitable to type constructor calls. + + Raises + ------ + The returned typer raises a TypingError in case of unsuitable arguments. 
+ + """ + + class Typer(object): + + def attach_sig(self): + from inspect import signature as mypysig + + def mytyper(iterable): + pass + self.pysig = mypysig(mytyper) + + def __call__(self, *args, **kwargs): + if kwargs: + raise TypingError( + "List() takes no keyword arguments" + ) + elif args: + if not 0 <= len(args) <= 1: + raise TypingError( + "List() expected at most 1 argument, got {}" + .format(len(args)) + ) + rt = types.ListType(_guess_dtype(args[0])) + self.attach_sig() + return Signature(rt, args, None, pysig=self.pysig) + else: + item_type = types.undefined + return types.ListType(item_type) + + return Typer() + + +@overload(numba_typeref_ctor) +def impl_numba_typeref_ctor(cls, *args): + """Defines lowering for ``List()`` and ``List(iterable)``. + + This defines the lowering logic to instantiate either an empty typed-list + or a typed-list initialised with values from a single iterable argument. + + Parameters + ---------- + cls : TypeRef + Expecting a TypeRef of a precise ListType. + args: tuple + A tuple that contains a single iterable (optional) + + Returns + ------- + impl : function + An implementation suitable for lowering the constructor call. + + See also: `redirect_type_ctor` in numba/cpython/bulitins.py + """ + list_ty = cls.instance_type + if not isinstance(list_ty, types.ListType): + return # reject + # Ensure the list is precisely typed. + if not list_ty.is_precise(): + msg = "expecting a precise ListType but got {}".format(list_ty) + raise LoweringError(msg) + + item_type = types.TypeRef(list_ty.item_type) + if args: + # special case 0d Numpy arrays + if isinstance(args[0], types.Array) and args[0].ndim == 0: + def impl(cls, *args): + # Instantiate an empty list and populate it with the single + # value from the array. + r = List.empty_list(item_type) + r.append(args[0].item()) + return r + else: + def impl(cls, *args): + # Instantiate an empty list and populate it with values from + # the iterable. 
+ r = List.empty_list(item_type) + for i in args[0]: + r.append(i) + return r + else: + def impl(cls, *args): + # Simply call .empty_list with the item type from *cls* + return List.empty_list(item_type) + + return impl diff --git a/venv/lib/python3.10/site-packages/numba/typed/typedobjectutils.py b/venv/lib/python3.10/site-packages/numba/typed/typedobjectutils.py new file mode 100644 index 0000000000000000000000000000000000000000..7bb20488ea55e171b8bb0acca3c49f7b3adf484d --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/typed/typedobjectutils.py @@ -0,0 +1,200 @@ +""" Common compiler level utilities for typed dict and list. """ + +import operator +import warnings + +from llvmlite import ir + +from numba.core import types, cgutils +from numba.core import typing +from numba.core.registry import cpu_target +from numba.core.typeconv import Conversion +from numba.core.extending import intrinsic +from numba.core.errors import (TypingError, NumbaTypeSafetyWarning, + NumbaTypeError) + + +def _as_bytes(builder, ptr): + """Helper to do (void*)ptr + """ + return builder.bitcast(ptr, cgutils.voidptr_t) + + +@intrinsic +def _cast(typingctx, val, typ): + """Cast *val* to *typ* + """ + def codegen(context, builder, signature, args): + [val, typ] = args + context.nrt.incref(builder, signature.return_type, val) + return val + # Using implicit casting in argument types + casted = typ.instance_type + _sentry_safe_cast(val, casted) + sig = casted(casted, typ) + return sig, codegen + + +def _sentry_safe_cast(fromty, toty): + """Check and raise TypingError if *fromty* cannot be safely cast to *toty* + """ + tyctxt = cpu_target.typing_context + fromty, toty = map(types.unliteral, (fromty, toty)) + by = tyctxt.can_convert(fromty, toty) + + def warn(): + m = 'unsafe cast from {} to {}. Precision may be lost.' 
+ warnings.warn(m.format(fromty, toty), + category=NumbaTypeSafetyWarning) + + isint = lambda x: isinstance(x, types.Integer) + isflt = lambda x: isinstance(x, types.Float) + iscmplx = lambda x: isinstance(x, types.Complex) + isdict = lambda x: isinstance(x, types.DictType) + # Only check against numeric types. + if by is None or by > Conversion.safe: + if isint(fromty) and isint(toty): + # Accept if both types are ints + warn() + elif isint(fromty) and isflt(toty): + # Accept if ints to floats + warn() + elif isflt(fromty) and isflt(toty): + # Accept if floats to floats + warn() + elif iscmplx(fromty) and iscmplx(toty): + # Accept if complex to complex + warn() + elif isdict(fromty) and isdict(toty): + pass # it's complaining about initial values being different + elif not isinstance(toty, types.Number): + # Non-numbers + warn() + else: + # Make it a hard error for numeric type that changes domain. + m = 'cannot safely cast {} to {}. Please cast explicitly.' + raise TypingError(m.format(fromty, toty)) + + +def _sentry_safe_cast_default(default, valty): + """Similar to _sentry_safe_cast but handle default value. + """ + # Handle default values + # TODO: simplify default values; too many possible way to spell None + if default is None: + return + if isinstance(default, (types.Omitted, types.NoneType)): + return + return _sentry_safe_cast(default, valty) + + +@intrinsic +def _nonoptional(typingctx, val): + """Typing trick to cast Optional[T] to T + """ + if not isinstance(val, types.Optional): + raise NumbaTypeError('expected an optional') + + def codegen(context, builder, sig, args): + context.nrt.incref(builder, sig.return_type, args[0]) + return args[0] + + casted = val.type + sig = casted(casted) + return sig, codegen + + +def _container_get_data(context, builder, container_ty, c): + """Helper to get the C list pointer in a numba containers. 
+ """ + ctor = cgutils.create_struct_proxy(container_ty) + conatainer_struct = ctor(context, builder, value=c) + return conatainer_struct.data + + +def _container_get_meminfo(context, builder, container_ty, c): + """Helper to get the meminfo for a container + """ + ctor = cgutils.create_struct_proxy(container_ty) + conatainer_struct = ctor(context, builder, value=c) + return conatainer_struct.meminfo + + +def _get_incref_decref(context, module, datamodel, container_element_type): + assert datamodel.contains_nrt_meminfo() + + fe_type = datamodel.fe_type + data_ptr_ty = datamodel.get_data_type().as_pointer() + refct_fnty = ir.FunctionType(ir.VoidType(), [data_ptr_ty]) + incref_fn = cgutils.get_or_insert_function( + module, refct_fnty, '.numba_{}.{}_incref'.format( + context.fndesc.mangled_name, container_element_type),) + + builder = ir.IRBuilder(incref_fn.append_basic_block()) + context.nrt.incref( + builder, fe_type, + datamodel.load_from_data_pointer(builder, incref_fn.args[0]), + ) + builder.ret_void() + + decref_fn = cgutils.get_or_insert_function( + module, refct_fnty, name='.numba_{}.{}_decref'.format( + context.fndesc.mangled_name, container_element_type),) + builder = ir.IRBuilder(decref_fn.append_basic_block()) + context.nrt.decref( + builder, fe_type, + datamodel.load_from_data_pointer(builder, decref_fn.args[0]), + ) + builder.ret_void() + + return incref_fn, decref_fn + + +def _get_equal(context, module, datamodel, container_element_type): + assert datamodel.contains_nrt_meminfo() + + fe_type = datamodel.fe_type + data_ptr_ty = datamodel.get_data_type().as_pointer() + + wrapfnty = context.call_conv.get_function_type(types.int32, + [fe_type, fe_type]) + argtypes = [fe_type, fe_type] + + def build_wrapper(fn): + builder = ir.IRBuilder(fn.append_basic_block()) + args = context.call_conv.decode_arguments(builder, argtypes, fn) + + sig = typing.signature(types.boolean, fe_type, fe_type) + op = operator.eq + fnop = context.typing_context.resolve_value_type(op) 
+ fnop.get_call_type(context.typing_context, sig.args, {}) + eqfn = context.get_function(fnop, sig) + res = eqfn(builder, args) + intres = context.cast(builder, res, types.boolean, types.int32) + context.call_conv.return_value(builder, intres) + + wrapfn = cgutils.get_or_insert_function( + module, wrapfnty, name='.numba_{}.{}_equal.wrap'.format( + context.fndesc.mangled_name, container_element_type)) + build_wrapper(wrapfn) + + equal_fnty = ir.FunctionType(ir.IntType(32), [data_ptr_ty, data_ptr_ty]) + equal_fn = cgutils.get_or_insert_function( + module, equal_fnty, name='.numba_{}.{}_equal'.format( + context.fndesc.mangled_name, container_element_type),) + builder = ir.IRBuilder(equal_fn.append_basic_block()) + lhs = datamodel.load_from_data_pointer(builder, equal_fn.args[0]) + rhs = datamodel.load_from_data_pointer(builder, equal_fn.args[1]) + + status, retval = context.call_conv.call_function( + builder, wrapfn, types.int32, argtypes, [lhs, rhs], + ) + with builder.if_then(status.is_ok, likely=True): + with builder.if_then(status.is_none): + builder.ret(context.get_constant(types.int32, 0)) + retval = context.cast(builder, retval, types.boolean, types.int32) + builder.ret(retval) + # Error out + builder.ret(context.get_constant(types.int32, -1)) + + return equal_fn diff --git a/venv/lib/python3.10/site-packages/numba/types/__init__.py b/venv/lib/python3.10/site-packages/numba/types/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..825cc24401cd83f70f5fa44574a0d1f33553aa23 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numba/types/__init__.py @@ -0,0 +1,3 @@ +import sys +from numba.core.utils import _RedirectSubpackage +sys.modules[__name__] = _RedirectSubpackage(locals(), "numba.core.types") diff --git a/venv/lib/python3.10/site-packages/numba/types/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numba/types/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..7d74e18ad20ff6a90cdf088e27dc7f5adc4ed70e Binary files /dev/null and b/venv/lib/python3.10/site-packages/numba/types/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/opentelemetry_exporter_otlp_proto_common-1.26.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/opentelemetry_exporter_otlp_proto_common-1.26.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/opentelemetry_exporter_otlp_proto_common-1.26.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/opentelemetry_exporter_otlp_proto_common-1.26.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/opentelemetry_exporter_otlp_proto_common-1.26.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..5e7570b80298ca890248a3e0b22dd91b96b8f951 --- /dev/null +++ b/venv/lib/python3.10/site-packages/opentelemetry_exporter_otlp_proto_common-1.26.0.dist-info/METADATA @@ -0,0 +1,51 @@ +Metadata-Version: 2.3 +Name: opentelemetry-exporter-otlp-proto-common +Version: 1.26.0 +Summary: OpenTelemetry Protobuf encoding +Project-URL: Homepage, https://github.com/open-telemetry/opentelemetry-python/tree/main/exporter/opentelemetry-exporter-otlp-proto-common +Author-email: OpenTelemetry Authors +License: Apache-2.0 +License-File: LICENSE +Classifier: Development Status :: 5 - Production/Stable +Classifier: Framework :: OpenTelemetry +Classifier: Framework :: OpenTelemetry :: Exporters +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 
+Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Requires-Python: >=3.8 +Requires-Dist: opentelemetry-proto==1.26.0 +Description-Content-Type: text/x-rst + +OpenTelemetry Protobuf Encoding +=============================== + +|pypi| + +.. |pypi| image:: https://badge.fury.io/py/opentelemetry-exporter-otlp-proto-common.svg + :target: https://pypi.org/project/opentelemetry-exporter-otlp-proto-common/ + +This library is provided as a convenience to encode to Protobuf. Currently used by: + +* opentelemetry-exporter-otlp-proto-grpc +* opentelemetry-exporter-otlp-proto-http + + +Installation +------------ + +:: + + pip install opentelemetry-exporter-otlp-proto-common + + +References +---------- + +* `OpenTelemetry `_ +* `OpenTelemetry Protocol Specification `_ diff --git a/venv/lib/python3.10/site-packages/opentelemetry_exporter_otlp_proto_common-1.26.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/opentelemetry_exporter_otlp_proto_common-1.26.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..ee2f1e9a886984cac8457e58d0445dc8b7b82739 --- /dev/null +++ b/venv/lib/python3.10/site-packages/opentelemetry_exporter_otlp_proto_common-1.26.0.dist-info/RECORD @@ -0,0 +1,24 @@ +opentelemetry/exporter/otlp/proto/common/__init__.py,sha256=YWtqvL-G6zhW4ffqKorRYXYS2AaURt7DRseCiqBkJh0,686 +opentelemetry/exporter/otlp/proto/common/__pycache__/__init__.cpython-310.pyc,, +opentelemetry/exporter/otlp/proto/common/__pycache__/_log_encoder.cpython-310.pyc,, +opentelemetry/exporter/otlp/proto/common/__pycache__/metrics_encoder.cpython-310.pyc,, +opentelemetry/exporter/otlp/proto/common/__pycache__/trace_encoder.cpython-310.pyc,, +opentelemetry/exporter/otlp/proto/common/__pycache__/version.cpython-310.pyc,, +opentelemetry/exporter/otlp/proto/common/_internal/__init__.py,sha256=C5ZJ3Qb0NRmH15k-6cCHlertSqeFr3vPkefnMa1WK3A,5434 
+opentelemetry/exporter/otlp/proto/common/_internal/__pycache__/__init__.cpython-310.pyc,, +opentelemetry/exporter/otlp/proto/common/_internal/_log_encoder/__init__.py,sha256=itBktCAO0h4Kkt2-Hd5r1bqJRh4mmArglREkQcjmWgU,3318 +opentelemetry/exporter/otlp/proto/common/_internal/_log_encoder/__pycache__/__init__.cpython-310.pyc,, +opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__init__.py,sha256=PDUtB47mZGmCXU92uw92rHPHv1UFMbUNh9EFveWFz48,13522 +opentelemetry/exporter/otlp/proto/common/_internal/metrics_encoder/__pycache__/__init__.cpython-310.pyc,, +opentelemetry/exporter/otlp/proto/common/_internal/trace_encoder/__init__.py,sha256=SMDmsABKy3IGoeiFMkptUCIWcMWzBPZX9rG40_JEi5k,6619 +opentelemetry/exporter/otlp/proto/common/_internal/trace_encoder/__pycache__/__init__.cpython-310.pyc,, +opentelemetry/exporter/otlp/proto/common/_log_encoder.py,sha256=Z_YgLKvwFggTFCwY9XE3ayjNEKWbfV5T_jnt3V8PkcU,710 +opentelemetry/exporter/otlp/proto/common/metrics_encoder.py,sha256=fjToqUyngmE1vv0bKOWAPNvAjj4rQjG5-oass1TAVEc,719 +opentelemetry/exporter/otlp/proto/common/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +opentelemetry/exporter/otlp/proto/common/trace_encoder.py,sha256=BLdY5F73uejAQIAeMBW7Pmi5sE7n1Gbtt59P22GF0jk,713 +opentelemetry/exporter/otlp/proto/common/version.py,sha256=ANYEMcxW_7kp7m-QhNKZUKat8Jf1JBtQ3N9YJF-3SLU,608 +opentelemetry_exporter_otlp_proto_common-1.26.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +opentelemetry_exporter_otlp_proto_common-1.26.0.dist-info/METADATA,sha256=XJSQGQ10v8NKxIWzJJh6FlUaE45sTb4cWAvW0vkdwSs,1793 +opentelemetry_exporter_otlp_proto_common-1.26.0.dist-info/RECORD,, +opentelemetry_exporter_otlp_proto_common-1.26.0.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87 +opentelemetry_exporter_otlp_proto_common-1.26.0.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357 diff --git 
a/venv/lib/python3.10/site-packages/opentelemetry_exporter_otlp_proto_common-1.26.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/opentelemetry_exporter_otlp_proto_common-1.26.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..cdd68a497cdfa8d3f2b837225beacef711b85047 --- /dev/null +++ b/venv/lib/python3.10/site-packages/opentelemetry_exporter_otlp_proto_common-1.26.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.25.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/venv/lib/python3.10/site-packages/opentelemetry_exporter_otlp_proto_common-1.26.0.dist-info/licenses/LICENSE b/venv/lib/python3.10/site-packages/opentelemetry_exporter_otlp_proto_common-1.26.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/venv/lib/python3.10/site-packages/opentelemetry_exporter_otlp_proto_common-1.26.0.dist-info/licenses/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__init__.py b/venv/lib/python3.10/site-packages/pydantic_extra_types/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..98100211af8c7383d12650ff864624f4e3a967bb --- /dev/null +++ b/venv/lib/python3.10/site-packages/pydantic_extra_types/__init__.py @@ -0,0 +1 @@ +__version__ = '2.10.5' diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eca7dba365e38c7f84a063a14cb7d1db71376ddf Binary files /dev/null and b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/color.cpython-310.pyc b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/color.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88a8e523ae189f5678d319f95ce2b6ed1c165da1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/color.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/coordinate.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/coordinate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..709ec18ddfa7c127737143d1b580940553e571f4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/coordinate.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/country.cpython-310.pyc b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/country.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02a8fe8302639fd887ef66b65ee71f893d22d934 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/country.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/currency_code.cpython-310.pyc b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/currency_code.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc450228f9140c133878a248335bfcd8f15e77de Binary files /dev/null and b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/currency_code.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/domain.cpython-310.pyc b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/domain.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dafda8a175df1e39351e8889e68dfe634c84896b Binary files /dev/null and b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/domain.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/epoch.cpython-310.pyc b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/epoch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c75ab9b8d88a12a1db0bf07bc2a3bcb438248b60 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/epoch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/isbn.cpython-310.pyc b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/isbn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6e209e5facb1d0d3f599250b6c05900d8ac9da2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/isbn.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/language_code.cpython-310.pyc b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/language_code.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6bc9a906843738988a50ef135388879eb82e3c0f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/language_code.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/mac_address.cpython-310.pyc b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/mac_address.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a880f87960d77c26de3659a218dbb971be501c47 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/mac_address.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/mongo_object_id.cpython-310.pyc b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/mongo_object_id.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ca87265103c4485d633dd12bbbaccb97ad2576b Binary files /dev/null and b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/mongo_object_id.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/path.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/path.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..268fc1a15d14cb6764f9238f0d0a07de5cb21e55 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/path.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/payment.cpython-310.pyc b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/payment.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efde67a9828bfbb60b26ed4edb4f482233dc2e3b Binary files /dev/null and b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/payment.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/pendulum_dt.cpython-310.pyc b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/pendulum_dt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8e9d6423720bfffa985fde868a515a0b6e1050b Binary files /dev/null and b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/pendulum_dt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/phone_numbers.cpython-310.pyc b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/phone_numbers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b833ada806019c76ff23a09a7a54ae74b7aa0cee Binary files /dev/null and b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/phone_numbers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/routing_number.cpython-310.pyc b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/routing_number.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb2bea5ec0ceacf8a096daadf2beea265857351f Binary files /dev/null 
and b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/routing_number.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/s3.cpython-310.pyc b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/s3.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1f04d77f43cded09d6073fe9993788fbeb44642 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/s3.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/script_code.cpython-310.pyc b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/script_code.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f3267e85b2b81139dd9a79445a1a7ce1fff7020 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/script_code.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/semantic_version.cpython-310.pyc b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/semantic_version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef0b12a3ab438383c229b97b4e078ccaa7c5ed34 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/semantic_version.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/semver.cpython-310.pyc b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/semver.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d90075e3d79ec9d937f143609bbeadb73f866ec5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/semver.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/timezone_name.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/timezone_name.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab5d48977af0eb3a39c6c8fbe7f85938a04d5380 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/timezone_name.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/ulid.cpython-310.pyc b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/ulid.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..763d917a8629152fa075650841eb9e05764d1b46 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pydantic_extra_types/__pycache__/ulid.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/color.py b/venv/lib/python3.10/site-packages/pydantic_extra_types/color.py new file mode 100644 index 0000000000000000000000000000000000000000..18d9633376b42b8bc4bd66cb816391ec9cf0ecac --- /dev/null +++ b/venv/lib/python3.10/site-packages/pydantic_extra_types/color.py @@ -0,0 +1,598 @@ +"""Color definitions are used as per the CSS3 +[CSS Color Module Level 3](http://www.w3.org/TR/css3-color/#svg-color) specification. + +A few colors have multiple names referring to the sames colors, eg. `grey` and `gray` or `aqua` and `cyan`. + +In these cases the _last_ color when sorted alphabetically takes preferences, +eg. `Color((0, 255, 255)).as_named() == 'cyan'` because "cyan" comes after "aqua". 
+""" + +from __future__ import annotations + +import math +import re +from colorsys import hls_to_rgb, rgb_to_hls +from typing import Any, Callable, Literal, Tuple, Union, cast + +from pydantic import GetJsonSchemaHandler +from pydantic._internal import _repr +from pydantic.json_schema import JsonSchemaValue +from pydantic_core import CoreSchema, PydanticCustomError, core_schema + +ColorTuple = Union[Tuple[int, int, int], Tuple[int, int, int, float]] +ColorType = Union[ColorTuple, str, 'Color'] +HslColorTuple = Union[Tuple[float, float, float], Tuple[float, float, float, float]] + + +class RGBA: + """Internal use only as a representation of a color.""" + + __slots__ = 'r', 'g', 'b', 'alpha', '_tuple' + + def __init__(self, r: float, g: float, b: float, alpha: float | None): + self.r = r + self.g = g + self.b = b + self.alpha = alpha + + self._tuple: tuple[float, float, float, float | None] = (r, g, b, alpha) + + def __getitem__(self, item: Any) -> Any: + return self._tuple[item] + + +# these are not compiled here to avoid import slowdown, they'll be compiled the first time they're used, then cached +_r_255 = r'(\d{1,3}(?:\.\d+)?)' +_r_comma = r'\s*,\s*' +_r_alpha = r'(\d(?:\.\d+)?|\.\d+|\d{1,2}%)' +_r_h = r'(-?\d+(?:\.\d+)?|-?\.\d+)(deg|rad|turn)?' 
+_r_sl = r'(\d{1,3}(?:\.\d+)?)%' +r_hex_short = r'\s*(?:#|0x)?([0-9a-f])([0-9a-f])([0-9a-f])([0-9a-f])?\s*' +r_hex_long = r'\s*(?:#|0x)?([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})?\s*' +# CSS3 RGB examples: rgb(0, 0, 0), rgba(0, 0, 0, 0.5), rgba(0, 0, 0, 50%) +r_rgb = rf'\s*rgba?\(\s*{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_255}(?:{_r_comma}{_r_alpha})?\s*\)\s*' +# CSS3 HSL examples: hsl(270, 60%, 50%), hsla(270, 60%, 50%, 0.5), hsla(270, 60%, 50%, 50%) +r_hsl = rf'\s*hsla?\(\s*{_r_h}{_r_comma}{_r_sl}{_r_comma}{_r_sl}(?:{_r_comma}{_r_alpha})?\s*\)\s*' +# CSS4 RGB examples: rgb(0 0 0), rgb(0 0 0 / 0.5), rgb(0 0 0 / 50%), rgba(0 0 0 / 50%) +r_rgb_v4_style = rf'\s*rgba?\(\s*{_r_255}\s+{_r_255}\s+{_r_255}(?:\s*/\s*{_r_alpha})?\s*\)\s*' +# CSS4 HSL examples: hsl(270 60% 50%), hsl(270 60% 50% / 0.5), hsl(270 60% 50% / 50%), hsla(270 60% 50% / 50%) +r_hsl_v4_style = rf'\s*hsla?\(\s*{_r_h}\s+{_r_sl}\s+{_r_sl}(?:\s*/\s*{_r_alpha})?\s*\)\s*' + +# colors where the two hex characters are the same, if all colors match this the short version of hex colors can be used +repeat_colors = {int(c * 2, 16) for c in '0123456789abcdef'} +rads = 2 * math.pi + + +class Color(_repr.Representation): + """Represents a color.""" + + __slots__ = '_original', '_rgba' + + def __init__(self, value: ColorType) -> None: + self._rgba: RGBA + self._original: ColorType + if isinstance(value, (tuple, list)): + self._rgba = parse_tuple(value) + elif isinstance(value, str): + self._rgba = parse_str(value) + elif isinstance(value, Color): + self._rgba = value._rgba + value = value._original + else: + raise PydanticCustomError( + 'color_error', + 'value is not a valid color: value must be a tuple, list or string', + ) + + # if we've got here value must be a valid color + self._original = value + + @classmethod + def __get_pydantic_json_schema__( + cls, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler + ) -> JsonSchemaValue: + field_schema: dict[str, Any] = {} + 
field_schema.update(type='string', format='color') + return field_schema + + def original(self) -> ColorType: + """Original value passed to `Color`.""" + return self._original + + def as_named(self, *, fallback: bool = False) -> str: + """Returns the name of the color if it can be found in `COLORS_BY_VALUE` dictionary, + otherwise returns the hexadecimal representation of the color or raises `ValueError`. + + Args: + fallback: If True, falls back to returning the hexadecimal representation of + the color instead of raising a ValueError when no named color is found. + + Returns: + The name of the color, or the hexadecimal representation of the color. + + Raises: + ValueError: When no named color is found and fallback is `False`. + """ + if self._rgba.alpha is not None: + return self.as_hex() + rgb = cast('tuple[int, int, int]', self.as_rgb_tuple()) + + if rgb in COLORS_BY_VALUE: + return COLORS_BY_VALUE[rgb] + else: + if fallback: + return self.as_hex() + else: + raise ValueError('no named color found, use fallback=True, as_hex() or as_rgb()') + + def as_hex(self, format: Literal['short', 'long'] = 'short') -> str: + """Returns the hexadecimal representation of the color. + + Hex string representing the color can be 3, 4, 6, or 8 characters depending on whether the string + a "short" representation of the color is possible and whether there's an alpha channel. + + Returns: + The hexadecimal representation of the color. 
+ """ + values = [float_to_255(c) for c in self._rgba[:3]] + if self._rgba.alpha is not None: + values.append(float_to_255(self._rgba.alpha)) + + as_hex = ''.join(f'{v:02x}' for v in values) + if format == 'short' and all(c in repeat_colors for c in values): + as_hex = ''.join(as_hex[c] for c in range(0, len(as_hex), 2)) + return f'#{as_hex}' + + def as_rgb(self) -> str: + """Color as an `rgb(, , )` or `rgba(, , , )` string.""" + if self._rgba.alpha is None: + return f'rgb({float_to_255(self._rgba.r)}, {float_to_255(self._rgba.g)}, {float_to_255(self._rgba.b)})' + else: + return ( + f'rgba({float_to_255(self._rgba.r)}, {float_to_255(self._rgba.g)}, {float_to_255(self._rgba.b)}, ' + f'{round(self._alpha_float(), 2)})' + ) + + def as_rgb_tuple(self, *, alpha: bool | None = None) -> ColorTuple: + """Returns the color as an RGB or RGBA tuple. + + Args: + alpha: Whether to include the alpha channel. There are three options for this input: + + - `None` (default): Include alpha only if it's set. (e.g. not `None`) + - `True`: Always include alpha. + - `False`: Always omit alpha. + + Returns: + A tuple that contains the values of the red, green, and blue channels in the range 0 to 255. + If alpha is included, it is in the range 0 to 1. + """ + r, g, b = (float_to_255(c) for c in self._rgba[:3]) + if alpha is None and self._rgba.alpha is None or alpha is not None and not alpha: + return r, g, b + else: + return r, g, b, self._alpha_float() + + def as_hsl(self) -> str: + """Color as an `hsl(, , )` or `hsl(, , , )` string.""" + if self._rgba.alpha is None: + h, s, li = self.as_hsl_tuple(alpha=False) # type: ignore + return f'hsl({h * 360:0.0f}, {s:0.0%}, {li:0.0%})' + else: + h, s, li, a = self.as_hsl_tuple(alpha=True) # type: ignore + return f'hsl({h * 360:0.0f}, {s:0.0%}, {li:0.0%}, {round(a, 2)})' + + def as_hsl_tuple(self, *, alpha: bool | None = None) -> HslColorTuple: + """Returns the color as an HSL or HSLA tuple. + + Args: + alpha: Whether to include the alpha channel. 
+ + - `None` (default): Include the alpha channel only if it's set (e.g. not `None`). + - `True`: Always include alpha. + - `False`: Always omit alpha. + + Returns: + The color as a tuple of hue, saturation, lightness, and alpha (if included). + All elements are in the range 0 to 1. + + Note: + This is HSL as used in HTML and most other places, not HLS as used in Python's `colorsys`. + """ + h, l, s = rgb_to_hls(self._rgba.r, self._rgba.g, self._rgba.b) + if alpha is None: + if self._rgba.alpha is None: + return h, s, l + else: + return h, s, l, self._alpha_float() + return (h, s, l, self._alpha_float()) if alpha else (h, s, l) + + def _alpha_float(self) -> float: + return 1 if self._rgba.alpha is None else self._rgba.alpha + + @classmethod + def __get_pydantic_core_schema__( + cls, source: type[Any], handler: Callable[[Any], CoreSchema] + ) -> core_schema.CoreSchema: + return core_schema.with_info_plain_validator_function( + cls._validate, serialization=core_schema.to_string_ser_schema() + ) + + @classmethod + def _validate(cls, __input_value: Any, _: Any) -> Color: + return cls(__input_value) + + def __str__(self) -> str: + return self.as_named(fallback=True) + + def __repr_args__(self) -> _repr.ReprArgs: + return [(None, self.as_named(fallback=True))] + [('rgb', self.as_rgb_tuple())] + + def __eq__(self, other: Any) -> bool: + return isinstance(other, Color) and self.as_rgb_tuple() == other.as_rgb_tuple() + + def __hash__(self) -> int: + return hash(self.as_rgb_tuple()) + + +def parse_tuple(value: tuple[Any, ...]) -> RGBA: + """Parse a tuple or list to get RGBA values. + + Args: + value: A tuple or list. + + Returns: + An `RGBA` tuple parsed from the input tuple. + + Raises: + PydanticCustomError: If tuple is not valid. 
+ """ + if len(value) == 3: + r, g, b = (parse_color_value(v) for v in value) + return RGBA(r, g, b, None) + elif len(value) == 4: + r, g, b = (parse_color_value(v) for v in value[:3]) + return RGBA(r, g, b, parse_float_alpha(value[3])) + else: + raise PydanticCustomError('color_error', 'value is not a valid color: tuples must have length 3 or 4') + + +def parse_str(value: str) -> RGBA: + """Parse a string representing a color to an RGBA tuple. + + Possible formats for the input string include: + + * named color, see `COLORS_BY_NAME` + * hex short eg. `fff` (prefix can be `#`, `0x` or nothing) + * hex long eg. `ffffff` (prefix can be `#`, `0x` or nothing) + * `rgb(, , )` + * `rgba(, , , )` + * `transparent` + + Args: + value: A string representing a color. + + Returns: + An `RGBA` tuple parsed from the input string. + + Raises: + ValueError: If the input string cannot be parsed to an RGBA tuple. + """ + value_lower = value.lower() + if value_lower in COLORS_BY_NAME: + r, g, b = COLORS_BY_NAME[value_lower] + return ints_to_rgba(r, g, b, None) + + m = re.fullmatch(r_hex_short, value_lower) + if m: + *rgb, a = m.groups() + r, g, b = (int(v * 2, 16) for v in rgb) + alpha = int(a * 2, 16) / 255 if a else None + return ints_to_rgba(r, g, b, alpha) + + m = re.fullmatch(r_hex_long, value_lower) + if m: + *rgb, a = m.groups() + r, g, b = (int(v, 16) for v in rgb) + alpha = int(a, 16) / 255 if a else None + return ints_to_rgba(r, g, b, alpha) + + m = re.fullmatch(r_rgb, value_lower) or re.fullmatch(r_rgb_v4_style, value_lower) + if m: + return ints_to_rgba(*m.groups()) # type: ignore + + m = re.fullmatch(r_hsl, value_lower) or re.fullmatch(r_hsl_v4_style, value_lower) + if m: + return parse_hsl(*m.groups()) # type: ignore + + if value_lower == 'transparent': + return RGBA(0, 0, 0, 0) + + raise PydanticCustomError( + 'color_error', + 'value is not a valid color: string not recognised as a valid color', + ) + + +def ints_to_rgba( + r: int | str, + g: int | str, + b: int | str, 
+ alpha: float | None = None, +) -> RGBA: + """Converts integer or string values for RGB color and an optional alpha value to an `RGBA` object. + + Args: + r: An integer or string representing the red color value. + g: An integer or string representing the green color value. + b: An integer or string representing the blue color value. + alpha: A float representing the alpha value. Defaults to None. + + Returns: + An instance of the `RGBA` class with the corresponding color and alpha values. + """ + return RGBA( + parse_color_value(r), + parse_color_value(g), + parse_color_value(b), + parse_float_alpha(alpha), + ) + + +def parse_color_value(value: int | str, max_val: int = 255) -> float: + """Parse the color value provided and return a number between 0 and 1. + + Args: + value: An integer or string color value. + max_val: Maximum range value. Defaults to 255. + + Raises: + PydanticCustomError: If the value is not a valid color. + + Returns: + A number between 0 and 1. + """ + try: + color = float(value) + except (ValueError, TypeError) as e: + raise PydanticCustomError( + 'color_error', + 'value is not a valid color: color values must be a valid number', + ) from e + if 0 <= color <= max_val: + return color / max_val + else: + raise PydanticCustomError( + 'color_error', + 'value is not a valid color: color values must be in the range 0 to {max_val}', + {'max_val': max_val}, + ) + + +def parse_float_alpha(value: None | str | float | int) -> float | None: + """Parse an alpha value checking it's a valid float in the range 0 to 1. + + Args: + value: The input value to parse. + + Returns: + The parsed value as a float, or `None` if the value was None or equal 1. + + Raises: + PydanticCustomError: If the input value cannot be successfully parsed as a float in the expected range. 
+ """ + if value is None: + return None + try: + if isinstance(value, str) and value.endswith('%'): + alpha = float(value[:-1]) / 100 + else: + alpha = float(value) + except ValueError as e: + raise PydanticCustomError( + 'color_error', + 'value is not a valid color: alpha values must be a valid float', + ) from e + + if math.isclose(alpha, 1): + return None + elif 0 <= alpha <= 1: + return alpha + else: + raise PydanticCustomError( + 'color_error', + 'value is not a valid color: alpha values must be in the range 0 to 1', + ) + + +def parse_hsl(h: str, h_units: str, sat: str, light: str, alpha: float | None = None) -> RGBA: + """Parse raw hue, saturation, lightness, and alpha values and convert to RGBA. + + Args: + h: The hue value. + h_units: The unit for hue value. + sat: The saturation value. + light: The lightness value. + alpha: Alpha value. + + Returns: + An instance of `RGBA`. + """ + s_value, l_value = parse_color_value(sat, 100), parse_color_value(light, 100) + + h_value = float(h) + if h_units in {None, 'deg'}: + h_value = h_value % 360 / 360 + elif h_units == 'rad': + h_value = h_value % rads / rads + else: + # turns + h_value %= 1 + + r, g, b = hls_to_rgb(h_value, l_value, s_value) + return RGBA(r, g, b, parse_float_alpha(alpha)) + + +def float_to_255(c: float) -> int: + """Converts a float value between 0 and 1 (inclusive) to an integer between 0 and 255 (inclusive). + + Args: + c: The float value to be converted. Must be between 0 and 1 (inclusive). + + Returns: + The integer equivalent of the given float value rounded to the nearest whole number. 
+ """ + return round(c * 255) + + +COLORS_BY_NAME = { + 'aliceblue': (240, 248, 255), + 'antiquewhite': (250, 235, 215), + 'aqua': (0, 255, 255), + 'aquamarine': (127, 255, 212), + 'azure': (240, 255, 255), + 'beige': (245, 245, 220), + 'bisque': (255, 228, 196), + 'black': (0, 0, 0), + 'blanchedalmond': (255, 235, 205), + 'blue': (0, 0, 255), + 'blueviolet': (138, 43, 226), + 'brown': (165, 42, 42), + 'burlywood': (222, 184, 135), + 'cadetblue': (95, 158, 160), + 'chartreuse': (127, 255, 0), + 'chocolate': (210, 105, 30), + 'coral': (255, 127, 80), + 'cornflowerblue': (100, 149, 237), + 'cornsilk': (255, 248, 220), + 'crimson': (220, 20, 60), + 'cyan': (0, 255, 255), + 'darkblue': (0, 0, 139), + 'darkcyan': (0, 139, 139), + 'darkgoldenrod': (184, 134, 11), + 'darkgray': (169, 169, 169), + 'darkgreen': (0, 100, 0), + 'darkgrey': (169, 169, 169), + 'darkkhaki': (189, 183, 107), + 'darkmagenta': (139, 0, 139), + 'darkolivegreen': (85, 107, 47), + 'darkorange': (255, 140, 0), + 'darkorchid': (153, 50, 204), + 'darkred': (139, 0, 0), + 'darksalmon': (233, 150, 122), + 'darkseagreen': (143, 188, 143), + 'darkslateblue': (72, 61, 139), + 'darkslategray': (47, 79, 79), + 'darkslategrey': (47, 79, 79), + 'darkturquoise': (0, 206, 209), + 'darkviolet': (148, 0, 211), + 'deeppink': (255, 20, 147), + 'deepskyblue': (0, 191, 255), + 'dimgray': (105, 105, 105), + 'dimgrey': (105, 105, 105), + 'dodgerblue': (30, 144, 255), + 'firebrick': (178, 34, 34), + 'floralwhite': (255, 250, 240), + 'forestgreen': (34, 139, 34), + 'fuchsia': (255, 0, 255), + 'gainsboro': (220, 220, 220), + 'ghostwhite': (248, 248, 255), + 'gold': (255, 215, 0), + 'goldenrod': (218, 165, 32), + 'gray': (128, 128, 128), + 'green': (0, 128, 0), + 'greenyellow': (173, 255, 47), + 'grey': (128, 128, 128), + 'honeydew': (240, 255, 240), + 'hotpink': (255, 105, 180), + 'indianred': (205, 92, 92), + 'indigo': (75, 0, 130), + 'ivory': (255, 255, 240), + 'khaki': (240, 230, 140), + 'lavender': (230, 230, 250), + 
'lavenderblush': (255, 240, 245), + 'lawngreen': (124, 252, 0), + 'lemonchiffon': (255, 250, 205), + 'lightblue': (173, 216, 230), + 'lightcoral': (240, 128, 128), + 'lightcyan': (224, 255, 255), + 'lightgoldenrodyellow': (250, 250, 210), + 'lightgray': (211, 211, 211), + 'lightgreen': (144, 238, 144), + 'lightgrey': (211, 211, 211), + 'lightpink': (255, 182, 193), + 'lightsalmon': (255, 160, 122), + 'lightseagreen': (32, 178, 170), + 'lightskyblue': (135, 206, 250), + 'lightslategray': (119, 136, 153), + 'lightslategrey': (119, 136, 153), + 'lightsteelblue': (176, 196, 222), + 'lightyellow': (255, 255, 224), + 'lime': (0, 255, 0), + 'limegreen': (50, 205, 50), + 'linen': (250, 240, 230), + 'magenta': (255, 0, 255), + 'maroon': (128, 0, 0), + 'mediumaquamarine': (102, 205, 170), + 'mediumblue': (0, 0, 205), + 'mediumorchid': (186, 85, 211), + 'mediumpurple': (147, 112, 219), + 'mediumseagreen': (60, 179, 113), + 'mediumslateblue': (123, 104, 238), + 'mediumspringgreen': (0, 250, 154), + 'mediumturquoise': (72, 209, 204), + 'mediumvioletred': (199, 21, 133), + 'midnightblue': (25, 25, 112), + 'mintcream': (245, 255, 250), + 'mistyrose': (255, 228, 225), + 'moccasin': (255, 228, 181), + 'navajowhite': (255, 222, 173), + 'navy': (0, 0, 128), + 'oldlace': (253, 245, 230), + 'olive': (128, 128, 0), + 'olivedrab': (107, 142, 35), + 'orange': (255, 165, 0), + 'orangered': (255, 69, 0), + 'orchid': (218, 112, 214), + 'palegoldenrod': (238, 232, 170), + 'palegreen': (152, 251, 152), + 'paleturquoise': (175, 238, 238), + 'palevioletred': (219, 112, 147), + 'papayawhip': (255, 239, 213), + 'peachpuff': (255, 218, 185), + 'peru': (205, 133, 63), + 'pink': (255, 192, 203), + 'plum': (221, 160, 221), + 'powderblue': (176, 224, 230), + 'purple': (128, 0, 128), + 'red': (255, 0, 0), + 'rosybrown': (188, 143, 143), + 'royalblue': (65, 105, 225), + 'saddlebrown': (139, 69, 19), + 'salmon': (250, 128, 114), + 'sandybrown': (244, 164, 96), + 'seagreen': (46, 139, 87), + 'seashell': 
(255, 245, 238), + 'sienna': (160, 82, 45), + 'silver': (192, 192, 192), + 'skyblue': (135, 206, 235), + 'slateblue': (106, 90, 205), + 'slategray': (112, 128, 144), + 'slategrey': (112, 128, 144), + 'snow': (255, 250, 250), + 'springgreen': (0, 255, 127), + 'steelblue': (70, 130, 180), + 'tan': (210, 180, 140), + 'teal': (0, 128, 128), + 'thistle': (216, 191, 216), + 'tomato': (255, 99, 71), + 'turquoise': (64, 224, 208), + 'violet': (238, 130, 238), + 'wheat': (245, 222, 179), + 'white': (255, 255, 255), + 'whitesmoke': (245, 245, 245), + 'yellow': (255, 255, 0), + 'yellowgreen': (154, 205, 50), +} + +COLORS_BY_VALUE = {v: k for k, v in COLORS_BY_NAME.items()} diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/coordinate.py b/venv/lib/python3.10/site-packages/pydantic_extra_types/coordinate.py new file mode 100644 index 0000000000000000000000000000000000000000..a709eb22162654e497a246f8cd01e76277c2117e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pydantic_extra_types/coordinate.py @@ -0,0 +1,180 @@ +"""The `pydantic_extra_types.coordinate` module provides the [`Latitude`][pydantic_extra_types.coordinate.Latitude], +[`Longitude`][pydantic_extra_types.coordinate.Longitude], and +[`Coordinate`][pydantic_extra_types.coordinate.Coordinate] data types. +""" + +from __future__ import annotations + +from dataclasses import dataclass +from decimal import Decimal +from typing import Any, ClassVar, Tuple, Union + +from pydantic import GetCoreSchemaHandler +from pydantic._internal import _repr +from pydantic_core import ArgsKwargs, PydanticCustomError, core_schema + +LatitudeType = Union[float, Decimal] +LongitudeType = Union[float, Decimal] +CoordinateType = Tuple[LatitudeType, LongitudeType] + + +class Latitude(float): + """Latitude value should be between -90 and 90, inclusive. + + Supports both float and Decimal types. 
+ + ```py + from decimal import Decimal + from pydantic import BaseModel + from pydantic_extra_types.coordinate import Latitude + + + class Location(BaseModel): + latitude: Latitude + + + # Using float + location1 = Location(latitude=41.40338) + # Using Decimal + location2 = Location(latitude=Decimal('41.40338')) + ``` + """ + + min: ClassVar[float] = -90.00 + max: ClassVar[float] = 90.00 + + @classmethod + def __get_pydantic_core_schema__(cls, source: type[Any], handler: GetCoreSchemaHandler) -> core_schema.CoreSchema: + return core_schema.union_schema( + [ + core_schema.float_schema(ge=cls.min, le=cls.max), + core_schema.decimal_schema(ge=Decimal(cls.min), le=Decimal(cls.max)), + ] + ) + + +class Longitude(float): + """Longitude value should be between -180 and 180, inclusive. + + Supports both float and Decimal types. + + ```py + from decimal import Decimal + from pydantic import BaseModel + + from pydantic_extra_types.coordinate import Longitude + + + class Location(BaseModel): + longitude: Longitude + + + # Using float + location1 = Location(longitude=2.17403) + # Using Decimal + location2 = Location(longitude=Decimal('2.17403')) + ``` + """ + + min: ClassVar[float] = -180.00 + max: ClassVar[float] = 180.00 + + @classmethod + def __get_pydantic_core_schema__(cls, source: type[Any], handler: GetCoreSchemaHandler) -> core_schema.CoreSchema: + return core_schema.union_schema( + [ + core_schema.float_schema(ge=cls.min, le=cls.max), + core_schema.decimal_schema(ge=Decimal(cls.min), le=Decimal(cls.max)), + ] + ) + + +@dataclass +class Coordinate(_repr.Representation): + """Coordinate parses Latitude and Longitude. + + You can use the `Coordinate` data type for storing coordinates. Coordinates can be + defined using one of the following formats: + + 1. Tuple: `(Latitude, Longitude)`. For example: `(41.40338, 2.17403)` or `(Decimal('41.40338'), Decimal('2.17403'))`. + 2. `Coordinate` instance: `Coordinate(latitude=Latitude, longitude=Longitude)`. 
+ + ```py + from decimal import Decimal + from pydantic import BaseModel + + from pydantic_extra_types.coordinate import Coordinate + + + class Location(BaseModel): + coordinate: Coordinate + + + # Using float values + location1 = Location(coordinate=(41.40338, 2.17403)) + # > coordinate=Coordinate(latitude=41.40338, longitude=2.17403) + + # Using Decimal values + location2 = Location(coordinate=(Decimal('41.40338'), Decimal('2.17403'))) + # > coordinate=Coordinate(latitude=41.40338, longitude=2.17403) + ``` + """ + + _NULL_ISLAND: ClassVar[Tuple[float, float]] = (0.0, 0.0) + + latitude: Latitude + longitude: Longitude + + @classmethod + def __get_pydantic_core_schema__(cls, source: type[Any], handler: GetCoreSchemaHandler) -> core_schema.CoreSchema: + schema_chain = [ + core_schema.no_info_wrap_validator_function(cls._parse_str, core_schema.str_schema()), + core_schema.no_info_wrap_validator_function( + cls._parse_tuple, + handler.generate_schema(CoordinateType), + ), + handler(source), + ] + + chain_length = len(schema_chain) + chain_schemas = [core_schema.chain_schema(schema_chain[x:]) for x in range(chain_length - 1, -1, -1)] + return core_schema.no_info_wrap_validator_function( + cls._parse_args, + core_schema.union_schema(chain_schemas), # type: ignore[arg-type] + ) + + @classmethod + def _parse_args(cls, value: Any, handler: core_schema.ValidatorFunctionWrapHandler) -> Any: + if isinstance(value, ArgsKwargs) and not value.kwargs: + n_args = len(value.args) + if n_args == 0: + value = cls._NULL_ISLAND + elif n_args == 1: + value = value.args[0] + return handler(value) + + @classmethod + def _parse_str(cls, value: Any, handler: core_schema.ValidatorFunctionWrapHandler) -> Any: + if not isinstance(value, str): + return value + try: + value = tuple(float(x) for x in value.split(',')) + except ValueError as e: + raise PydanticCustomError( + 'coordinate_error', + 'value is not a valid coordinate: string is not recognized as a valid coordinate', + ) from e + return 
ArgsKwargs(args=value) + + @classmethod + def _parse_tuple(cls, value: Any, handler: core_schema.ValidatorFunctionWrapHandler) -> Any: + return ArgsKwargs(args=handler(value)) if isinstance(value, tuple) else value + + def __str__(self) -> str: + return f'{self.latitude},{self.longitude}' + + def __eq__(self, other: Any) -> bool: + return isinstance(other, Coordinate) and self.latitude == other.latitude and self.longitude == other.longitude + + def __hash__(self) -> int: + return hash((self.latitude, self.longitude)) diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/country.py b/venv/lib/python3.10/site-packages/pydantic_extra_types/country.py new file mode 100644 index 0000000000000000000000000000000000000000..38d6e10ef1b00645ddd1f83afb990b9730c02599 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pydantic_extra_types/country.py @@ -0,0 +1,288 @@ +"""Country definitions that are based on the [ISO 3166](https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes).""" + +from __future__ import annotations + +from dataclasses import dataclass +from functools import lru_cache +from typing import Any + +from pydantic import GetCoreSchemaHandler, GetJsonSchemaHandler +from pydantic_core import PydanticCustomError, core_schema + +try: + import pycountry +except ModuleNotFoundError as e: # pragma: no cover + raise RuntimeError( + 'The `country` module requires "pycountry" to be installed. You can install it with "pip install pycountry".' 
+ ) from e + + +@dataclass +class CountryInfo: + alpha2: str + alpha3: str + numeric_code: str + short_name: str + + +@lru_cache +def _countries() -> list[CountryInfo]: + return [ + CountryInfo( + alpha2=country.alpha_2, + alpha3=country.alpha_3, + numeric_code=country.numeric, + short_name=country.name, + ) + for country in pycountry.countries + ] + + +@lru_cache +def _index_by_alpha2() -> dict[str, CountryInfo]: + return {country.alpha2: country for country in _countries()} + + +@lru_cache +def _index_by_alpha3() -> dict[str, CountryInfo]: + return {country.alpha3: country for country in _countries()} + + +@lru_cache +def _index_by_numeric_code() -> dict[str, CountryInfo]: + return {country.numeric_code: country for country in _countries()} + + +@lru_cache +def _index_by_short_name() -> dict[str, CountryInfo]: + return {country.short_name: country for country in _countries()} + + +class CountryAlpha2(str): + """CountryAlpha2 parses country codes in the [ISO 3166-1 alpha-2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2) + format. 
+ + ```py + from pydantic import BaseModel + + from pydantic_extra_types.country import CountryAlpha2 + + + class Product(BaseModel): + made_in: CountryAlpha2 + + + product = Product(made_in='ES') + print(product) + # > made_in='ES' + ``` + """ + + @classmethod + def _validate(cls, __input_value: str, _: core_schema.ValidationInfo) -> CountryAlpha2: + if __input_value not in _index_by_alpha2(): + raise PydanticCustomError('country_alpha2', 'Invalid country alpha2 code') + return cls(__input_value) + + @classmethod + def __get_pydantic_core_schema__( + cls, source: type[Any], handler: GetCoreSchemaHandler + ) -> core_schema.AfterValidatorFunctionSchema: + return core_schema.with_info_after_validator_function( + cls._validate, + core_schema.str_schema(to_upper=True), + ) + + @classmethod + def __get_pydantic_json_schema__( + cls, schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler + ) -> dict[str, Any]: + json_schema = handler(schema) + json_schema.update({'pattern': r'^\w{2}$'}) + return json_schema + + @property + def alpha3(self) -> str: + """The country code in the [ISO 3166-1 alpha-3](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3) format.""" + return _index_by_alpha2()[self].alpha3 + + @property + def numeric_code(self) -> str: + """The country code in the [ISO 3166-1 numeric](https://en.wikipedia.org/wiki/ISO_3166-1_numeric) format.""" + return _index_by_alpha2()[self].numeric_code + + @property + def short_name(self) -> str: + """The country short name.""" + return _index_by_alpha2()[self].short_name + + +class CountryAlpha3(str): + """CountryAlpha3 parses country codes in the [ISO 3166-1 alpha-3](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3) + format. 
+ + ```py + from pydantic import BaseModel + + from pydantic_extra_types.country import CountryAlpha3 + + + class Product(BaseModel): + made_in: CountryAlpha3 + + + product = Product(made_in='USA') + print(product) + # > made_in='USA' + ``` + """ + + @classmethod + def _validate(cls, __input_value: str, _: core_schema.ValidationInfo) -> CountryAlpha3: + if __input_value not in _index_by_alpha3(): + raise PydanticCustomError('country_alpha3', 'Invalid country alpha3 code') + return cls(__input_value) + + @classmethod + def __get_pydantic_core_schema__( + cls, source: type[Any], handler: GetCoreSchemaHandler + ) -> core_schema.AfterValidatorFunctionSchema: + return core_schema.with_info_after_validator_function( + cls._validate, + core_schema.str_schema(to_upper=True), + serialization=core_schema.to_string_ser_schema(), + ) + + @classmethod + def __get_pydantic_json_schema__( + cls, schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler + ) -> dict[str, Any]: + json_schema = handler(schema) + json_schema.update({'pattern': r'^\w{3}$'}) + return json_schema + + @property + def alpha2(self) -> str: + """The country code in the [ISO 3166-1 alpha-2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2) format.""" + return _index_by_alpha3()[self].alpha2 + + @property + def numeric_code(self) -> str: + """The country code in the [ISO 3166-1 numeric](https://en.wikipedia.org/wiki/ISO_3166-1_numeric) format.""" + return _index_by_alpha3()[self].numeric_code + + @property + def short_name(self) -> str: + """The country short name.""" + return _index_by_alpha3()[self].short_name + + +class CountryNumericCode(str): + """CountryNumericCode parses country codes in the + [ISO 3166-1 numeric](https://en.wikipedia.org/wiki/ISO_3166-1_numeric) format. 
+ + ```py + from pydantic import BaseModel + + from pydantic_extra_types.country import CountryNumericCode + + + class Product(BaseModel): + made_in: CountryNumericCode + + + product = Product(made_in='840') + print(product) + # > made_in='840' + ``` + """ + + @classmethod + def _validate(cls, __input_value: str, _: core_schema.ValidationInfo) -> CountryNumericCode: + if __input_value not in _index_by_numeric_code(): + raise PydanticCustomError('country_numeric_code', 'Invalid country numeric code') + return cls(__input_value) + + @classmethod + def __get_pydantic_core_schema__( + cls, source: type[Any], handler: GetCoreSchemaHandler + ) -> core_schema.AfterValidatorFunctionSchema: + return core_schema.with_info_after_validator_function( + cls._validate, + core_schema.str_schema(to_upper=True), + serialization=core_schema.to_string_ser_schema(), + ) + + @classmethod + def __get_pydantic_json_schema__( + cls, schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler + ) -> dict[str, Any]: + json_schema = handler(schema) + json_schema.update({'pattern': r'^[0-9]{3}$'}) + return json_schema + + @property + def alpha2(self) -> str: + """The country code in the [ISO 3166-1 alpha-2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2) format.""" + return _index_by_numeric_code()[self].alpha2 + + @property + def alpha3(self) -> str: + """The country code in the [ISO 3166-1 alpha-3](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3) format.""" + return _index_by_numeric_code()[self].alpha3 + + @property + def short_name(self) -> str: + """The country short name.""" + return _index_by_numeric_code()[self].short_name + + +class CountryShortName(str): + """CountryShortName parses country codes in the short name format. 
+ + ```py + from pydantic import BaseModel + + from pydantic_extra_types.country import CountryShortName + + + class Product(BaseModel): + made_in: CountryShortName + + + product = Product(made_in='United States') + print(product) + # > made_in='United States' + ``` + """ + + @classmethod + def _validate(cls, __input_value: str, _: core_schema.ValidationInfo) -> CountryShortName: + if __input_value not in _index_by_short_name(): + raise PydanticCustomError('country_short_name', 'Invalid country short name') + return cls(__input_value) + + @classmethod + def __get_pydantic_core_schema__( + cls, source: type[Any], handler: GetCoreSchemaHandler + ) -> core_schema.AfterValidatorFunctionSchema: + return core_schema.with_info_after_validator_function( + cls._validate, + core_schema.str_schema(), + serialization=core_schema.to_string_ser_schema(), + ) + + @property + def alpha2(self) -> str: + """The country code in the [ISO 3166-1 alpha-2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2) format.""" + return _index_by_short_name()[self].alpha2 + + @property + def alpha3(self) -> str: + """The country code in the [ISO 3166-1 alpha-3](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3) format.""" + return _index_by_short_name()[self].alpha3 + + @property + def numeric_code(self) -> str: + """The country code in the [ISO 3166-1 numeric](https://en.wikipedia.org/wiki/ISO_3166-1_numeric) format.""" + return _index_by_short_name()[self].numeric_code diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/currency_code.py b/venv/lib/python3.10/site-packages/pydantic_extra_types/currency_code.py new file mode 100644 index 0000000000000000000000000000000000000000..47dd08b3e860559b2aa72611617698df44d331b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pydantic_extra_types/currency_code.py @@ -0,0 +1,180 @@ +"""Currency definitions that are based on the [ISO4217](https://en.wikipedia.org/wiki/ISO_4217).""" + +from __future__ import annotations + +from typing import 
Any + +from pydantic import GetCoreSchemaHandler, GetJsonSchemaHandler +from pydantic_core import PydanticCustomError, core_schema + +try: + import pycountry +except ModuleNotFoundError as e: # pragma: no cover + raise RuntimeError( + 'The `currency_code` module requires "pycountry" to be installed. You can install it with "pip install ' + 'pycountry".' + ) from e + +# List of codes that should not be usually used within regular transactions +_CODES_FOR_BONDS_METAL_TESTING = { + 'XTS', # testing + 'XAU', # gold + 'XAG', # silver + 'XPD', # palladium + 'XPT', # platinum + 'XBA', # Bond Markets Unit European Composite Unit (EURCO) + 'XBB', # Bond Markets Unit European Monetary Unit (E.M.U.-6) + 'XBC', # Bond Markets Unit European Unit of Account 9 (E.U.A.-9) + 'XBD', # Bond Markets Unit European Unit of Account 17 (E.U.A.-17) + 'XXX', # no currency + 'XDR', # SDR (Special Drawing Right) +} + + +class ISO4217(str): + """ISO4217 parses Currency in the [ISO 4217](https://en.wikipedia.org/wiki/ISO_4217) format. + + ```py + from pydantic import BaseModel + + from pydantic_extra_types.currency_code import ISO4217 + + + class Currency(BaseModel): + alpha_3: ISO4217 + + + currency = Currency(alpha_3='AED') + print(currency) + # > alpha_3='AED' + ``` + """ + + allowed_countries_list = [country.alpha_3 for country in pycountry.currencies] + allowed_currencies = set(allowed_countries_list) + + @classmethod + def _validate(cls, currency_code: str, _: core_schema.ValidationInfo) -> str: + """Validate a ISO 4217 language code from the provided str value. + + Args: + currency_code: The str value to be validated. + _: The Pydantic ValidationInfo. + + Returns: + The validated ISO 4217 currency code. + + Raises: + PydanticCustomError: If the ISO 4217 currency code is not valid. + """ + currency_code = currency_code.upper() + if currency_code not in cls.allowed_currencies: + raise PydanticCustomError( + 'ISO4217', 'Invalid ISO 4217 currency code. 
See https://en.wikipedia.org/wiki/ISO_4217' + ) + return currency_code + + @classmethod + def __get_pydantic_core_schema__(cls, _: type[Any], __: GetCoreSchemaHandler) -> core_schema.CoreSchema: + return core_schema.with_info_after_validator_function( + cls._validate, + core_schema.str_schema(min_length=3, max_length=3), + ) + + @classmethod + def __get_pydantic_json_schema__( + cls, schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler + ) -> dict[str, Any]: + json_schema = handler(schema) + json_schema.update({'enum': cls.allowed_countries_list}) + return json_schema + + +class Currency(str): + """Currency parses currency subset of the [ISO 4217](https://en.wikipedia.org/wiki/ISO_4217) format. + It excludes bonds testing codes and precious metals. + ```py + from pydantic import BaseModel + + from pydantic_extra_types.currency_code import Currency + + + class currency(BaseModel): + alpha_3: Currency + + + cur = currency(alpha_3='AED') + print(cur) + # > alpha_3='AED' + ``` + """ + + allowed_countries_list = list( + filter(lambda x: x not in _CODES_FOR_BONDS_METAL_TESTING, ISO4217.allowed_countries_list) + ) + allowed_currencies = set(allowed_countries_list) + + @classmethod + def _validate(cls, currency_symbol: str, _: core_schema.ValidationInfo) -> str: + """Validate a subset of the [ISO4217](https://en.wikipedia.org/wiki/ISO_4217) format. + It excludes bonds testing codes and precious metals. + + Args: + currency_symbol: The str value to be validated. + _: The Pydantic ValidationInfo. + + Returns: + The validated ISO 4217 currency code. + + Raises: + PydanticCustomError: If the ISO 4217 currency code is not valid or is bond, precious metal or testing code. + """ + currency_symbol = currency_symbol.upper() + if currency_symbol not in cls.allowed_currencies: + raise PydanticCustomError( + 'InvalidCurrency', + 'Invalid currency code.' + ' See https://en.wikipedia.org/wiki/ISO_4217 . 
' + 'Bonds, testing and precious metals codes are not allowed.', + ) + return currency_symbol + + @classmethod + def __get_pydantic_core_schema__(cls, _: type[Any], __: GetCoreSchemaHandler) -> core_schema.CoreSchema: + """Return a Pydantic CoreSchema with the currency subset of the + [ISO4217](https://en.wikipedia.org/wiki/ISO_4217) format. + It excludes bonds testing codes and precious metals. + + Args: + _: The source type. + __: The handler to get the CoreSchema. + + Returns: + A Pydantic CoreSchema with the subset of the currency subset of the + [ISO4217](https://en.wikipedia.org/wiki/ISO_4217) format. + It excludes bonds testing codes and precious metals. + """ + return core_schema.with_info_after_validator_function( + cls._validate, + core_schema.str_schema(min_length=3, max_length=3), + ) + + @classmethod + def __get_pydantic_json_schema__( + cls, schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler + ) -> dict[str, Any]: + """Return a Pydantic JSON Schema with subset of the [ISO4217](https://en.wikipedia.org/wiki/ISO_4217) format. + Excluding bonds testing codes and precious metals. + + Args: + schema: The Pydantic CoreSchema. + handler: The handler to get the JSON Schema. + + Returns: + A Pydantic JSON Schema with the subset of the ISO4217 currency code validation. without bonds testing codes + and precious metals. + + """ + json_schema = handler(schema) + json_schema.update({'enum': cls.allowed_countries_list}) + return json_schema diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/domain.py b/venv/lib/python3.10/site-packages/pydantic_extra_types/domain.py new file mode 100644 index 0000000000000000000000000000000000000000..b772979f1bc05c614897877440bcd0d36ec824f7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pydantic_extra_types/domain.py @@ -0,0 +1,59 @@ +"""The `domain_str` module provides the `DomainStr` data type. 
+This class depends on the `pydantic` package and implements custom validation for domain string format. +""" + +from __future__ import annotations + +import re +from typing import Any + +from pydantic import GetCoreSchemaHandler +from pydantic_core import PydanticCustomError, core_schema + + +class DomainStr(str): + """A string subclass with custom validation for domain string format.""" + + _domain_re_pattern = r'(?=^.{1,253}$)(^((?!-)[a-zA-Z0-9-]{1,63}(? str: + """Validate a domain name from the provided value. + + Args: + __input_value: The value to be validated. + _: The source type to be converted. + + Returns: + str: The parsed domain name. + + """ + return cls._validate(__input_value) + + @classmethod + def _validate(cls, v: Any) -> DomainStr: + if not isinstance(v, str): + raise PydanticCustomError('domain_type', 'Value must be a string') + + v = v.strip().lower() + if len(v) < 1 or len(v) > 253: + raise PydanticCustomError('domain_length', 'Domain must be between 1 and 253 characters') + + if not re.match(cls._domain_re_pattern, v): + raise PydanticCustomError('domain_format', 'Invalid domain format') + + return cls(v) + + @classmethod + def __get_pydantic_core_schema__(cls, source_type: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema: + return core_schema.with_info_before_validator_function( + cls.validate, + core_schema.str_schema(), + ) + + @classmethod + def __get_pydantic_json_schema__( + cls, schema: core_schema.CoreSchema, handler: GetCoreSchemaHandler + ) -> dict[str, Any]: + # Cast the return value to dict[str, Any] + return dict(handler(schema)) diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/epoch.py b/venv/lib/python3.10/site-packages/pydantic_extra_types/epoch.py new file mode 100644 index 0000000000000000000000000000000000000000..7fb1da42c4c555a172e21e66197462eef401abe0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pydantic_extra_types/epoch.py @@ -0,0 +1,97 @@ +from __future__ import annotations + 
+import datetime +from typing import Any, Callable + +import pydantic_core.core_schema +from pydantic import GetJsonSchemaHandler +from pydantic.json_schema import JsonSchemaValue +from pydantic_core import CoreSchema, core_schema + +EPOCH = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) + + +class _Base(datetime.datetime): + TYPE: str = '' + SCHEMA: pydantic_core.core_schema.CoreSchema + + @classmethod + def __get_pydantic_json_schema__( + cls, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler + ) -> JsonSchemaValue: + field_schema: dict[str, Any] = {} + field_schema.update(type=cls.TYPE, format='date-time') + return field_schema + + @classmethod + def __get_pydantic_core_schema__( + cls, source: type[Any], handler: Callable[[Any], CoreSchema] + ) -> core_schema.CoreSchema: + return core_schema.with_info_after_validator_function( + cls._validate, + cls.SCHEMA, + serialization=core_schema.wrap_serializer_function_ser_schema(cls._f, return_schema=cls.SCHEMA), + ) + + @classmethod + def _validate(cls, __input_value: Any, _: Any) -> datetime.datetime: + return EPOCH + datetime.timedelta(seconds=__input_value) + + @classmethod + def _f(cls, value: Any, serializer: Callable[[Any], Any]) -> Any: # pragma: no cover + raise NotImplementedError(cls) + + +class Number(_Base): + """epoch.Number parses unix timestamp as float and converts it to datetime. 
+ + ```py + from pydantic import BaseModel + + from pydantic_extra_types import epoch + + + class LogEntry(BaseModel): + timestamp: epoch.Number + + + logentry = LogEntry(timestamp=1.1) + print(logentry) + # > timestamp=datetime.datetime(1970, 1, 1, 0, 0, 1, 100000, tzinfo=datetime.timezone.utc) + ``` + """ + + TYPE = 'number' + SCHEMA = core_schema.float_schema() + + @classmethod + def _f(cls, value: Any, serializer: Callable[[float], float]) -> float: + ts = value.timestamp() + return serializer(ts) + + +class Integer(_Base): + """epoch.Integer parses unix timestamp as integer and converts it to datetime. + + ``` + ```py + from pydantic import BaseModel + + from pydantic_extra_types import epoch + + class LogEntry(BaseModel): + timestamp: epoch.Integer + + logentry = LogEntry(timestamp=1) + print(logentry) + #> timestamp=datetime.datetime(1970, 1, 1, 0, 0, 1, tzinfo=datetime.timezone.utc) + ``` + """ + + TYPE = 'integer' + SCHEMA = core_schema.int_schema() + + @classmethod + def _f(cls, value: Any, serializer: Callable[[int], int]) -> int: + ts = value.timestamp() + return serializer(int(ts)) diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/isbn.py b/venv/lib/python3.10/site-packages/pydantic_extra_types/isbn.py new file mode 100644 index 0000000000000000000000000000000000000000..8161c0d34a3fb589ba9473fd3e8fee7d77b9c9a0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pydantic_extra_types/isbn.py @@ -0,0 +1,146 @@ +"""The `pydantic_extra_types.isbn` module provides functionality to recieve and validate ISBN. + +ISBN (International Standard Book Number) is a numeric commercial book identifier which is intended to be unique. This module provides a ISBN type for Pydantic models. 
+""" + +from __future__ import annotations + +import itertools as it +from typing import Any + +from pydantic import GetCoreSchemaHandler +from pydantic_core import PydanticCustomError, core_schema + + +def isbn10_digit_calc(isbn: str) -> str: + """Calc a ISBN-10 last digit from the provided str value. More information of validation algorithm on [Wikipedia](https://en.wikipedia.org/wiki/ISBN#Check_digits) + + Args: + isbn: The str value representing the ISBN in 10 digits. + + Returns: + The calculated last digit of the ISBN-10 value. + """ + total = sum(int(digit) * (10 - idx) for idx, digit in enumerate(isbn[:9])) + diff = (11 - total) % 11 + valid_check_digit = 'X' if diff == 10 else str(diff) + return valid_check_digit + + +def isbn13_digit_calc(isbn: str) -> str: + """Calc a ISBN-13 last digit from the provided str value. More information of validation algorithm on [Wikipedia](https://en.wikipedia.org/wiki/ISBN#Check_digits) + + Args: + isbn: The str value representing the ISBN in 13 digits. + + Returns: + The calculated last digit of the ISBN-13 value. + """ + total = sum(int(digit) * factor for digit, factor in zip(isbn[:12], it.cycle((1, 3)))) + + check_digit = (10 - total) % 10 + + return str(check_digit) + + +class ISBN(str): + """Represents a ISBN and provides methods for conversion, validation, and serialization. + + ```py + from pydantic import BaseModel + + from pydantic_extra_types.isbn import ISBN + + + class Book(BaseModel): + isbn: ISBN + + + book = Book(isbn='8537809667') + print(book) + # > isbn='9788537809662' + ``` + """ + + @classmethod + def __get_pydantic_core_schema__(cls, source: type[Any], handler: GetCoreSchemaHandler) -> core_schema.CoreSchema: + """Return a Pydantic CoreSchema with the ISBN validation. + + Args: + source: The source type to be converted. + handler: The handler to get the CoreSchema. + + Returns: + A Pydantic CoreSchema with the ISBN validation. 
+ + """ + return core_schema.with_info_before_validator_function( + cls._validate, + core_schema.str_schema(), + ) + + @classmethod + def _validate(cls, __input_value: str, _: Any) -> str: + """Validate a ISBN from the provided str value. + + Args: + __input_value: The str value to be validated. + _: The source type to be converted. + + Returns: + The validated ISBN. + + Raises: + PydanticCustomError: If the ISBN is not valid. + """ + cls.validate_isbn_format(__input_value) + + return cls.convert_isbn10_to_isbn13(__input_value) + + @staticmethod + def validate_isbn_format(value: str) -> None: + """Validate a ISBN format from the provided str value. + + Args: + value: The str value representing the ISBN in 10 or 13 digits. + + Raises: + PydanticCustomError: If the ISBN is not valid. + """ + isbn_length = len(value) + + if isbn_length not in (10, 13): + raise PydanticCustomError('isbn_length', f'Length for ISBN must be 10 or 13 digits, not {isbn_length}') + + if isbn_length == 10: + if not value[:-1].isdigit() or ((value[-1] != 'X') and (not value[-1].isdigit())): + raise PydanticCustomError('isbn10_invalid_characters', 'First 9 digits of ISBN-10 must be integers') + if isbn10_digit_calc(value) != value[-1]: + raise PydanticCustomError('isbn_invalid_digit_check_isbn10', 'Provided digit is invalid for given ISBN') + + if isbn_length == 13: + if not value.isdigit(): + raise PydanticCustomError('isbn13_invalid_characters', 'All digits of ISBN-13 must be integers') + if value[:3] not in ('978', '979'): + raise PydanticCustomError( + 'isbn_invalid_early_characters', 'The first 3 digits of ISBN-13 must be 978 or 979' + ) + if isbn13_digit_calc(value) != value[-1]: + raise PydanticCustomError('isbn_invalid_digit_check_isbn13', 'Provided digit is invalid for given ISBN') + + @staticmethod + def convert_isbn10_to_isbn13(value: str) -> str: + """Convert an ISBN-10 to ISBN-13. + + Args: + value: The ISBN-10 value to be converted. 
+ + Returns: + The converted ISBN or the original value if no conversion is necessary. + """ + if len(value) == 10: + base_isbn = f'978{value[:-1]}' + isbn13_digit = isbn13_digit_calc(base_isbn) + return ISBN(f'{base_isbn}{isbn13_digit}') + + return ISBN(value) diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/language_code.py b/venv/lib/python3.10/site-packages/pydantic_extra_types/language_code.py new file mode 100644 index 0000000000000000000000000000000000000000..4c725a5576b43aa1eb9cded165c37759b19af3cc --- /dev/null +++ b/venv/lib/python3.10/site-packages/pydantic_extra_types/language_code.py @@ -0,0 +1,379 @@ +"""Language definitions that are based on the [ISO 639-3](https://en.wikipedia.org/wiki/ISO_639-3) & [ISO 639-5](https://en.wikipedia.org/wiki/ISO_639-5).""" + +from __future__ import annotations + +from dataclasses import dataclass +from functools import lru_cache +from typing import Any, Union + +from pydantic import GetCoreSchemaHandler, GetJsonSchemaHandler +from pydantic_core import PydanticCustomError, core_schema + +try: + import pycountry +except ModuleNotFoundError as e: # pragma: no cover + raise RuntimeError( + 'The `language_code` module requires "pycountry" to be installed.' + ' You can install it with "pip install pycountry".' + ) from e + + +@dataclass +class LanguageInfo: + """LanguageInfo is a dataclass that contains the language information. + + Args: + alpha2: The language code in the [ISO 639-1 alpha-2](https://en.wikipedia.org/wiki/ISO_639-1) format. + alpha3: The language code in the [ISO 639-3 alpha-3](https://en.wikipedia.org/wiki/ISO_639-3) format. + name: The language name. + """ + + alpha2: Union[str, None] + alpha3: str + name: str + + +@lru_cache +def _languages() -> list[LanguageInfo]: + """Return a list of LanguageInfo objects containing the language information. + + Returns: + A list of LanguageInfo objects containing the language information. 
+ """ + return [ + LanguageInfo( + alpha2=getattr(language, 'alpha_2', None), + alpha3=language.alpha_3, + name=language.name, + ) + for language in pycountry.languages + ] + + +@lru_cache +def _index_by_alpha2() -> dict[str, LanguageInfo]: + """Return a dictionary with the language code in the [ISO 639-1 alpha-2](https://en.wikipedia.org/wiki/ISO_639-1) format as the key and the LanguageInfo object as the value.""" + return {language.alpha2: language for language in _languages() if language.alpha2 is not None} + + +@lru_cache +def _index_by_alpha3() -> dict[str, LanguageInfo]: + """Return a dictionary with the language code in the [ISO 639-3 alpha-3](https://en.wikipedia.org/wiki/ISO_639-3) format as the key and the LanguageInfo object as the value.""" + return {language.alpha3: language for language in _languages()} + + +@lru_cache +def _index_by_name() -> dict[str, LanguageInfo]: + """Return a dictionary with the language name as the key and the LanguageInfo object as the value.""" + return {language.name: language for language in _languages()} + + +class LanguageAlpha2(str): + """LanguageAlpha2 parses languages codes in the [ISO 639-1 alpha-2](https://en.wikipedia.org/wiki/ISO_639-1) + format. + + ```py + from pydantic import BaseModel + + from pydantic_extra_types.language_code import LanguageAlpha2 + + + class Movie(BaseModel): + audio_lang: LanguageAlpha2 + subtitles_lang: LanguageAlpha2 + + + movie = Movie(audio_lang='de', subtitles_lang='fr') + print(movie) + # > audio_lang='de' subtitles_lang='fr' + ``` + """ + + @classmethod + def _validate(cls, __input_value: str, _: core_schema.ValidationInfo) -> LanguageAlpha2: + """Validate a language code in the ISO 639-1 alpha-2 format from the provided str value. + + Args: + __input_value: The str value to be validated. + _: The Pydantic ValidationInfo. + + Returns: + The validated language code in the ISO 639-1 alpha-2 format. 
+ """ + if __input_value not in _index_by_alpha2(): + raise PydanticCustomError('language_alpha2', 'Invalid language alpha2 code') + return cls(__input_value) + + @classmethod + def __get_pydantic_core_schema__( + cls, source: type[Any], handler: GetCoreSchemaHandler + ) -> core_schema.AfterValidatorFunctionSchema: + """Return a Pydantic CoreSchema with the language code in the ISO 639-1 alpha-2 format validation. + + Args: + source: The source type. + handler: The handler to get the CoreSchema. + + Returns: + A Pydantic CoreSchema with the language code in the ISO 639-1 alpha-2 format validation. + """ + return core_schema.with_info_after_validator_function( + cls._validate, + core_schema.str_schema(to_lower=True), + ) + + @classmethod + def __get_pydantic_json_schema__( + cls, schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler + ) -> dict[str, Any]: + """Return a Pydantic JSON Schema with the language code in the ISO 639-1 alpha-2 format validation. + + Args: + schema: The Pydantic CoreSchema. + handler: The handler to get the JSON Schema. + + Returns: + A Pydantic JSON Schema with the language code in the ISO 639-1 alpha-2 format validation. + """ + json_schema = handler(schema) + json_schema.update({'pattern': r'^\w{2}$'}) + return json_schema + + @property + def alpha3(self) -> str: + """The language code in the [ISO 639-3 alpha-3](https://en.wikipedia.org/wiki/ISO_639-3) format.""" + return _index_by_alpha2()[self].alpha3 + + @property + def name(self) -> str: + """The language name.""" + return _index_by_alpha2()[self].name + + +class LanguageName(str): + """LanguageName parses languages names listed in the [ISO 639-3 standard](https://en.wikipedia.org/wiki/ISO_639-3) + format. 
+ + ```py + from pydantic import BaseModel + + from pydantic_extra_types.language_code import LanguageName + + + class Movie(BaseModel): + audio_lang: LanguageName + subtitles_lang: LanguageName + + + movie = Movie(audio_lang='Dutch', subtitles_lang='Mandarin Chinese') + print(movie) + # > audio_lang='Dutch' subtitles_lang='Mandarin Chinese' + ``` + """ + + @classmethod + def _validate(cls, __input_value: str, _: core_schema.ValidationInfo) -> LanguageName: + """Validate a language name from the provided str value. + + Args: + __input_value: The str value to be validated. + _: The Pydantic ValidationInfo. + + Returns: + The validated language name. + """ + if __input_value not in _index_by_name(): + raise PydanticCustomError('language_name', 'Invalid language name') + return cls(__input_value) + + @classmethod + def __get_pydantic_core_schema__( + cls, source: type[Any], handler: GetCoreSchemaHandler + ) -> core_schema.AfterValidatorFunctionSchema: + """Return a Pydantic CoreSchema with the language name validation. + + Args: + source: The source type. + handler: The handler to get the CoreSchema. + + Returns: + A Pydantic CoreSchema with the language name validation. + """ + return core_schema.with_info_after_validator_function( + cls._validate, + core_schema.str_schema(), + serialization=core_schema.to_string_ser_schema(), + ) + + @property + def alpha2(self) -> Union[str, None]: + """The language code in the [ISO 639-1 alpha-2](https://en.wikipedia.org/wiki/ISO_639-1) format. Does not exist for all languages.""" + return _index_by_name()[self].alpha2 + + @property + def alpha3(self) -> str: + """The language code in the [ISO 639-3 alpha-3](https://en.wikipedia.org/wiki/ISO_639-3) format.""" + return _index_by_name()[self].alpha3 + + +class ISO639_3(str): + """ISO639_3 parses Language in the [ISO 639-3 alpha-3](https://en.wikipedia.org/wiki/ISO_639-3_alpha-3) + format. 
+ + ```py + from pydantic import BaseModel + + from pydantic_extra_types.language_code import ISO639_3 + + + class Language(BaseModel): + alpha_3: ISO639_3 + + + lang = Language(alpha_3='ssr') + print(lang) + # > alpha_3='ssr' + ``` + """ + + allowed_values_list = [lang.alpha_3 for lang in pycountry.languages] + allowed_values = set(allowed_values_list) + + @classmethod + def _validate(cls, __input_value: str, _: core_schema.ValidationInfo) -> ISO639_3: + """Validate a ISO 639-3 language code from the provided str value. + + Args: + __input_value: The str value to be validated. + _: The Pydantic ValidationInfo. + + Returns: + The validated ISO 639-3 language code. + + Raises: + PydanticCustomError: If the ISO 639-3 language code is not valid. + """ + if __input_value not in cls.allowed_values: + raise PydanticCustomError( + 'ISO649_3', 'Invalid ISO 639-3 language code. See https://en.wikipedia.org/wiki/ISO_639-3' + ) + return cls(__input_value) + + @classmethod + def __get_pydantic_core_schema__( + cls, _: type[Any], __: GetCoreSchemaHandler + ) -> core_schema.AfterValidatorFunctionSchema: + """Return a Pydantic CoreSchema with the ISO 639-3 language code validation. + + Args: + _: The source type. + __: The handler to get the CoreSchema. + + Returns: + A Pydantic CoreSchema with the ISO 639-3 language code validation. + + """ + return core_schema.with_info_after_validator_function( + cls._validate, + core_schema.str_schema(min_length=3, max_length=3), + ) + + @classmethod + def __get_pydantic_json_schema__( + cls, schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler + ) -> dict[str, Any]: + """Return a Pydantic JSON Schema with the ISO 639-3 language code validation. + + Args: + schema: The Pydantic CoreSchema. + handler: The handler to get the JSON Schema. + + Returns: + A Pydantic JSON Schema with the ISO 639-3 language code validation. 
+ + """ + json_schema = handler(schema) + json_schema.update({'enum': cls.allowed_values_list}) + return json_schema + + +class ISO639_5(str): + """ISO639_5 parses Language in the [ISO 639-5 alpha-3](https://en.wikipedia.org/wiki/ISO_639-5_alpha-3) + format. + + ```py + from pydantic import BaseModel + + from pydantic_extra_types.language_code import ISO639_5 + + + class Language(BaseModel): + alpha_3: ISO639_5 + + + lang = Language(alpha_3='gem') + print(lang) + # > alpha_3='gem' + ``` + """ + + allowed_values_list = [lang.alpha_3 for lang in pycountry.language_families] + allowed_values_list.sort() + allowed_values = set(allowed_values_list) + + @classmethod + def _validate(cls, __input_value: str, _: core_schema.ValidationInfo) -> ISO639_5: + """Validate a ISO 639-5 language code from the provided str value. + + Args: + __input_value: The str value to be validated. + _: The Pydantic ValidationInfo. + + Returns: + The validated ISO 639-3 language code. + + Raises: + PydanticCustomError: If the ISO 639-5 language code is not valid. + """ + if __input_value not in cls.allowed_values: + raise PydanticCustomError( + 'ISO649_5', 'Invalid ISO 639-5 language code. See https://en.wikipedia.org/wiki/ISO_639-5' + ) + return cls(__input_value) + + @classmethod + def __get_pydantic_core_schema__( + cls, _: type[Any], __: GetCoreSchemaHandler + ) -> core_schema.AfterValidatorFunctionSchema: + """Return a Pydantic CoreSchema with the ISO 639-5 language code validation. + + Args: + _: The source type. + __: The handler to get the CoreSchema. + + Returns: + A Pydantic CoreSchema with the ISO 639-5 language code validation. 
+ + """ + return core_schema.with_info_after_validator_function( + cls._validate, + core_schema.str_schema(min_length=3, max_length=3), + ) + + @classmethod + def __get_pydantic_json_schema__( + cls, schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler + ) -> dict[str, Any]: + """Return a Pydantic JSON Schema with the ISO 639-5 language code validation. + + Args: + schema: The Pydantic CoreSchema. + handler: The handler to get the JSON Schema. + + Returns: + A Pydantic JSON Schema with the ISO 639-5 language code validation. + + """ + json_schema = handler(schema) + json_schema.update({'enum': cls.allowed_values_list}) + return json_schema diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/mac_address.py b/venv/lib/python3.10/site-packages/pydantic_extra_types/mac_address.py new file mode 100644 index 0000000000000000000000000000000000000000..52355a9e9babd6978d4a9735a18ced9f2c002c3b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pydantic_extra_types/mac_address.py @@ -0,0 +1,98 @@ +"""The MAC address module provides functionality to parse and validate MAC addresses in different +formats, such as IEEE 802 MAC-48, EUI-48, EUI-64, or a 20-octet format. +""" + +from __future__ import annotations + +from typing import Any + +from pydantic import GetCoreSchemaHandler +from pydantic_core import PydanticCustomError, core_schema + + +class MacAddress(str): + """Represents a MAC address and provides methods for conversion, validation, and serialization. + + ```py + from pydantic import BaseModel + + from pydantic_extra_types.mac_address import MacAddress + + + class Network(BaseModel): + mac_address: MacAddress + + + network = Network(mac_address='00:00:5e:00:53:01') + print(network) + # > mac_address='00:00:5e:00:53:01' + ``` + """ + + @classmethod + def __get_pydantic_core_schema__(cls, source: type[Any], handler: GetCoreSchemaHandler) -> core_schema.CoreSchema: + """Return a Pydantic CoreSchema with the MAC address validation. 
class MacAddress(str):
    """A string subtype holding a validated MAC address.

    Accepts IEEE 802 MAC-48 / EUI-48, EUI-64 and 20-octet addresses written in
    colon-, hyphen- or dot-separated notation and normalizes them to the
    lower-case, colon-separated form.

    ```py
    from pydantic import BaseModel

    from pydantic_extra_types.mac_address import MacAddress


    class Network(BaseModel):
        mac_address: MacAddress


    network = Network(mac_address='00:00:5e:00:53:01')
    print(network)
    # > mac_address='00:00:5e:00:53:01'
    ```
    """

    @classmethod
    def __get_pydantic_core_schema__(cls, source: type[Any], handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
        """Return a CoreSchema that runs MAC validation before the plain str schema.

        Args:
            source: The source type to be converted.
            handler: The handler to get the CoreSchema.

        Returns:
            A Pydantic CoreSchema with the MAC address validation.
        """
        return core_schema.with_info_before_validator_function(
            cls._validate,
            core_schema.str_schema(),
        )

    @classmethod
    def _validate(cls, __input_value: str, _: Any) -> str:
        """Validate and normalize a MAC address given as a str.

        Args:
            __input_value: The str value to be validated.
            _: The validation info (unused).

        Returns:
            str: The normalized MAC address.
        """
        return cls.validate_mac_address(__input_value.encode())

    @staticmethod
    def validate_mac_address(value: bytes) -> str:
        """Validate a MAC Address from the provided byte value."""
        text = value.decode()

        # 14 chars is the shortest accepted spelling (dotted 6-octet form 'xxxx.xxxx.xxxx').
        if len(text) < 14:
            raise PydanticCustomError(
                'mac_address_len',
                'Length for a {mac_address} MAC address must be {required_length}',
                {'mac_address': text, 'required_length': 14},
            )

        # Pick the notation by the first separator present; ':' wins over '-', then '.'.
        separator = next((candidate for candidate in (':', '-', '.') if candidate in text), None)
        if separator is None:
            raise PydanticCustomError('mac_address_format', 'Unrecognized format')
        chars_per_group = 4 if separator == '.' else 2

        groups = text.split(separator)
        if any(len(group) != chars_per_group for group in groups):
            raise PydanticCustomError(
                'mac_address_format',
                f'Must have the format xx{separator}xx{separator}xx{separator}xx{separator}xx{separator}xx',
            )
        # Total octet count must be MAC-48/EUI-48 (6), EUI-64 (8) or 20-octet.
        if len(groups) * chars_per_group // 2 not in (6, 8, 20):
            raise PydanticCustomError(
                'mac_address_format',
                'Length for a {mac_address} MAC address must be {required_length}',
                {'mac_address': text, 'required_length': (6, 8, 20)},
            )

        octets: list[int] = []
        for group in groups:
            for offset in range(0, chars_per_group, 2):
                try:
                    octets.append(int(group[offset : offset + 2], 16))
                except ValueError as exc:
                    raise PydanticCustomError('mac_address_format', 'Unrecognized format') from exc
        # Canonical form: lower-case hex, colon-separated.
        return ':'.join(f'{octet:02x}' for octet in octets)
class MongoObjectId(str):
    """MongoObjectId parses and validates MongoDB bson.ObjectId.

    ```py
    from pydantic import BaseModel

    from pydantic_extra_types.mongo_object_id import MongoObjectId


    class MongoDocument(BaseModel):
        id: MongoObjectId


    doc = MongoDocument(id='5f9f2f4b9d3c5a7b4c7e6c1d')
    print(doc)
    # > id='5f9f2f4b9d3c5a7b4c7e6c1d'
    ```

    Raises:
        ValueError (surfaced by pydantic as a validation error): if the provided
        value is not a valid MongoDB ObjectId.
    """

    # A hex-encoded ObjectId is always exactly 24 characters long.
    OBJECT_ID_LENGTH = 24

    @classmethod
    def __get_pydantic_core_schema__(cls, _: Any, __: GetCoreSchemaHandler) -> core_schema.CoreSchema:
        """Build the schema: JSON input must be a 24-char string; Python input may
        additionally be an existing `ObjectId` instance, which passes through untouched.
        """

        def length_checked_str() -> core_schema.CoreSchema:
            # Cheap length gate before handing the value to bson for full validation.
            return core_schema.str_schema(min_length=cls.OBJECT_ID_LENGTH, max_length=cls.OBJECT_ID_LENGTH)

        str_to_object_id = core_schema.chain_schema(
            [length_checked_str(), core_schema.no_info_plain_validator_function(cls.validate)]
        )
        python_input = core_schema.union_schema(
            [core_schema.is_instance_schema(ObjectId), str_to_object_id]
        )
        return core_schema.json_or_python_schema(
            json_schema=length_checked_str(),
            python_schema=python_input,
            # ObjectId itself is not JSON-serializable; emit its hex string in JSON mode.
            serialization=core_schema.plain_serializer_function_ser_schema(lambda x: str(x), when_used='json'),
        )

    @classmethod
    def validate(cls, value: str) -> ObjectId:
        """Validate the MongoObjectId str is a valid ObjectId instance."""
        if ObjectId.is_valid(value):
            return ObjectId(value)
        raise ValueError(
            f"Invalid ObjectId {value} has to be 24 characters long and in the format '5f9f2f4b9d3c5a7b4c7e6c1d'."
        )
@dataclass
class ResolvedPathType(PathType):
    """A custom PathType that resolves the path to its absolute form.

    Args:
        path_type (typing.Literal['file', 'dir', 'new']): The type of path to resolve. Can be 'file', 'dir' or 'new'.

    Returns:
        Resolved path as a pathlib.Path object.

    Example:
        ```python
        from pydantic import BaseModel

        from pydantic_extra_types.path import ResolvedDirectoryPath, ResolvedFilePath, ResolvedNewPath


        class MyModel(BaseModel):
            file_path: ResolvedFilePath
            dir_path: ResolvedDirectoryPath
            new_path: ResolvedNewPath


        model = MyModel(file_path='~/myfile.txt', dir_path='~/mydir', new_path='~/newfile.txt')
        print(model.file_path)
        # > file_path=PosixPath('/home/user/myfile.txt') dir_path=PosixPath('/home/user/mydir') new_path=PosixPath('/home/user/newfile.txt')
        ```
    """

    @staticmethod
    def validate_file(path: Path, _: core_schema.ValidationInfo) -> Path:
        # Expand '~' and resolve relative segments/symlinks before delegating to
        # pydantic's existence check.
        return PathType.validate_file(path.expanduser().resolve(), _)

    @staticmethod
    def validate_directory(path: Path, _: core_schema.ValidationInfo) -> Path:
        return PathType.validate_directory(path.expanduser().resolve(), _)

    @staticmethod
    def validate_new(path: Path, _: core_schema.ValidationInfo) -> Path:
        return PathType.validate_new(path.expanduser().resolve(), _)

    def __hash__(self) -> int:
        # NOTE(review): mirrors pydantic.types.PathType.__hash__, which hashes the
        # *type* of path_type (str for every instance), so all instances share one
        # hash bucket; dataclass __eq__ still disambiguates. Confirm before changing.
        return hash(type(self.path_type))


ResolvedFilePath = Annotated[Path, ResolvedPathType('file')]
ResolvedDirectoryPath = Annotated[Path, ResolvedPathType('dir')]
ResolvedNewPath = Annotated[Path, ResolvedPathType('new')]
ResolvedExistingPath = typing.Union[ResolvedFilePath, ResolvedDirectoryPath]
+""" + +from __future__ import annotations + +from enum import Enum +from typing import Any, ClassVar + +from pydantic import GetCoreSchemaHandler +from pydantic_core import PydanticCustomError, core_schema + + +class PaymentCardBrand(str, Enum): + """Payment card brands supported by the [`PaymentCardNumber`][pydantic_extra_types.payment.PaymentCardNumber].""" + + amex = 'American Express' + mastercard = 'Mastercard' + visa = 'Visa' + mir = 'Mir' + maestro = 'Maestro' + discover = 'Discover' + verve = 'Verve' + dankort = 'Dankort' + troy = 'Troy' + unionpay = 'UnionPay' + jcb = 'JCB' + other = 'other' + + def __str__(self) -> str: + return self.value + + +class PaymentCardNumber(str): + """A [payment card number](https://en.wikipedia.org/wiki/Payment_card_number).""" + + strip_whitespace: ClassVar[bool] = True + """Whether to strip whitespace from the input value.""" + min_length: ClassVar[int] = 12 + """The minimum length of the card number.""" + max_length: ClassVar[int] = 19 + """The maximum length of the card number.""" + bin: str + """The first 6 digits of the card number.""" + last4: str + """The last 4 digits of the card number.""" + brand: PaymentCardBrand + """The brand of the card.""" + + def __init__(self, card_number: str): + self.validate_digits(card_number) + + card_number = self.validate_luhn_check_digit(card_number) + + self.bin = card_number[:6] + self.last4 = card_number[-4:] + self.brand = self.validate_brand(card_number) + + @classmethod + def __get_pydantic_core_schema__(cls, source: type[Any], handler: GetCoreSchemaHandler) -> core_schema.CoreSchema: + return core_schema.with_info_after_validator_function( + cls.validate, + core_schema.str_schema( + min_length=cls.min_length, max_length=cls.max_length, strip_whitespace=cls.strip_whitespace + ), + ) + + @classmethod + def validate(cls, __input_value: str, _: core_schema.ValidationInfo) -> PaymentCardNumber: + """Validate the `PaymentCardNumber` instance. 
+ + Args: + __input_value: The input value to validate. + _: The validation info. + + Returns: + The validated `PaymentCardNumber` instance. + """ + return cls(__input_value) + + @property + def masked(self) -> str: + """The masked card number.""" + num_masked = len(self) - 10 # len(bin) + len(last4) == 10 + return f'{self.bin}{"*" * num_masked}{self.last4}' + + @classmethod + def validate_digits(cls, card_number: str) -> None: + """Validate that the card number is all digits. + + Args: + card_number: The card number to validate. + + Raises: + PydanticCustomError: If the card number is not all digits. + """ + if not card_number or not all('0' <= c <= '9' for c in card_number): + raise PydanticCustomError('payment_card_number_digits', 'Card number is not all digits') + + @classmethod + def validate_luhn_check_digit(cls, card_number: str) -> str: + """Validate the payment card number. + Based on the [Luhn algorithm](https://en.wikipedia.org/wiki/Luhn_algorithm). + + Args: + card_number: The card number to validate. + + Returns: + The validated card number. + + Raises: + PydanticCustomError: If the card number is not valid. + """ + sum_ = int(card_number[-1]) + length = len(card_number) + parity = length % 2 + for i in range(length - 1): + digit = int(card_number[i]) + if i % 2 == parity: + digit *= 2 + if digit > 9: + digit -= 9 + sum_ += digit + valid = sum_ % 10 == 0 + if not valid: + raise PydanticCustomError('payment_card_number_luhn', 'Card number is not luhn valid') + return card_number + + @staticmethod + def validate_brand(card_number: str) -> PaymentCardBrand: + """Validate length based on + [BIN](https://en.wikipedia.org/wiki/Payment_card_number#Issuer_identification_number_(IIN)) + for major brands. + + Args: + card_number: The card number to validate. + + Returns: + The validated card brand. + + Raises: + PydanticCustomError: If the card number is not valid. 
+ """ + brand = PaymentCardBrand.other + + if card_number[0] == '4': + brand = PaymentCardBrand.visa + required_length = [13, 16, 19] + elif 51 <= int(card_number[:2]) <= 55: + brand = PaymentCardBrand.mastercard + required_length = [16] + elif card_number[:2] in {'34', '37'}: + brand = PaymentCardBrand.amex + required_length = [15] + elif 2200 <= int(card_number[:4]) <= 2204: + brand = PaymentCardBrand.mir + required_length = list(range(16, 20)) + elif card_number[:4] in {'5018', '5020', '5038', '5893', '6304', '6759', '6761', '6762', '6763'} or card_number[ + :6 + ] in ( + '676770', + '676774', + ): + brand = PaymentCardBrand.maestro + required_length = list(range(12, 20)) + elif card_number.startswith('65') or 644 <= int(card_number[:3]) <= 649 or card_number.startswith('6011'): + brand = PaymentCardBrand.discover + required_length = list(range(16, 20)) + elif ( + 506099 <= int(card_number[:6]) <= 506198 + or 650002 <= int(card_number[:6]) <= 650027 + or 507865 <= int(card_number[:6]) <= 507964 + ): + brand = PaymentCardBrand.verve + required_length = [16, 18, 19] + elif card_number[:4] in {'5019', '4571'}: + brand = PaymentCardBrand.dankort + required_length = [16] + elif card_number.startswith('9792'): + brand = PaymentCardBrand.troy + required_length = [16] + elif card_number[:2] in {'62', '81'}: + brand = PaymentCardBrand.unionpay + required_length = [16, 19] + elif 3528 <= int(card_number[:4]) <= 3589: + brand = PaymentCardBrand.jcb + required_length = [16, 19] + + valid = len(card_number) in required_length if brand != PaymentCardBrand.other else True + + if not valid: + raise PydanticCustomError( + 'payment_card_number_brand', + f'Length for a {brand} card must be {" or ".join(map(str, required_length))}', + {'brand': brand, 'required_length': required_length}, + ) + + return brand diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/pendulum_dt.py b/venv/lib/python3.10/site-packages/pydantic_extra_types/pendulum_dt.py new file mode 100644 
class DateTimeSettings(type):
    """Metaclass letting subclasses opt out of strict parsing via a class keyword:
    ``class LaxDateTime(DateTime, strict=False): ...``.
    """

    def __new__(cls, name, bases, dct, **kwargs):  # type: ignore[no-untyped-def]
        # Pop 'strict' so it does not leak into type.__new__'s keyword arguments,
        # and store it on the class body so instances/classmethods can read it.
        dct['strict'] = kwargs.pop('strict', True)
        return super().__new__(cls, name, bases, dct)

    def __init__(cls, name, bases, dct, **kwargs):  # type: ignore[no-untyped-def]
        super().__init__(name, bases, dct)
        # __init__ receives its own copy of the class keywords, so 'strict' is
        # still present here even though __new__ popped it from its copy.
        cls.strict = kwargs.get('strict', True)


class DateTime(_DateTime, metaclass=DateTimeSettings):
    """A `pendulum.DateTime` object. At runtime, this type decomposes into pendulum.DateTime automatically.
    This type exists because Pydantic throws a fit on unknown types.

    ```python
    from pydantic import BaseModel
    from pydantic_extra_types.pendulum_dt import DateTime


    class test_model(BaseModel):
        dt: DateTime


    print(test_model(dt='2021-01-01T00:00:00+00:00'))

    # > test_model(dt=DateTime(2021, 1, 1, 0, 0, 0, tzinfo=FixedTimezone(0, name="+00:00")))
    ```
    """

    # No instance attributes beyond what pendulum.DateTime provides.
    __slots__: list[str] = []

    @classmethod
    def __get_pydantic_core_schema__(cls, source: type[Any], handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
        """Return a Pydantic CoreSchema with the Datetime validation

        Args:
            source: The source type to be converted.
            handler: The handler to get the CoreSchema.

        Returns:
            A Pydantic CoreSchema with the Datetime validation.
        """
        # Wrap validator: _validate gets first crack and may delegate to the
        # stock datetime schema via the handler.
        return core_schema.no_info_wrap_validator_function(cls._validate, core_schema.datetime_schema())

    @classmethod
    def _validate(cls, value: Any, handler: core_schema.ValidatorFunctionWrapHandler) -> DateTime:
        """Validate the datetime object and return it.

        Args:
            value: The value to validate.
            handler: The handler to get the CoreSchema.

        Returns:
            The validated value or raises a PydanticCustomError.
        """
        # if we are passed an existing instance, pass it straight through.
        if isinstance(value, (_DateTime, datetime)):
            return DateTime.instance(value)
        try:
            # probably the best way to have feature parity with
            # https://docs.pydantic.dev/latest/api/standard_library_types/#datetimedatetime
            value = handler(value)
            return DateTime.instance(value)
        except ValueError:
            # Core schema rejected the input; fall back to pendulum's own parser,
            # honoring the class-level 'strict' flag set by the metaclass.
            try:
                value = parse(value, strict=cls.strict)
                if isinstance(value, _DateTime):
                    return DateTime.instance(value)
                # parse() can return Date/Time/Duration; only DateTime is acceptable.
                raise ValueError(f'value is not a valid datetime it is a {type(value)}')
            except ValueError:
                # Re-raise ValueError unchanged so pydantic reports the parse failure.
                raise
            except Exception as exc:
                raise PydanticCustomError('value_error', 'value is not a valid datetime') from exc
class Date(_Date):
    """A `pendulum.Date` object. At runtime, this type decomposes into pendulum.Date automatically.
    This type exists because Pydantic throws a fit on unknown types.

    ```python
    from pydantic import BaseModel
    from pydantic_extra_types.pendulum_dt import Date


    class test_model(BaseModel):
        dt: Date


    print(test_model(dt='2021-01-01'))

    # > test_model(dt=Date(2021, 1, 1))
    ```
    """

    # No instance attributes beyond what pendulum.Date provides.
    __slots__: list[str] = []

    @classmethod
    def __get_pydantic_core_schema__(cls, source: type[Any], handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
        """Return a Pydantic CoreSchema with the Date validation

        Args:
            source: The source type to be converted.
            handler: The handler to get the CoreSchema.

        Returns:
            A Pydantic CoreSchema with the Date validation.
        """
        return core_schema.no_info_wrap_validator_function(cls._validate, core_schema.date_schema())

    @classmethod
    def _validate(cls, value: Any, handler: core_schema.ValidatorFunctionWrapHandler) -> Date:
        """Validate the date object and return it.

        Args:
            value: The value to validate.
            handler: The handler to get the CoreSchema.

        Returns:
            The validated value or raises a PydanticCustomError.
        """
        # if we are passed an existing instance, pass it straight through.
        if isinstance(value, (_Date, date)):
            return Date(value.year, value.month, value.day)

        # otherwise, parse it.
        try:
            parsed = parse(value)
            if isinstance(parsed, (_DateTime, _Date)):
                return Date(parsed.year, parsed.month, parsed.day)
            # Fix: this message was a plain string, so the literal text
            # '{type(parsed)}' was emitted; the f-prefix (matching the sibling
            # DateTime._validate) interpolates the actual type.
            raise ValueError(f'value is not a valid date it is a {type(parsed)}')
        except Exception as exc:
            # Any failure above (parse error or wrong parsed type) surfaces as a
            # single uniform validation error.
            raise PydanticCustomError('value_error', 'value is not a valid date') from exc
class Duration(_Duration):
    """A `pendulum.Duration` object. At runtime, this type decomposes into pendulum.Duration automatically.
    This type exists because Pydantic throws a fit on unknown types.

    ```python
    from pydantic import BaseModel
    from pydantic_extra_types.pendulum_dt import Duration


    class test_model(BaseModel):
        delta_t: Duration


    print(test_model(delta_t='P1DT25H'))

    # > test_model(delta_t=Duration(days=2, hours=1))
    ```
    """

    # No instance attributes beyond what pendulum.Duration provides.
    __slots__: list[str] = []

    @classmethod
    def __get_pydantic_core_schema__(cls, source: type[Any], handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
        """Return a Pydantic CoreSchema with the Duration validation

        Args:
            source: The source type to be converted.
            handler: The handler to get the CoreSchema.

        Returns:
            A Pydantic CoreSchema with the Duration validation.
        """
        return core_schema.no_info_wrap_validator_function(
            cls._validate,
            core_schema.timedelta_schema(),
            # In JSON mode, serialize as an ISO 8601 string (None passes through).
            serialization=core_schema.plain_serializer_function_ser_schema(
                lambda instance: instance.to_iso8601_string(), when_used='json-unless-none'
            ),
        )

    def to_iso8601_string(self) -> str:
        """Convert this Duration to an ISO 8601 string.

        In addition to the standard ISO 8601 format, this method also supports the
        representation of fractions of a second and negative durations.

        Returns:
            str: The ISO 8601 string representation of the duration.
        """
        # Extracting components from the Duration object
        years = self.years
        months = self.months
        days = self._days
        hours = self.hours
        minutes = self.minutes
        seconds = self.remaining_seconds
        milliseconds = self.microseconds // 1000
        microseconds = self.microseconds % 1000

        # Constructing the ISO 8601 duration string
        iso_duration = 'P'
        if years or months or days:
            if years:
                iso_duration += f'{years}Y'
            if months:
                iso_duration += f'{months}M'
            if days:
                iso_duration += f'{days}D'

        if hours or minutes or seconds or milliseconds or microseconds:
            iso_duration += 'T'
            if hours:
                iso_duration += f'{hours}H'
            if minutes:
                iso_duration += f'{minutes}M'
            if seconds or milliseconds or microseconds:
                iso_duration += f'{seconds}'
                # Fractional seconds: '.mmm' then 'uuu' gives a 6-digit fraction
                # when sub-millisecond precision is present.
                if milliseconds or microseconds:
                    iso_duration += f'.{milliseconds:03d}'
                if microseconds:
                    iso_duration += f'{microseconds:03d}'
                iso_duration += 'S'

        # Prefix with '-' if the duration is negative
        if self.total_seconds() < 0:
            iso_duration = '-' + iso_duration

        # A zero duration produced only 'P'; normalize to the canonical 'P0D'.
        if iso_duration == 'P':
            iso_duration = 'P0D'

        return iso_duration

    @classmethod
    def _validate(cls, value: Any, handler: core_schema.ValidatorFunctionWrapHandler) -> Duration:
        """Validate the Duration object and return it.

        Args:
            value: The value to validate.
            handler: The handler to get the CoreSchema.

        Returns:
            The validated value or raises a PydanticCustomError.
        """

        # Re-wrap an existing pendulum Duration, copying each calendar component
        # so month/year arithmetic is preserved.
        if isinstance(value, _Duration):
            return Duration(
                years=value.years,
                months=value.months,
                weeks=value.weeks,
                days=value.remaining_days,
                hours=value.hours,
                minutes=value.minutes,
                seconds=value.remaining_seconds,
                microseconds=value.microseconds,
            )

        # Plain timedelta has no calendar components; copy its three fields.
        if isinstance(value, timedelta):
            return Duration(
                days=value.days,
                seconds=value.seconds,
                microseconds=value.microseconds,
            )

        # NOTE(review): assert is stripped under -O; a non-str here would then
        # fall through to parse() and be caught by the except below.
        assert isinstance(value, str)
        try:
            # https://github.com/python-pendulum/pendulum/issues/532
            # pendulum cannot parse a leading '-'; strip it, parse, then negate.
            if value.startswith('-'):
                parsed = parse(value.lstrip('-'), exact=True)
            else:
                parsed = parse(value, exact=True)
            if not isinstance(parsed, _Duration):
                raise ValueError(f'value is not a valid duration it is a {type(parsed)}')
            if value.startswith('-'):
                parsed = -parsed

            return Duration(
                years=parsed.years,
                months=parsed.months,
                weeks=parsed.weeks,
                days=parsed.remaining_days,
                hours=parsed.hours,
                minutes=parsed.minutes,
                seconds=parsed.remaining_seconds,
                microseconds=parsed.microseconds,
            )
        except Exception as exc:
            raise PydanticCustomError('value_error', 'value is not a valid duration') from exc
+""" + +from __future__ import annotations + +from collections.abc import Sequence +from dataclasses import dataclass +from functools import partial +from typing import Any, ClassVar + +from pydantic import GetCoreSchemaHandler, GetJsonSchemaHandler +from pydantic_core import PydanticCustomError, core_schema + +try: + import phonenumbers + from phonenumbers import PhoneNumber as BasePhoneNumber + from phonenumbers.phonenumberutil import NumberParseException +except ModuleNotFoundError as e: # pragma: no cover + raise RuntimeError( + '`PhoneNumber` requires "phonenumbers" to be installed. You can install it with "pip install phonenumbers"' + ) from e + + +class PhoneNumber(str): + """A wrapper around [phonenumbers](https://pypi.org/project/phonenumbers/) package, which + is a Python port of Google's [libphonenumber](https://github.com/google/libphonenumber/). + """ + + supported_regions: list[str] = [] + """The supported regions. If empty, all regions are supported.""" + + default_region_code: ClassVar[str | None] = None + """The default region code to use when parsing phone numbers without an international prefix.""" + phone_format: str = 'RFC3966' + """The format of the phone number.""" + + @classmethod + def __get_pydantic_json_schema__( + cls, schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler + ) -> dict[str, Any]: + json_schema = handler(schema) + json_schema.update({'format': 'phone'}) + return json_schema + + @classmethod + def __get_pydantic_core_schema__(cls, source: type[Any], handler: GetCoreSchemaHandler) -> core_schema.CoreSchema: + return core_schema.with_info_after_validator_function( + cls._validate, + core_schema.str_schema(), + ) + + @classmethod + def _validate(cls, phone_number: str, _: core_schema.ValidationInfo) -> str: + try: + parsed_number = phonenumbers.parse(phone_number, cls.default_region_code) + except phonenumbers.phonenumberutil.NumberParseException as exc: + raise PydanticCustomError('value_error', 'value is not a valid 
phone number') from exc + if not phonenumbers.is_valid_number(parsed_number): + raise PydanticCustomError('value_error', 'value is not a valid phone number') + + if cls.supported_regions and not any( + phonenumbers.is_valid_number_for_region(parsed_number, region_code=region) + for region in cls.supported_regions + ): + raise PydanticCustomError('value_error', 'value is not from a supported region') + + return phonenumbers.format_number(parsed_number, getattr(phonenumbers.PhoneNumberFormat, cls.phone_format)) + + def __eq__(self, other: Any) -> bool: + return super().__eq__(other) + + def __hash__(self) -> int: + return super().__hash__() + + +@dataclass(frozen=True) +class PhoneNumberValidator: + """A pydantic before validator for phone numbers using the [phonenumbers](https://pypi.org/project/phonenumbers/) package, + a Python port of Google's [libphonenumber](https://github.com/google/libphonenumber/). + + Intended to be used to create custom pydantic data types using the `typing.Annotated` type construct. + + Args: + default_region (str | None): The default region code to use when parsing phone numbers without an international prefix. + If `None` (default), the region must be supplied in the phone number as an international prefix. + number_format (str): The format of the phone number to return. See `phonenumbers.PhoneNumberFormat` for valid values. + supported_regions (list[str]): The supported regions. If empty, all regions are supported (default). + + Returns: + The formatted phone number. 
@dataclass(frozen=True)
class PhoneNumberValidator:
    """A pydantic before validator for phone numbers using the [phonenumbers](https://pypi.org/project/phonenumbers/) package,
    a Python port of Google's [libphonenumber](https://github.com/google/libphonenumber/).

    Intended to be used to create custom pydantic data types using the `typing.Annotated` type construct.

    Args:
        default_region (str | None): The default region code to use when parsing phone numbers without an international prefix.
            If `None` (default), the region must be supplied in the phone number as an international prefix.
        number_format (str): The format of the phone number to return. See `phonenumbers.PhoneNumberFormat` for valid values.
        supported_regions (list[str]): The supported regions. If empty, all regions are supported (default).

    Returns:
        The formatted phone number.

    Example:
        MyNumberType = Annotated[
            Union[str, phonenumbers.PhoneNumber],
            PhoneNumberValidator()
        ]
        USNumberType = Annotated[
            Union[str, phonenumbers.PhoneNumber],
            PhoneNumberValidator(supported_regions=['US'], default_region='US')
        ]

        class SomeModel(BaseModel):
            phone_number: MyNumberType
            us_number: USNumberType
    """

    default_region: str | None = None
    number_format: str = 'RFC3966'
    supported_regions: Sequence[str] | None = None

    def __post_init__(self) -> None:
        # Fail fast at annotation-construction time (misconfiguration is a
        # programming error), not at validation time.
        if self.default_region and self.default_region not in phonenumbers.SUPPORTED_REGIONS:
            raise ValueError(f'Invalid default region code: {self.default_region}')

        # Valid formats are the public upper-case constants on PhoneNumberFormat.
        if self.number_format not in (
            number_format
            for number_format in dir(phonenumbers.PhoneNumberFormat)
            if not number_format.startswith('_') and number_format.isupper()
        ):
            raise ValueError(f'Invalid number format: {self.number_format}')

        if self.supported_regions:
            for supported_region in self.supported_regions:
                if supported_region not in phonenumbers.SUPPORTED_REGIONS:
                    raise ValueError(f'Invalid supported region code: {supported_region}')

    @staticmethod
    def _parse(
        region: str | None,
        number_format: str,
        supported_regions: Sequence[str] | None,
        phone_number: Any,
    ) -> str:
        """Parse, validate and format one phone number (bound via functools.partial
        in __get_pydantic_core_schema__ so the schema captures this validator's config)."""
        if not phone_number:
            raise PydanticCustomError('value_error', 'value is not a valid phone number')

        if not isinstance(phone_number, (str, BasePhoneNumber)):
            raise PydanticCustomError('value_error', 'value is not a valid phone number')

        parsed_number = None
        if isinstance(phone_number, BasePhoneNumber):
            # Already-parsed numbers skip string parsing but are still validated below.
            parsed_number = phone_number
        else:
            try:
                parsed_number = phonenumbers.parse(phone_number, region=region)
            except NumberParseException as exc:
                raise PydanticCustomError('value_error', 'value is not a valid phone number') from exc

        if not phonenumbers.is_valid_number(parsed_number):
            raise PydanticCustomError('value_error', 'value is not a valid phone number')

        # The genexp's 'region' is local to the genexp and does not clobber the
        # 'region' parameter above (confusing name reuse, but harmless).
        if supported_regions and not any(
            phonenumbers.is_valid_number_for_region(parsed_number, region_code=region) for region in supported_regions
        ):
            raise PydanticCustomError('value_error', 'value is not from a supported region')

        return phonenumbers.format_number(parsed_number, getattr(phonenumbers.PhoneNumberFormat, number_format))

    def __get_pydantic_core_schema__(self, source: type[Any], handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
        """Bind this validator's configuration into a before-validator schema."""
        return core_schema.no_info_before_validator_function(
            partial(
                self._parse,
                self.default_region,
                self.number_format,
                self.supported_regions,
            ),
            core_schema.str_schema(),
        )

    def __get_pydantic_json_schema__(
        self, schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler
    ) -> dict[str, Any]:
        """Tag the generated JSON schema with `'format': 'phone'`."""
        json_schema = handler(schema)
        json_schema.update({'format': 'phone'})
        return json_schema

    def __hash__(self) -> int:
        # NOTE(review): delegates to object.__hash__ (identity) while the frozen
        # dataclass generates a value-based __eq__, so equal validators can hash
        # differently. Presumably done to keep the annotation hashable even with
        # a non-hashable supported_regions sequence — confirm before changing.
        return super().__hash__()
+""" + +from __future__ import annotations + +import itertools as it +from typing import Any, ClassVar + +from pydantic import GetCoreSchemaHandler +from pydantic_core import PydanticCustomError, core_schema + + +class ABARoutingNumber(str): + """The `ABARoutingNumber` data type is a string of 9 digits representing an ABA routing transit number. + + The algorithm used to validate the routing number is described in the + [ABA routing transit number](https://en.wikipedia.org/wiki/ABA_routing_transit_number#Check_digit) + Wikipedia article. + + ```py + from pydantic import BaseModel + + from pydantic_extra_types.routing_number import ABARoutingNumber + + + class BankAccount(BaseModel): + routing_number: ABARoutingNumber + + + account = BankAccount(routing_number='122105155') + print(account) + # > routing_number='122105155' + ``` + """ + + strip_whitespace: ClassVar[bool] = True + min_length: ClassVar[int] = 9 + max_length: ClassVar[int] = 9 + + def __init__(self, routing_number: str): + self._validate_digits(routing_number) + self._routing_number = self._validate_routing_number(routing_number) + + @classmethod + def __get_pydantic_core_schema__( + cls, source: type[Any], handler: GetCoreSchemaHandler + ) -> core_schema.AfterValidatorFunctionSchema: + return core_schema.with_info_after_validator_function( + cls._validate, + core_schema.str_schema( + min_length=cls.min_length, + max_length=cls.max_length, + strip_whitespace=cls.strip_whitespace, + strict=False, + ), + ) + + @classmethod + def _validate(cls, __input_value: str, _: core_schema.ValidationInfo) -> ABARoutingNumber: + return cls(__input_value) + + @classmethod + def _validate_digits(cls, routing_number: str) -> None: + """Check that the routing number is all digits. + + Args: + routing_number: The routing number to validate. + + Raises: + PydanticCustomError: If the routing number is not all digits. 
+ """ + if not routing_number.isdigit(): + raise PydanticCustomError('aba_routing_number', 'routing number is not all digits') + + @classmethod + def _validate_routing_number(cls, routing_number: str) -> str: + """Check [digit algorithm](https://en.wikipedia.org/wiki/ABA_routing_transit_number#Check_digit) for + [ABA routing transit number](https://www.routingnumber.com/). + + Args: + routing_number: The routing number to validate. + + Raises: + PydanticCustomError: If the routing number is incorrect. + """ + checksum = sum(int(digit) * factor for digit, factor in zip(routing_number, it.cycle((3, 7, 1)))) + if checksum % 10: + raise PydanticCustomError('aba_routing_number', 'Incorrect ABA routing transit number') + return routing_number diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/s3.py b/venv/lib/python3.10/site-packages/pydantic_extra_types/s3.py new file mode 100644 index 0000000000000000000000000000000000000000..edb7d441067b4fb2a86c52745f5763ad96dbad89 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pydantic_extra_types/s3.py @@ -0,0 +1,69 @@ +"""The `pydantic_extra_types.s3` module provides the +[`S3Path`][pydantic_extra_types.s3.S3Path] data type. + +A simpleAWS S3 URLs parser. +It also provides the `Bucket`, `Key` component. +""" + +from __future__ import annotations + +import re +from typing import Any, ClassVar + +from pydantic import GetCoreSchemaHandler +from pydantic_core import core_schema + + +class S3Path(str): + """An object representing a valid S3 path. + This type also allows you to access the `bucket` and `key` component of the S3 path. + It also contains the `last_key` which represents the last part of the path (tipically a file). 
+ + ```python + from pydantic import BaseModel + from pydantic_extra_types.s3 import S3Path + + + class TestModel(BaseModel): + path: S3Path + + + p = 's3://my-data-bucket/2023/08/29/sales-report.csv' + model = TestModel(path=p) + model + + # > TestModel(path=S3Path('s3://my-data-bucket/2023/08/29/sales-report.csv')) + + model.path.bucket + + # > 'my-data-bucket' + ``` + """ + + patt: ClassVar[str] = r'^s3://([^/]+)/(.*?([^/]+)/?)$' + + def __init__(self, value: str) -> None: + self.value = value + groups: tuple[str, str, str] = re.match(self.patt, self.value).groups() # type: ignore + self.bucket: str = groups[0] + self.key: str = groups[1] + self.last_key: str = groups[2] + + def __str__(self) -> str: # pragma: no cover + return self.value + + def __repr__(self) -> str: # pragma: no cover + return f'{self.__class__.__name__}({self.value!r})' + + @classmethod + def _validate(cls, __input_value: str, _: core_schema.ValidationInfo) -> S3Path: + return cls(__input_value) + + @classmethod + def __get_pydantic_core_schema__(cls, source: type[Any], handler: GetCoreSchemaHandler) -> core_schema.CoreSchema: + _, _ = source, handler + return core_schema.with_info_after_validator_function( + cls._validate, + core_schema.str_schema(pattern=cls.patt), + field_name=cls.__class__.__name__, + ) diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/script_code.py b/venv/lib/python3.10/site-packages/pydantic_extra_types/script_code.py new file mode 100644 index 0000000000000000000000000000000000000000..2838b5a1721d6edc07bf4cac1df645254103424a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pydantic_extra_types/script_code.py @@ -0,0 +1,97 @@ +"""script definitions that are based on the [ISO 15924](https://en.wikipedia.org/wiki/ISO_15924)""" + +from __future__ import annotations + +from typing import Any + +from pydantic import GetCoreSchemaHandler, GetJsonSchemaHandler +from pydantic_core import PydanticCustomError, core_schema + +try: + import pycountry 
+except ModuleNotFoundError as e: # pragma: no cover + raise RuntimeError( + 'The `script_code` module requires "pycountry" to be installed.' + ' You can install it with "pip install pycountry".' + ) from e + + +class ISO_15924(str): + """ISO_15924 parses script in the [ISO 15924](https://en.wikipedia.org/wiki/ISO_15924) + format. + + ```py + from pydantic import BaseModel + + from pydantic_extra_types.language_code import ISO_15924 + + + class Script(BaseModel): + alpha_4: ISO_15924 + + + script = Script(alpha_4='Java') + print(lang) + # > script='Java' + ``` + """ + + allowed_values_list = [script.alpha_4 for script in pycountry.scripts] + allowed_values = set(allowed_values_list) + + @classmethod + def _validate(cls, __input_value: str, _: core_schema.ValidationInfo) -> ISO_15924: + """Validate a ISO 15924 language code from the provided str value. + + Args: + __input_value: The str value to be validated. + _: The Pydantic ValidationInfo. + + Returns: + The validated ISO 15924 script code. + + Raises: + PydanticCustomError: If the ISO 15924 script code is not valid. + """ + if __input_value not in cls.allowed_values: + raise PydanticCustomError( + 'ISO_15924', 'Invalid ISO 15924 script code. See https://en.wikipedia.org/wiki/ISO_15924' + ) + return cls(__input_value) + + @classmethod + def __get_pydantic_core_schema__( + cls, _: type[Any], __: GetCoreSchemaHandler + ) -> core_schema.AfterValidatorFunctionSchema: + """Return a Pydantic CoreSchema with the ISO 639-3 language code validation. + + Args: + _: The source type. + __: The handler to get the CoreSchema. + + Returns: + A Pydantic CoreSchema with the ISO 639-3 language code validation. 
+ + """ + return core_schema.with_info_after_validator_function( + cls._validate, + core_schema.str_schema(min_length=4, max_length=4), + ) + + @classmethod + def __get_pydantic_json_schema__( + cls, schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler + ) -> dict[str, Any]: + """Return a Pydantic JSON Schema with the ISO 639-3 language code validation. + + Args: + schema: The Pydantic CoreSchema. + handler: The handler to get the JSON Schema. + + Returns: + A Pydantic JSON Schema with the ISO 639-3 language code validation. + + """ + json_schema = handler(schema) + json_schema.update({'enum': cls.allowed_values_list}) + return json_schema diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/semantic_version.py b/venv/lib/python3.10/site-packages/pydantic_extra_types/semantic_version.py new file mode 100644 index 0000000000000000000000000000000000000000..8ffdfde1487303f0811829bde9e7536344ea0aba --- /dev/null +++ b/venv/lib/python3.10/site-packages/pydantic_extra_types/semantic_version.py @@ -0,0 +1,59 @@ +"""SemanticVersion definition that is based on the Semantiv Versioning Specification [semver](https://semver.org/).""" + +from typing import Any, Callable + +from pydantic import GetJsonSchemaHandler +from pydantic.json_schema import JsonSchemaValue +from pydantic_core import core_schema + +try: + import semver +except ModuleNotFoundError as e: # pragma: no cover + raise RuntimeError( + 'The `semantic_version` module requires "semver" to be installed. You can install it with "pip install semver".' 
+ ) from e + + +class SemanticVersion(semver.Version): + """Semantic version based on the official [semver thread](https://python-semver.readthedocs.io/en/latest/advanced/combine-pydantic-and-semver.html).""" + + @classmethod + def __get_pydantic_core_schema__( + cls, + _source_type: Any, + _handler: Callable[[Any], core_schema.CoreSchema], + ) -> core_schema.CoreSchema: + def validate_from_str(value: str) -> SemanticVersion: + return cls.parse(value) + + from_str_schema = core_schema.chain_schema( + [ + core_schema.str_schema(), + core_schema.no_info_plain_validator_function(validate_from_str), + ] + ) + + return core_schema.json_or_python_schema( + json_schema=from_str_schema, + python_schema=core_schema.union_schema( + [ + core_schema.is_instance_schema(semver.Version), + from_str_schema, + ] + ), + serialization=core_schema.to_string_ser_schema(), + ) + + @classmethod + def __get_pydantic_json_schema__( + cls, _core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler + ) -> JsonSchemaValue: + return handler( + core_schema.str_schema( + pattern=r'^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$' + ) + ) + + @classmethod + def validate_from_str(cls, value: str) -> 'SemanticVersion': + return cls.parse(value) diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/semver.py b/venv/lib/python3.10/site-packages/pydantic_extra_types/semver.py new file mode 100644 index 0000000000000000000000000000000000000000..f53b80fb2d51a37115c192d459564292ed3e47f3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pydantic_extra_types/semver.py @@ -0,0 +1,77 @@ +"""The _VersionPydanticAnnotation class provides functionality to parse and validate Semantic Versioning (SemVer) strings. + +This class depends on the [semver](https://python-semver.readthedocs.io/en/latest/index.html) package. 
+""" + +import warnings +from typing import Any, Callable + +from pydantic import GetJsonSchemaHandler +from pydantic.json_schema import JsonSchemaValue +from pydantic_core import core_schema +from semver import Version +from typing_extensions import Annotated + +warnings.warn( + 'Use from pydantic_extra_types.semver import SemanticVersion instead. Will be removed in 3.0.0.', DeprecationWarning +) + + +class _VersionPydanticAnnotation(Version): + """Represents a Semantic Versioning (SemVer). + + Wraps the `version` type from `semver`. + + Example: + ```python + from pydantic import BaseModel + + from pydantic_extra_types.semver import _VersionPydanticAnnotation + + + class appVersion(BaseModel): + version: _VersionPydanticAnnotation + + + app_version = appVersion(version='1.2.3') + + print(app_version.version) + # > 1.2.3 + ``` + """ + + @classmethod + def __get_pydantic_core_schema__( + cls, + _source_type: Any, + _handler: Callable[[Any], core_schema.CoreSchema], + ) -> core_schema.CoreSchema: + def validate_from_str(value: str) -> Version: + return Version.parse(value) + + from_str_schema = core_schema.chain_schema( + [ + core_schema.str_schema(), + core_schema.no_info_plain_validator_function(validate_from_str), + ] + ) + + return core_schema.json_or_python_schema( + json_schema=from_str_schema, + python_schema=core_schema.union_schema( + [ + core_schema.is_instance_schema(Version), + from_str_schema, + ] + ), + serialization=core_schema.to_string_ser_schema(), + ) + + @classmethod + def __get_pydantic_json_schema__( + cls, _core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler + ) -> JsonSchemaValue: + return handler(core_schema.str_schema()) + + +ManifestVersion = Annotated[Version, _VersionPydanticAnnotation] diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/timezone_name.py b/venv/lib/python3.10/site-packages/pydantic_extra_types/timezone_name.py new file mode 100644 index 
0000000000000000000000000000000000000000..0280387eee96534b1c59a778ee921edf74591575 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pydantic_extra_types/timezone_name.py @@ -0,0 +1,186 @@ +"""Time zone name validation and serialization module.""" + +from __future__ import annotations + +import importlib +import sys +import warnings +from typing import Any, Callable, cast + +from pydantic import GetCoreSchemaHandler, GetJsonSchemaHandler +from pydantic_core import PydanticCustomError, core_schema + + +def _is_available(name: str) -> bool: + """Check if a module is available for import.""" + try: + importlib.import_module(name) + return True + except ModuleNotFoundError: # pragma: no cover + return False + + +def _tz_provider_from_zone_info() -> set[str]: # pragma: no cover + """Get timezones from the zoneinfo module.""" + from zoneinfo import available_timezones + + return set(available_timezones()) + + +def _tz_provider_from_pytz() -> set[str]: # pragma: no cover + """Get timezones from the pytz module.""" + from pytz import all_timezones + + return set(all_timezones) + + +def _warn_about_pytz_usage() -> None: + """Warn about using pytz with Python 3.9 or later.""" + warnings.warn( # pragma: no cover + 'Projects using Python 3.9 or later should be using the support now included as part of the standard library. ' + 'Please consider switching to the standard library (zoneinfo) module.' + ) + + +def get_timezones() -> set[str]: + """Determine the timezone provider and return available timezones.""" + if _is_available('zoneinfo'): # pragma: no cover + timezones = _tz_provider_from_zone_info() + if len(timezones) == 0: # pragma: no cover + raise ImportError('No timezone provider found. Please install tzdata with "pip install tzdata"') + return timezones + elif _is_available('pytz'): # pragma: no cover + return _tz_provider_from_pytz() + else: # pragma: no cover + if sys.version_info[:2] == (3, 8): + raise ImportError('No pytz module found. 
Please install it with "pip install pytz"') + raise ImportError('No timezone provider found. Please install tzdata with "pip install tzdata"') + + +class TimeZoneNameSettings(type): + def __new__(cls, name: str, bases: tuple[type, ...], dct: dict[str, Any], **kwargs: Any) -> type[TimeZoneName]: + dct['strict'] = kwargs.pop('strict', True) + return cast('type[TimeZoneName]', super().__new__(cls, name, bases, dct)) + + def __init__(cls, name: str, bases: tuple[type, ...], dct: dict[str, Any], **kwargs: Any) -> None: + super().__init__(name, bases, dct) + cls.strict = kwargs.get('strict', True) + + +def timezone_name_settings(**kwargs: Any) -> Callable[[type[TimeZoneName]], type[TimeZoneName]]: + def wrapper(cls: type[TimeZoneName]) -> type[TimeZoneName]: + cls.strict = kwargs.get('strict', True) + return cls + + return wrapper + + +@timezone_name_settings(strict=True) +class TimeZoneName(str): + """TimeZoneName is a custom string subclass for validating and serializing timezone names. + + The TimeZoneName class uses the IANA Time Zone Database for validation. + It supports both strict and non-strict modes for timezone name validation. 
+ + + ## Examples: + + Some examples of using the TimeZoneName class: + + ### Normal usage: + + ```python + from pydantic_extra_types.timezone_name import TimeZoneName + from pydantic import BaseModel + class Location(BaseModel): + city: str + timezone: TimeZoneName + + loc = Location(city="New York", timezone="America/New_York") + print(loc.timezone) + + >> America/New_York + + ``` + + ### Non-strict mode: + + ```python + + from pydantic_extra_types.timezone_name import TimeZoneName, timezone_name_settings + + @timezone_name_settings(strict=False) + class TZNonStrict(TimeZoneName): + pass + + tz = TZNonStrict("america/new_york") + + print(tz) + + >> america/new_york + + ``` + """ + + __slots__: list[str] = [] + allowed_values: set[str] = set(get_timezones()) + allowed_values_list: list[str] = sorted(allowed_values) + allowed_values_upper_to_correct: dict[str, str] = {val.upper(): val for val in allowed_values} + strict: bool + + @classmethod + def _validate(cls, __input_value: str, _: core_schema.ValidationInfo) -> TimeZoneName: + """Validate a time zone name from the provided str value. + + Args: + __input_value: The str value to be validated. + _: The Pydantic ValidationInfo. + + Returns: + The validated time zone name. + + Raises: + PydanticCustomError: If the timezone name is not valid. + """ + if __input_value not in cls.allowed_values: # be fast for the most common case + if not cls.strict: + upper_value = __input_value.strip().upper() + if upper_value in cls.allowed_values_upper_to_correct: + return cls(cls.allowed_values_upper_to_correct[upper_value]) + raise PydanticCustomError('TimeZoneName', 'Invalid timezone name.') + return cls(__input_value) + + @classmethod + def __get_pydantic_core_schema__( + cls, _: type[Any], __: GetCoreSchemaHandler + ) -> core_schema.AfterValidatorFunctionSchema: + """Return a Pydantic CoreSchema with the timezone name validation. + + Args: + _: The source type. + __: The handler to get the CoreSchema. 
+ + Returns: + A Pydantic CoreSchema with the timezone name validation. + """ + return core_schema.with_info_after_validator_function( + cls._validate, + core_schema.str_schema(min_length=1), + ) + + @classmethod + def __get_pydantic_json_schema__( + cls, schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler + ) -> dict[str, Any]: + """Return a Pydantic JSON Schema with the timezone name validation. + + Args: + schema: The Pydantic CoreSchema. + handler: The handler to get the JSON Schema. + + Returns: + A Pydantic JSON Schema with the timezone name validation. + """ + json_schema = handler(schema) + json_schema.update({'enum': cls.allowed_values_list}) + return json_schema diff --git a/venv/lib/python3.10/site-packages/pydantic_extra_types/ulid.py b/venv/lib/python3.10/site-packages/pydantic_extra_types/ulid.py new file mode 100644 index 0000000000000000000000000000000000000000..9f09075fc212e4c229398227409fe47cb421376f --- /dev/null +++ b/venv/lib/python3.10/site-packages/pydantic_extra_types/ulid.py @@ -0,0 +1,67 @@ +"""The `pydantic_extra_types.ULID` module provides the [`ULID`] data type. + +This class depends on the [python-ulid] package, which is a validate by the [ULID-spec](https://github.com/ulid/spec#implementations-in-other-languages). +""" + +from __future__ import annotations + +import uuid +from dataclasses import dataclass +from typing import Any, Union + +from pydantic import GetCoreSchemaHandler +from pydantic._internal import _repr +from pydantic_core import PydanticCustomError, core_schema + +try: + from ulid import ULID as _ULID +except ModuleNotFoundError as e: # pragma: no cover + raise RuntimeError( + 'The `ulid` module requires "python-ulid" to be installed. You can install it with "pip install python-ulid".' 
+ ) from e + +UlidType = Union[str, bytes, int] + + +@dataclass +class ULID(_repr.Representation): + """A wrapper around [python-ulid](https://pypi.org/project/python-ulid/) package, which + is a validate by the [ULID-spec](https://github.com/ulid/spec#implementations-in-other-languages). + """ + + ulid: _ULID + + @classmethod + def __get_pydantic_core_schema__(cls, source: type[Any], handler: GetCoreSchemaHandler) -> core_schema.CoreSchema: + return core_schema.no_info_wrap_validator_function( + cls._validate_ulid, + core_schema.union_schema( + [ + core_schema.is_instance_schema(_ULID), + core_schema.int_schema(), + core_schema.bytes_schema(), + core_schema.str_schema(), + core_schema.uuid_schema(), + ] + ), + ) + + @classmethod + def _validate_ulid(cls, value: Any, handler: core_schema.ValidatorFunctionWrapHandler) -> Any: + ulid: _ULID + if isinstance(value, bool): + raise PydanticCustomError('ulid_format', 'Unrecognized format') + try: + if isinstance(value, int): + ulid = _ULID.from_int(value) + elif isinstance(value, str): + ulid = _ULID.from_str(value) + elif isinstance(value, uuid.UUID): + ulid = _ULID.from_uuid(value) + elif isinstance(value, _ULID): + ulid = value + else: + ulid = _ULID.from_bytes(value) + except ValueError as e: + raise PydanticCustomError('ulid_format', 'Unrecognized format') from e + return handler(ulid) diff --git a/venv/lib/python3.10/site-packages/starlette/__init__.py b/venv/lib/python3.10/site-packages/starlette/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..56f6de799b5ee55e3943ba41447bead4e8fb99de --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/__init__.py @@ -0,0 +1 @@ +__version__ = "0.47.2" diff --git a/venv/lib/python3.10/site-packages/starlette/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9169217223e33b0918b769a71888d22ab5a8b986 
Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/__pycache__/_exception_handler.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/__pycache__/_exception_handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f09611bb326861081953553890b5a37f8e27f308 Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/__pycache__/_exception_handler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/__pycache__/_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a75f1069fb84160f1468232ecb8582bc027e557b Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/__pycache__/_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/__pycache__/applications.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/__pycache__/applications.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73830520e3b4d21db46ce43452a756ef4e24eda2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/__pycache__/applications.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/__pycache__/authentication.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/__pycache__/authentication.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..566d0952b6964b42141f8d285d5ba656457bfa66 Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/__pycache__/authentication.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/__pycache__/background.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/__pycache__/background.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..9a96fc97c3cbe7db458c863a46f5ea4632f77d5e Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/__pycache__/background.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/__pycache__/concurrency.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/__pycache__/concurrency.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8124970bb99d2d03d7049987e42914e7457fd283 Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/__pycache__/concurrency.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19c6a2fcf1048e9a120abe4b6a18c23250a8c47b Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/__pycache__/config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/__pycache__/convertors.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/__pycache__/convertors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22182776f6939b155865fe7c6e40805a8440357a Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/__pycache__/convertors.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/__pycache__/datastructures.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/__pycache__/datastructures.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a24bcdc1261be51726b89d01ab988b98ed31c6f7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/__pycache__/datastructures.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/__pycache__/endpoints.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/starlette/__pycache__/endpoints.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f3063b62b08ac96fe1be432e25513bd29a62a34 Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/__pycache__/endpoints.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/__pycache__/exceptions.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbd0d89eea4340e180a5add3644157488896e3f4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/__pycache__/exceptions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/__pycache__/formparsers.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/__pycache__/formparsers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03106f7f20a0412beafae7f3cec255a70cd4097f Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/__pycache__/formparsers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/__pycache__/requests.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/__pycache__/requests.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6234f914fbb330e28c57a6089c5ee978be8bc333 Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/__pycache__/requests.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/__pycache__/responses.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/__pycache__/responses.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf1874752be234f3fe17ccf0166b066ec8bff2b2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/__pycache__/responses.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/starlette/__pycache__/routing.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/__pycache__/routing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbf61a0fa7e4ce95a6a883f7d14a4826fa59057f Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/__pycache__/routing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/__pycache__/schemas.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/__pycache__/schemas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ebe7b22b4c06e4a979ad1a6518da32f7d10e7aa Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/__pycache__/schemas.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/__pycache__/staticfiles.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/__pycache__/staticfiles.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c497fdbea4bb433b9a168d5204e2ffefd4852af Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/__pycache__/staticfiles.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/__pycache__/status.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/__pycache__/status.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3416c778d4c09463e4a361dc6a92f7067a9e0676 Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/__pycache__/status.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/__pycache__/templating.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/__pycache__/templating.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3469316f6d0dfbde341db76739f2bc1d90220e22 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/starlette/__pycache__/templating.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/__pycache__/testclient.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/__pycache__/testclient.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b08d6eb8685cb79f9486b10351a7b625b5ac7492 Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/__pycache__/testclient.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/__pycache__/types.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/__pycache__/types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c7cab514609749ef65fde72150a04cd07c3a6f7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/__pycache__/types.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/__pycache__/websockets.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/__pycache__/websockets.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..adcbbd6736472490b6a5d6afd76289032d19ea32 Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/__pycache__/websockets.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/_exception_handler.py b/venv/lib/python3.10/site-packages/starlette/_exception_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..40761b68c98cb40a3d24ff1e6067f3a653800b94 --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/_exception_handler.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +from typing import Any + +from starlette._utils import is_async_callable +from starlette.concurrency import run_in_threadpool +from starlette.exceptions import HTTPException +from starlette.requests import Request +from starlette.types import ASGIApp, ExceptionHandler, Message, Receive, 
Scope, Send +from starlette.websockets import WebSocket + +ExceptionHandlers = dict[Any, ExceptionHandler] +StatusHandlers = dict[int, ExceptionHandler] + + +def _lookup_exception_handler(exc_handlers: ExceptionHandlers, exc: Exception) -> ExceptionHandler | None: + for cls in type(exc).__mro__: + if cls in exc_handlers: + return exc_handlers[cls] + return None + + +def wrap_app_handling_exceptions(app: ASGIApp, conn: Request | WebSocket) -> ASGIApp: + exception_handlers: ExceptionHandlers + status_handlers: StatusHandlers + try: + exception_handlers, status_handlers = conn.scope["starlette.exception_handlers"] + except KeyError: + exception_handlers, status_handlers = {}, {} + + async def wrapped_app(scope: Scope, receive: Receive, send: Send) -> None: + response_started = False + + async def sender(message: Message) -> None: + nonlocal response_started + + if message["type"] == "http.response.start": + response_started = True + await send(message) + + try: + await app(scope, receive, sender) + except Exception as exc: + handler = None + + if isinstance(exc, HTTPException): + handler = status_handlers.get(exc.status_code) + + if handler is None: + handler = _lookup_exception_handler(exception_handlers, exc) + + if handler is None: + raise exc + + if response_started: + raise RuntimeError("Caught handled exception, but response already started.") from exc + + if is_async_callable(handler): + response = await handler(conn, exc) + else: + response = await run_in_threadpool(handler, conn, exc) + if response is not None: + await response(scope, receive, sender) + + return wrapped_app diff --git a/venv/lib/python3.10/site-packages/starlette/_utils.py b/venv/lib/python3.10/site-packages/starlette/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5ac985de1dd1c49f0cdaf931de15da102162ebea --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/_utils.py @@ -0,0 +1,101 @@ +from __future__ import annotations + +import functools +import 
inspect +import sys +from collections.abc import Awaitable, Generator +from contextlib import AbstractAsyncContextManager, contextmanager +from typing import Any, Callable, Generic, Protocol, TypeVar, overload + +from starlette.types import Scope + +if sys.version_info >= (3, 13): # pragma: no cover + from typing import TypeIs +else: # pragma: no cover + from typing_extensions import TypeIs + +has_exceptiongroups = True +if sys.version_info < (3, 11): # pragma: no cover + try: + from exceptiongroup import BaseExceptionGroup # type: ignore[unused-ignore,import-not-found] + except ImportError: + has_exceptiongroups = False + +T = TypeVar("T") +AwaitableCallable = Callable[..., Awaitable[T]] + + +@overload +def is_async_callable(obj: AwaitableCallable[T]) -> TypeIs[AwaitableCallable[T]]: ... + + +@overload +def is_async_callable(obj: Any) -> TypeIs[AwaitableCallable[Any]]: ... + + +def is_async_callable(obj: Any) -> Any: + while isinstance(obj, functools.partial): + obj = obj.func + + return inspect.iscoroutinefunction(obj) or (callable(obj) and inspect.iscoroutinefunction(obj.__call__)) + + +T_co = TypeVar("T_co", covariant=True) + + +class AwaitableOrContextManager(Awaitable[T_co], AbstractAsyncContextManager[T_co], Protocol[T_co]): ... + + +class SupportsAsyncClose(Protocol): + async def close(self) -> None: ... 
# pragma: no cover + + +SupportsAsyncCloseType = TypeVar("SupportsAsyncCloseType", bound=SupportsAsyncClose, covariant=False) + + +class AwaitableOrContextManagerWrapper(Generic[SupportsAsyncCloseType]): + __slots__ = ("aw", "entered") + + def __init__(self, aw: Awaitable[SupportsAsyncCloseType]) -> None: + self.aw = aw + + def __await__(self) -> Generator[Any, None, SupportsAsyncCloseType]: + return self.aw.__await__() + + async def __aenter__(self) -> SupportsAsyncCloseType: + self.entered = await self.aw + return self.entered + + async def __aexit__(self, *args: Any) -> None | bool: + await self.entered.close() + return None + + +@contextmanager +def collapse_excgroups() -> Generator[None, None, None]: + try: + yield + except BaseException as exc: + if has_exceptiongroups: # pragma: no cover + while isinstance(exc, BaseExceptionGroup) and len(exc.exceptions) == 1: + exc = exc.exceptions[0] + + raise exc + + +def get_route_path(scope: Scope) -> str: + path: str = scope["path"] + root_path = scope.get("root_path", "") + if not root_path: + return path + + if not path.startswith(root_path): + return path + + if path == root_path: + return "" + + if path[len(root_path)] == "/": + return path[len(root_path) :] + + return path diff --git a/venv/lib/python3.10/site-packages/starlette/applications.py b/venv/lib/python3.10/site-packages/starlette/applications.py new file mode 100644 index 0000000000000000000000000000000000000000..62c0eb165eef282b6603f6ef17839214a8c531d8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/applications.py @@ -0,0 +1,250 @@ +from __future__ import annotations + +import sys +import warnings +from collections.abc import Awaitable, Mapping, Sequence +from typing import Any, Callable, TypeVar + +if sys.version_info >= (3, 10): # pragma: no cover + from typing import ParamSpec +else: # pragma: no cover + from typing_extensions import ParamSpec + +from starlette.datastructures import State, URLPath +from starlette.middleware import 
Middleware, _MiddlewareFactory +from starlette.middleware.base import BaseHTTPMiddleware +from starlette.middleware.errors import ServerErrorMiddleware +from starlette.middleware.exceptions import ExceptionMiddleware +from starlette.requests import Request +from starlette.responses import Response +from starlette.routing import BaseRoute, Router +from starlette.types import ASGIApp, ExceptionHandler, Lifespan, Receive, Scope, Send +from starlette.websockets import WebSocket + +AppType = TypeVar("AppType", bound="Starlette") +P = ParamSpec("P") + + +class Starlette: + """Creates an Starlette application.""" + + def __init__( + self: AppType, + debug: bool = False, + routes: Sequence[BaseRoute] | None = None, + middleware: Sequence[Middleware] | None = None, + exception_handlers: Mapping[Any, ExceptionHandler] | None = None, + on_startup: Sequence[Callable[[], Any]] | None = None, + on_shutdown: Sequence[Callable[[], Any]] | None = None, + lifespan: Lifespan[AppType] | None = None, + ) -> None: + """Initializes the application. + + Parameters: + debug: Boolean indicating if debug tracebacks should be returned on errors. + routes: A list of routes to serve incoming HTTP and WebSocket requests. + middleware: A list of middleware to run for every request. A starlette + application will always automatically include two middleware classes. + `ServerErrorMiddleware` is added as the very outermost middleware, to handle + any uncaught errors occurring anywhere in the entire stack. + `ExceptionMiddleware` is added as the very innermost middleware, to deal + with handled exception cases occurring in the routing or endpoints. + exception_handlers: A mapping of either integer status codes, + or exception class types onto callables which handle the exceptions. + Exception handler callables should be of the form + `handler(request, exc) -> response` and may be either standard functions, or + async functions. + on_startup: A list of callables to run on application startup. 
+ Startup handler callables do not take any arguments, and may be either + standard functions, or async functions. + on_shutdown: A list of callables to run on application shutdown. + Shutdown handler callables do not take any arguments, and may be either + standard functions, or async functions. + lifespan: A lifespan context function, which can be used to perform + startup and shutdown tasks. This is a newer style that replaces the + `on_startup` and `on_shutdown` handlers. Use one or the other, not both. + """ + # The lifespan context function is a newer style that replaces + # on_startup / on_shutdown handlers. Use one or the other, not both. + assert lifespan is None or (on_startup is None and on_shutdown is None), ( + "Use either 'lifespan' or 'on_startup'/'on_shutdown', not both." + ) + + self.debug = debug + self.state = State() + self.router = Router(routes, on_startup=on_startup, on_shutdown=on_shutdown, lifespan=lifespan) + self.exception_handlers = {} if exception_handlers is None else dict(exception_handlers) + self.user_middleware = [] if middleware is None else list(middleware) + self.middleware_stack: ASGIApp | None = None + + def build_middleware_stack(self) -> ASGIApp: + debug = self.debug + error_handler = None + exception_handlers: dict[Any, ExceptionHandler] = {} + + for key, value in self.exception_handlers.items(): + if key in (500, Exception): + error_handler = value + else: + exception_handlers[key] = value + + middleware = ( + [Middleware(ServerErrorMiddleware, handler=error_handler, debug=debug)] + + self.user_middleware + + [Middleware(ExceptionMiddleware, handlers=exception_handlers, debug=debug)] + ) + + app = self.router + for cls, args, kwargs in reversed(middleware): + app = cls(app, *args, **kwargs) + return app + + @property + def routes(self) -> list[BaseRoute]: + return self.router.routes + + def url_path_for(self, name: str, /, **path_params: Any) -> URLPath: + return self.router.url_path_for(name, **path_params) + + async def 
__call__(self, scope: Scope, receive: Receive, send: Send) -> None: + scope["app"] = self + if self.middleware_stack is None: + self.middleware_stack = self.build_middleware_stack() + await self.middleware_stack(scope, receive, send) + + def on_event(self, event_type: str) -> Callable: # type: ignore[type-arg] + return self.router.on_event(event_type) # pragma: no cover + + def mount(self, path: str, app: ASGIApp, name: str | None = None) -> None: + self.router.mount(path, app=app, name=name) # pragma: no cover + + def host(self, host: str, app: ASGIApp, name: str | None = None) -> None: + self.router.host(host, app=app, name=name) # pragma: no cover + + def add_middleware( + self, + middleware_class: _MiddlewareFactory[P], + *args: P.args, + **kwargs: P.kwargs, + ) -> None: + if self.middleware_stack is not None: # pragma: no cover + raise RuntimeError("Cannot add middleware after an application has started") + self.user_middleware.insert(0, Middleware(middleware_class, *args, **kwargs)) + + def add_exception_handler( + self, + exc_class_or_status_code: int | type[Exception], + handler: ExceptionHandler, + ) -> None: # pragma: no cover + self.exception_handlers[exc_class_or_status_code] = handler + + def add_event_handler( + self, + event_type: str, + func: Callable, # type: ignore[type-arg] + ) -> None: # pragma: no cover + self.router.add_event_handler(event_type, func) + + def add_route( + self, + path: str, + route: Callable[[Request], Awaitable[Response] | Response], + methods: list[str] | None = None, + name: str | None = None, + include_in_schema: bool = True, + ) -> None: # pragma: no cover + self.router.add_route(path, route, methods=methods, name=name, include_in_schema=include_in_schema) + + def add_websocket_route( + self, + path: str, + route: Callable[[WebSocket], Awaitable[None]], + name: str | None = None, + ) -> None: # pragma: no cover + self.router.add_websocket_route(path, route, name=name) + + def exception_handler(self, 
exc_class_or_status_code: int | type[Exception]) -> Callable: # type: ignore[type-arg] + warnings.warn( + "The `exception_handler` decorator is deprecated, and will be removed in version 1.0.0. " + "Refer to https://www.starlette.io/exceptions/ for the recommended approach.", + DeprecationWarning, + ) + + def decorator(func: Callable) -> Callable: # type: ignore[type-arg] + self.add_exception_handler(exc_class_or_status_code, func) + return func + + return decorator + + def route( + self, + path: str, + methods: list[str] | None = None, + name: str | None = None, + include_in_schema: bool = True, + ) -> Callable: # type: ignore[type-arg] + """ + We no longer document this decorator style API, and its usage is discouraged. + Instead you should use the following approach: + + >>> routes = [Route(path, endpoint=...), ...] + >>> app = Starlette(routes=routes) + """ + warnings.warn( + "The `route` decorator is deprecated, and will be removed in version 1.0.0. " + "Refer to https://www.starlette.io/routing/ for the recommended approach.", + DeprecationWarning, + ) + + def decorator(func: Callable) -> Callable: # type: ignore[type-arg] + self.router.add_route( + path, + func, + methods=methods, + name=name, + include_in_schema=include_in_schema, + ) + return func + + return decorator + + def websocket_route(self, path: str, name: str | None = None) -> Callable: # type: ignore[type-arg] + """ + We no longer document this decorator style API, and its usage is discouraged. + Instead you should use the following approach: + + >>> routes = [WebSocketRoute(path, endpoint=...), ...] + >>> app = Starlette(routes=routes) + """ + warnings.warn( + "The `websocket_route` decorator is deprecated, and will be removed in version 1.0.0. 
" + "Refer to https://www.starlette.io/routing/#websocket-routing for the recommended approach.", + DeprecationWarning, + ) + + def decorator(func: Callable) -> Callable: # type: ignore[type-arg] + self.router.add_websocket_route(path, func, name=name) + return func + + return decorator + + def middleware(self, middleware_type: str) -> Callable: # type: ignore[type-arg] + """ + We no longer document this decorator style API, and its usage is discouraged. + Instead you should use the following approach: + + >>> middleware = [Middleware(...), ...] + >>> app = Starlette(middleware=middleware) + """ + warnings.warn( + "The `middleware` decorator is deprecated, and will be removed in version 1.0.0. " + "Refer to https://www.starlette.io/middleware/#using-middleware for recommended approach.", + DeprecationWarning, + ) + assert middleware_type == "http", 'Currently only middleware("http") is supported.' + + def decorator(func: Callable) -> Callable: # type: ignore[type-arg] + self.add_middleware(BaseHTTPMiddleware, dispatch=func) + return func + + return decorator diff --git a/venv/lib/python3.10/site-packages/starlette/authentication.py b/venv/lib/python3.10/site-packages/starlette/authentication.py new file mode 100644 index 0000000000000000000000000000000000000000..a7138949e256ad6defdbd3e2f637d0ba11affb76 --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/authentication.py @@ -0,0 +1,148 @@ +from __future__ import annotations + +import functools +import inspect +import sys +from collections.abc import Sequence +from typing import Any, Callable +from urllib.parse import urlencode + +if sys.version_info >= (3, 10): # pragma: no cover + from typing import ParamSpec +else: # pragma: no cover + from typing_extensions import ParamSpec + +from starlette._utils import is_async_callable +from starlette.exceptions import HTTPException +from starlette.requests import HTTPConnection, Request +from starlette.responses import RedirectResponse +from starlette.websockets 
import WebSocket + +_P = ParamSpec("_P") + + +def has_required_scope(conn: HTTPConnection, scopes: Sequence[str]) -> bool: + for scope in scopes: + if scope not in conn.auth.scopes: + return False + return True + + +def requires( + scopes: str | Sequence[str], + status_code: int = 403, + redirect: str | None = None, +) -> Callable[[Callable[_P, Any]], Callable[_P, Any]]: + scopes_list = [scopes] if isinstance(scopes, str) else list(scopes) + + def decorator( + func: Callable[_P, Any], + ) -> Callable[_P, Any]: + sig = inspect.signature(func) + for idx, parameter in enumerate(sig.parameters.values()): + if parameter.name == "request" or parameter.name == "websocket": + type_ = parameter.name + break + else: + raise Exception(f'No "request" or "websocket" argument on function "{func}"') + + if type_ == "websocket": + # Handle websocket functions. (Always async) + @functools.wraps(func) + async def websocket_wrapper(*args: _P.args, **kwargs: _P.kwargs) -> None: + websocket = kwargs.get("websocket", args[idx] if idx < len(args) else None) + assert isinstance(websocket, WebSocket) + + if not has_required_scope(websocket, scopes_list): + await websocket.close() + else: + await func(*args, **kwargs) + + return websocket_wrapper + + elif is_async_callable(func): + # Handle async request/response functions. + @functools.wraps(func) + async def async_wrapper(*args: _P.args, **kwargs: _P.kwargs) -> Any: + request = kwargs.get("request", args[idx] if idx < len(args) else None) + assert isinstance(request, Request) + + if not has_required_scope(request, scopes_list): + if redirect is not None: + orig_request_qparam = urlencode({"next": str(request.url)}) + next_url = f"{request.url_for(redirect)}?{orig_request_qparam}" + return RedirectResponse(url=next_url, status_code=303) + raise HTTPException(status_code=status_code) + return await func(*args, **kwargs) + + return async_wrapper + + else: + # Handle sync request/response functions. 
+ @functools.wraps(func) + def sync_wrapper(*args: _P.args, **kwargs: _P.kwargs) -> Any: + request = kwargs.get("request", args[idx] if idx < len(args) else None) + assert isinstance(request, Request) + + if not has_required_scope(request, scopes_list): + if redirect is not None: + orig_request_qparam = urlencode({"next": str(request.url)}) + next_url = f"{request.url_for(redirect)}?{orig_request_qparam}" + return RedirectResponse(url=next_url, status_code=303) + raise HTTPException(status_code=status_code) + return func(*args, **kwargs) + + return sync_wrapper + + return decorator + + +class AuthenticationError(Exception): + pass + + +class AuthenticationBackend: + async def authenticate(self, conn: HTTPConnection) -> tuple[AuthCredentials, BaseUser] | None: + raise NotImplementedError() # pragma: no cover + + +class AuthCredentials: + def __init__(self, scopes: Sequence[str] | None = None): + self.scopes = [] if scopes is None else list(scopes) + + +class BaseUser: + @property + def is_authenticated(self) -> bool: + raise NotImplementedError() # pragma: no cover + + @property + def display_name(self) -> str: + raise NotImplementedError() # pragma: no cover + + @property + def identity(self) -> str: + raise NotImplementedError() # pragma: no cover + + +class SimpleUser(BaseUser): + def __init__(self, username: str) -> None: + self.username = username + + @property + def is_authenticated(self) -> bool: + return True + + @property + def display_name(self) -> str: + return self.username + + +class UnauthenticatedUser(BaseUser): + @property + def is_authenticated(self) -> bool: + return False + + @property + def display_name(self) -> str: + return "" diff --git a/venv/lib/python3.10/site-packages/starlette/background.py b/venv/lib/python3.10/site-packages/starlette/background.py new file mode 100644 index 0000000000000000000000000000000000000000..8a4562cdc9f10dace61e5e6b20ff859ef00cdc8e --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/background.py @@ 
-0,0 +1,42 @@ +from __future__ import annotations + +import sys +from collections.abc import Sequence +from typing import Any, Callable + +if sys.version_info >= (3, 10): # pragma: no cover + from typing import ParamSpec +else: # pragma: no cover + from typing_extensions import ParamSpec + +from starlette._utils import is_async_callable +from starlette.concurrency import run_in_threadpool + +P = ParamSpec("P") + + +class BackgroundTask: + def __init__(self, func: Callable[P, Any], *args: P.args, **kwargs: P.kwargs) -> None: + self.func = func + self.args = args + self.kwargs = kwargs + self.is_async = is_async_callable(func) + + async def __call__(self) -> None: + if self.is_async: + await self.func(*self.args, **self.kwargs) + else: + await run_in_threadpool(self.func, *self.args, **self.kwargs) + + +class BackgroundTasks(BackgroundTask): + def __init__(self, tasks: Sequence[BackgroundTask] | None = None): + self.tasks = list(tasks) if tasks else [] + + def add_task(self, func: Callable[P, Any], *args: P.args, **kwargs: P.kwargs) -> None: + task = BackgroundTask(func, *args, **kwargs) + self.tasks.append(task) + + async def __call__(self) -> None: + for task in self.tasks: + await task() diff --git a/venv/lib/python3.10/site-packages/starlette/concurrency.py b/venv/lib/python3.10/site-packages/starlette/concurrency.py new file mode 100644 index 0000000000000000000000000000000000000000..7986475387964445ecf157b4edae394aa8669325 --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/concurrency.py @@ -0,0 +1,63 @@ +from __future__ import annotations + +import functools +import sys +import warnings +from collections.abc import AsyncIterator, Coroutine, Iterable, Iterator +from typing import Callable, TypeVar + +import anyio.to_thread + +if sys.version_info >= (3, 10): # pragma: no cover + from typing import ParamSpec +else: # pragma: no cover + from typing_extensions import ParamSpec + +P = ParamSpec("P") +T = TypeVar("T") + + +async def 
run_until_first_complete(*args: tuple[Callable, dict]) -> None: # type: ignore[type-arg] + warnings.warn( + "run_until_first_complete is deprecated and will be removed in a future version.", + DeprecationWarning, + ) + + async with anyio.create_task_group() as task_group: + + async def run(func: Callable[[], Coroutine]) -> None: # type: ignore[type-arg] + await func() + task_group.cancel_scope.cancel() + + for func, kwargs in args: + task_group.start_soon(run, functools.partial(func, **kwargs)) + + +async def run_in_threadpool(func: Callable[P, T], *args: P.args, **kwargs: P.kwargs) -> T: + func = functools.partial(func, *args, **kwargs) + return await anyio.to_thread.run_sync(func) + + +class _StopIteration(Exception): + pass + + +def _next(iterator: Iterator[T]) -> T: + # We can't raise `StopIteration` from within the threadpool iterator + # and catch it outside that context, so we coerce them into a different + # exception type. + try: + return next(iterator) + except StopIteration: + raise _StopIteration + + +async def iterate_in_threadpool( + iterator: Iterable[T], +) -> AsyncIterator[T]: + as_iterator = iter(iterator) + while True: + try: + yield await anyio.to_thread.run_sync(_next, as_iterator) + except _StopIteration: + break diff --git a/venv/lib/python3.10/site-packages/starlette/config.py b/venv/lib/python3.10/site-packages/starlette/config.py new file mode 100644 index 0000000000000000000000000000000000000000..091f857f3d98eb8acbe61d7f85d3a3f37f0b026c --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/config.py @@ -0,0 +1,139 @@ +from __future__ import annotations + +import os +import warnings +from collections.abc import Iterator, Mapping, MutableMapping +from pathlib import Path +from typing import Any, Callable, TypeVar, overload + + +class undefined: + pass + + +class EnvironError(Exception): + pass + + +class Environ(MutableMapping[str, str]): + def __init__(self, environ: MutableMapping[str, str] = os.environ): + self._environ = 
environ + self._has_been_read: set[str] = set() + + def __getitem__(self, key: str) -> str: + self._has_been_read.add(key) + return self._environ.__getitem__(key) + + def __setitem__(self, key: str, value: str) -> None: + if key in self._has_been_read: + raise EnvironError(f"Attempting to set environ['{key}'], but the value has already been read.") + self._environ.__setitem__(key, value) + + def __delitem__(self, key: str) -> None: + if key in self._has_been_read: + raise EnvironError(f"Attempting to delete environ['{key}'], but the value has already been read.") + self._environ.__delitem__(key) + + def __iter__(self) -> Iterator[str]: + return iter(self._environ) + + def __len__(self) -> int: + return len(self._environ) + + +environ = Environ() + +T = TypeVar("T") + + +class Config: + def __init__( + self, + env_file: str | Path | None = None, + environ: Mapping[str, str] = environ, + env_prefix: str = "", + ) -> None: + self.environ = environ + self.env_prefix = env_prefix + self.file_values: dict[str, str] = {} + if env_file is not None: + if not os.path.isfile(env_file): + warnings.warn(f"Config file '{env_file}' not found.") + else: + self.file_values = self._read_file(env_file) + + @overload + def __call__(self, key: str, *, default: None) -> str | None: ... + + @overload + def __call__(self, key: str, cast: type[T], default: T = ...) -> T: ... + + @overload + def __call__(self, key: str, cast: type[str] = ..., default: str = ...) -> str: ... + + @overload + def __call__( + self, + key: str, + cast: Callable[[Any], T] = ..., + default: Any = ..., + ) -> T: ... + + @overload + def __call__(self, key: str, cast: type[str] = ..., default: T = ...) -> T | str: ... 
+ + def __call__( + self, + key: str, + cast: Callable[[Any], Any] | None = None, + default: Any = undefined, + ) -> Any: + return self.get(key, cast, default) + + def get( + self, + key: str, + cast: Callable[[Any], Any] | None = None, + default: Any = undefined, + ) -> Any: + key = self.env_prefix + key + if key in self.environ: + value = self.environ[key] + return self._perform_cast(key, value, cast) + if key in self.file_values: + value = self.file_values[key] + return self._perform_cast(key, value, cast) + if default is not undefined: + return self._perform_cast(key, default, cast) + raise KeyError(f"Config '{key}' is missing, and has no default.") + + def _read_file(self, file_name: str | Path) -> dict[str, str]: + file_values: dict[str, str] = {} + with open(file_name) as input_file: + for line in input_file.readlines(): + line = line.strip() + if "=" in line and not line.startswith("#"): + key, value = line.split("=", 1) + key = key.strip() + value = value.strip().strip("\"'") + file_values[key] = value + return file_values + + def _perform_cast( + self, + key: str, + value: Any, + cast: Callable[[Any], Any] | None = None, + ) -> Any: + if cast is None or value is None: + return value + elif cast is bool and isinstance(value, str): + mapping = {"true": True, "1": True, "false": False, "0": False} + value = value.lower() + if value not in mapping: + raise ValueError(f"Config '{key}' has value '{value}'. Not a valid bool.") + return mapping[value] + try: + return cast(value) + except (TypeError, ValueError): + raise ValueError(f"Config '{key}' has value '{value}'. 
Not a valid {cast.__name__}.") diff --git a/venv/lib/python3.10/site-packages/starlette/convertors.py b/venv/lib/python3.10/site-packages/starlette/convertors.py new file mode 100644 index 0000000000000000000000000000000000000000..72b1cf9fdfb6d7f5be61f648a9ae016deb45012b --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/convertors.py @@ -0,0 +1,89 @@ +from __future__ import annotations + +import math +import uuid +from typing import Any, ClassVar, Generic, TypeVar + +T = TypeVar("T") + + +class Convertor(Generic[T]): + regex: ClassVar[str] = "" + + def convert(self, value: str) -> T: + raise NotImplementedError() # pragma: no cover + + def to_string(self, value: T) -> str: + raise NotImplementedError() # pragma: no cover + + +class StringConvertor(Convertor[str]): + regex = "[^/]+" + + def convert(self, value: str) -> str: + return value + + def to_string(self, value: str) -> str: + value = str(value) + assert "/" not in value, "May not contain path separators" + assert value, "Must not be empty" + return value + + +class PathConvertor(Convertor[str]): + regex = ".*" + + def convert(self, value: str) -> str: + return str(value) + + def to_string(self, value: str) -> str: + return str(value) + + +class IntegerConvertor(Convertor[int]): + regex = "[0-9]+" + + def convert(self, value: str) -> int: + return int(value) + + def to_string(self, value: int) -> str: + value = int(value) + assert value >= 0, "Negative integers are not supported" + return str(value) + + +class FloatConvertor(Convertor[float]): + regex = r"[0-9]+(\.[0-9]+)?" 
+ + def convert(self, value: str) -> float: + return float(value) + + def to_string(self, value: float) -> str: + value = float(value) + assert value >= 0.0, "Negative floats are not supported" + assert not math.isnan(value), "NaN values are not supported" + assert not math.isinf(value), "Infinite values are not supported" + return ("%0.20f" % value).rstrip("0").rstrip(".") + + +class UUIDConvertor(Convertor[uuid.UUID]): + regex = "[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}" + + def convert(self, value: str) -> uuid.UUID: + return uuid.UUID(value) + + def to_string(self, value: uuid.UUID) -> str: + return str(value) + + +CONVERTOR_TYPES: dict[str, Convertor[Any]] = { + "str": StringConvertor(), + "path": PathConvertor(), + "int": IntegerConvertor(), + "float": FloatConvertor(), + "uuid": UUIDConvertor(), +} + + +def register_url_convertor(key: str, convertor: Convertor[Any]) -> None: + CONVERTOR_TYPES[key] = convertor diff --git a/venv/lib/python3.10/site-packages/starlette/datastructures.py b/venv/lib/python3.10/site-packages/starlette/datastructures.py new file mode 100644 index 0000000000000000000000000000000000000000..38eabec52743cb04dde53b17fdc19fe074a12841 --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/datastructures.py @@ -0,0 +1,692 @@ +from __future__ import annotations + +from collections.abc import ItemsView, Iterable, Iterator, KeysView, Mapping, MutableMapping, Sequence, ValuesView +from shlex import shlex +from typing import ( + Any, + BinaryIO, + NamedTuple, + TypeVar, + Union, + cast, +) +from urllib.parse import SplitResult, parse_qsl, urlencode, urlsplit + +from starlette.concurrency import run_in_threadpool +from starlette.types import Scope + + +class Address(NamedTuple): + host: str + port: int + + +_KeyType = TypeVar("_KeyType") +# Mapping keys are invariant but their values are covariant since +# you can only read them +# that is, you can't do `Mapping[str, Animal]()["fido"] = Dog()` 
+_CovariantValueType = TypeVar("_CovariantValueType", covariant=True) + + +class URL: + def __init__( + self, + url: str = "", + scope: Scope | None = None, + **components: Any, + ) -> None: + if scope is not None: + assert not url, 'Cannot set both "url" and "scope".' + assert not components, 'Cannot set both "scope" and "**components".' + scheme = scope.get("scheme", "http") + server = scope.get("server", None) + path = scope["path"] + query_string = scope.get("query_string", b"") + + host_header = None + for key, value in scope["headers"]: + if key == b"host": + host_header = value.decode("latin-1") + break + + if host_header is not None: + url = f"{scheme}://{host_header}{path}" + elif server is None: + url = path + else: + host, port = server + default_port = {"http": 80, "https": 443, "ws": 80, "wss": 443}[scheme] + if port == default_port: + url = f"{scheme}://{host}{path}" + else: + url = f"{scheme}://{host}:{port}{path}" + + if query_string: + url += "?" + query_string.decode() + elif components: + assert not url, 'Cannot set both "url" and "**components".' 
+ url = URL("").replace(**components).components.geturl() + + self._url = url + + @property + def components(self) -> SplitResult: + if not hasattr(self, "_components"): + self._components = urlsplit(self._url) + return self._components + + @property + def scheme(self) -> str: + return self.components.scheme + + @property + def netloc(self) -> str: + return self.components.netloc + + @property + def path(self) -> str: + return self.components.path + + @property + def query(self) -> str: + return self.components.query + + @property + def fragment(self) -> str: + return self.components.fragment + + @property + def username(self) -> None | str: + return self.components.username + + @property + def password(self) -> None | str: + return self.components.password + + @property + def hostname(self) -> None | str: + return self.components.hostname + + @property + def port(self) -> int | None: + return self.components.port + + @property + def is_secure(self) -> bool: + return self.scheme in ("https", "wss") + + def replace(self, **kwargs: Any) -> URL: + if "username" in kwargs or "password" in kwargs or "hostname" in kwargs or "port" in kwargs: + hostname = kwargs.pop("hostname", None) + port = kwargs.pop("port", self.port) + username = kwargs.pop("username", self.username) + password = kwargs.pop("password", self.password) + + if hostname is None: + netloc = self.netloc + _, _, hostname = netloc.rpartition("@") + + if hostname[-1] != "]": + hostname = hostname.rsplit(":", 1)[0] + + netloc = hostname + if port is not None: + netloc += f":{port}" + if username is not None: + userpass = username + if password is not None: + userpass += f":{password}" + netloc = f"{userpass}@{netloc}" + + kwargs["netloc"] = netloc + + components = self.components._replace(**kwargs) + return self.__class__(components.geturl()) + + def include_query_params(self, **kwargs: Any) -> URL: + params = MultiDict(parse_qsl(self.query, keep_blank_values=True)) + params.update({str(key): str(value) for 
key, value in kwargs.items()}) + query = urlencode(params.multi_items()) + return self.replace(query=query) + + def replace_query_params(self, **kwargs: Any) -> URL: + query = urlencode([(str(key), str(value)) for key, value in kwargs.items()]) + return self.replace(query=query) + + def remove_query_params(self, keys: str | Sequence[str]) -> URL: + if isinstance(keys, str): + keys = [keys] + params = MultiDict(parse_qsl(self.query, keep_blank_values=True)) + for key in keys: + params.pop(key, None) + query = urlencode(params.multi_items()) + return self.replace(query=query) + + def __eq__(self, other: Any) -> bool: + return str(self) == str(other) + + def __str__(self) -> str: + return self._url + + def __repr__(self) -> str: + url = str(self) + if self.password: + url = str(self.replace(password="********")) + return f"{self.__class__.__name__}({repr(url)})" + + +class URLPath(str): + """ + A URL path string that may also hold an associated protocol and/or host. + Used by the routing to return `url_path_for` matches. + """ + + def __new__(cls, path: str, protocol: str = "", host: str = "") -> URLPath: + assert protocol in ("http", "websocket", "") + return str.__new__(cls, path) + + def __init__(self, path: str, protocol: str = "", host: str = "") -> None: + self.protocol = protocol + self.host = host + + def make_absolute_url(self, base_url: str | URL) -> URL: + if isinstance(base_url, str): + base_url = URL(base_url) + if self.protocol: + scheme = { + "http": {True: "https", False: "http"}, + "websocket": {True: "wss", False: "ws"}, + }[self.protocol][base_url.is_secure] + else: + scheme = base_url.scheme + + netloc = self.host or base_url.netloc + path = base_url.path.rstrip("/") + str(self) + return URL(scheme=scheme, netloc=netloc, path=path) + + +class Secret: + """ + Holds a string value that should not be revealed in tracebacks etc. + You should cast the value to `str` at the point it is required. 
+ """ + + def __init__(self, value: str): + self._value = value + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + return f"{class_name}('**********')" + + def __str__(self) -> str: + return self._value + + def __bool__(self) -> bool: + return bool(self._value) + + +class CommaSeparatedStrings(Sequence[str]): + def __init__(self, value: str | Sequence[str]): + if isinstance(value, str): + splitter = shlex(value, posix=True) + splitter.whitespace = "," + splitter.whitespace_split = True + self._items = [item.strip() for item in splitter] + else: + self._items = list(value) + + def __len__(self) -> int: + return len(self._items) + + def __getitem__(self, index: int | slice) -> Any: + return self._items[index] + + def __iter__(self) -> Iterator[str]: + return iter(self._items) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + items = [item for item in self] + return f"{class_name}({items!r})" + + def __str__(self) -> str: + return ", ".join(repr(item) for item in self) + + +class ImmutableMultiDict(Mapping[_KeyType, _CovariantValueType]): + _dict: dict[_KeyType, _CovariantValueType] + + def __init__( + self, + *args: ImmutableMultiDict[_KeyType, _CovariantValueType] + | Mapping[_KeyType, _CovariantValueType] + | Iterable[tuple[_KeyType, _CovariantValueType]], + **kwargs: Any, + ) -> None: + assert len(args) < 2, "Too many arguments." 
+ + value: Any = args[0] if args else [] + if kwargs: + value = ImmutableMultiDict(value).multi_items() + ImmutableMultiDict(kwargs).multi_items() + + if not value: + _items: list[tuple[Any, Any]] = [] + elif hasattr(value, "multi_items"): + value = cast(ImmutableMultiDict[_KeyType, _CovariantValueType], value) + _items = list(value.multi_items()) + elif hasattr(value, "items"): + value = cast(Mapping[_KeyType, _CovariantValueType], value) + _items = list(value.items()) + else: + value = cast("list[tuple[Any, Any]]", value) + _items = list(value) + + self._dict = {k: v for k, v in _items} + self._list = _items + + def getlist(self, key: Any) -> list[_CovariantValueType]: + return [item_value for item_key, item_value in self._list if item_key == key] + + def keys(self) -> KeysView[_KeyType]: + return self._dict.keys() + + def values(self) -> ValuesView[_CovariantValueType]: + return self._dict.values() + + def items(self) -> ItemsView[_KeyType, _CovariantValueType]: + return self._dict.items() + + def multi_items(self) -> list[tuple[_KeyType, _CovariantValueType]]: + return list(self._list) + + def __getitem__(self, key: _KeyType) -> _CovariantValueType: + return self._dict[key] + + def __contains__(self, key: Any) -> bool: + return key in self._dict + + def __iter__(self) -> Iterator[_KeyType]: + return iter(self.keys()) + + def __len__(self) -> int: + return len(self._dict) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, self.__class__): + return False + return sorted(self._list) == sorted(other._list) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + items = self.multi_items() + return f"{class_name}({items!r})" + + +class MultiDict(ImmutableMultiDict[Any, Any]): + def __setitem__(self, key: Any, value: Any) -> None: + self.setlist(key, [value]) + + def __delitem__(self, key: Any) -> None: + self._list = [(k, v) for k, v in self._list if k != key] + del self._dict[key] + + def pop(self, key: Any, default: Any = None) -> 
Any: + self._list = [(k, v) for k, v in self._list if k != key] + return self._dict.pop(key, default) + + def popitem(self) -> tuple[Any, Any]: + key, value = self._dict.popitem() + self._list = [(k, v) for k, v in self._list if k != key] + return key, value + + def poplist(self, key: Any) -> list[Any]: + values = [v for k, v in self._list if k == key] + self.pop(key) + return values + + def clear(self) -> None: + self._dict.clear() + self._list.clear() + + def setdefault(self, key: Any, default: Any = None) -> Any: + if key not in self: + self._dict[key] = default + self._list.append((key, default)) + + return self[key] + + def setlist(self, key: Any, values: list[Any]) -> None: + if not values: + self.pop(key, None) + else: + existing_items = [(k, v) for (k, v) in self._list if k != key] + self._list = existing_items + [(key, value) for value in values] + self._dict[key] = values[-1] + + def append(self, key: Any, value: Any) -> None: + self._list.append((key, value)) + self._dict[key] = value + + def update( + self, + *args: MultiDict | Mapping[Any, Any] | list[tuple[Any, Any]], + **kwargs: Any, + ) -> None: + value = MultiDict(*args, **kwargs) + existing_items = [(k, v) for (k, v) in self._list if k not in value.keys()] + self._list = existing_items + value.multi_items() + self._dict.update(value) + + +class QueryParams(ImmutableMultiDict[str, str]): + """ + An immutable multidict. + """ + + def __init__( + self, + *args: ImmutableMultiDict[Any, Any] | Mapping[Any, Any] | list[tuple[Any, Any]] | str | bytes, + **kwargs: Any, + ) -> None: + assert len(args) < 2, "Too many arguments." 
+ + value = args[0] if args else [] + + if isinstance(value, str): + super().__init__(parse_qsl(value, keep_blank_values=True), **kwargs) + elif isinstance(value, bytes): + super().__init__(parse_qsl(value.decode("latin-1"), keep_blank_values=True), **kwargs) + else: + super().__init__(*args, **kwargs) # type: ignore[arg-type] + self._list = [(str(k), str(v)) for k, v in self._list] + self._dict = {str(k): str(v) for k, v in self._dict.items()} + + def __str__(self) -> str: + return urlencode(self._list) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + query_string = str(self) + return f"{class_name}({query_string!r})" + + +class UploadFile: + """ + An uploaded file included as part of the request data. + """ + + def __init__( + self, + file: BinaryIO, + *, + size: int | None = None, + filename: str | None = None, + headers: Headers | None = None, + ) -> None: + self.filename = filename + self.file = file + self.size = size + self.headers = headers or Headers() + + # Capture max size from SpooledTemporaryFile if one is provided. This slightly speeds up future checks. 
+ # Note 0 means unlimited mirroring SpooledTemporaryFile's __init__ + self._max_mem_size = getattr(self.file, "_max_size", 0) + + @property + def content_type(self) -> str | None: + return self.headers.get("content-type", None) + + @property + def _in_memory(self) -> bool: + # check for SpooledTemporaryFile._rolled + rolled_to_disk = getattr(self.file, "_rolled", True) + return not rolled_to_disk + + def _will_roll(self, size_to_add: int) -> bool: + # If we're not in_memory then we will always roll + if not self._in_memory: + return True + + # Check for SpooledTemporaryFile._max_size + future_size = self.file.tell() + size_to_add + return bool(future_size > self._max_mem_size) if self._max_mem_size else False + + async def write(self, data: bytes) -> None: + new_data_len = len(data) + if self.size is not None: + self.size += new_data_len + + if self._will_roll(new_data_len): + await run_in_threadpool(self.file.write, data) + else: + self.file.write(data) + + async def read(self, size: int = -1) -> bytes: + if self._in_memory: + return self.file.read(size) + return await run_in_threadpool(self.file.read, size) + + async def seek(self, offset: int) -> None: + if self._in_memory: + self.file.seek(offset) + else: + await run_in_threadpool(self.file.seek, offset) + + async def close(self) -> None: + if self._in_memory: + self.file.close() + else: + await run_in_threadpool(self.file.close) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(filename={self.filename!r}, size={self.size!r}, headers={self.headers!r})" + + +class FormData(ImmutableMultiDict[str, Union[UploadFile, str]]): + """ + An immutable multidict, containing both file uploads and text input. 
+ """ + + def __init__( + self, + *args: FormData | Mapping[str, str | UploadFile] | list[tuple[str, str | UploadFile]], + **kwargs: str | UploadFile, + ) -> None: + super().__init__(*args, **kwargs) + + async def close(self) -> None: + for key, value in self.multi_items(): + if isinstance(value, UploadFile): + await value.close() + + +class Headers(Mapping[str, str]): + """ + An immutable, case-insensitive multidict. + """ + + def __init__( + self, + headers: Mapping[str, str] | None = None, + raw: list[tuple[bytes, bytes]] | None = None, + scope: MutableMapping[str, Any] | None = None, + ) -> None: + self._list: list[tuple[bytes, bytes]] = [] + if headers is not None: + assert raw is None, 'Cannot set both "headers" and "raw".' + assert scope is None, 'Cannot set both "headers" and "scope".' + self._list = [(key.lower().encode("latin-1"), value.encode("latin-1")) for key, value in headers.items()] + elif raw is not None: + assert scope is None, 'Cannot set both "raw" and "scope".' + self._list = raw + elif scope is not None: + # scope["headers"] isn't necessarily a list + # it might be a tuple or other iterable + self._list = scope["headers"] = list(scope["headers"]) + + @property + def raw(self) -> list[tuple[bytes, bytes]]: + return list(self._list) + + def keys(self) -> list[str]: # type: ignore[override] + return [key.decode("latin-1") for key, value in self._list] + + def values(self) -> list[str]: # type: ignore[override] + return [value.decode("latin-1") for key, value in self._list] + + def items(self) -> list[tuple[str, str]]: # type: ignore[override] + return [(key.decode("latin-1"), value.decode("latin-1")) for key, value in self._list] + + def getlist(self, key: str) -> list[str]: + get_header_key = key.lower().encode("latin-1") + return [item_value.decode("latin-1") for item_key, item_value in self._list if item_key == get_header_key] + + def mutablecopy(self) -> MutableHeaders: + return MutableHeaders(raw=self._list[:]) + + def __getitem__(self, 
key: str) -> str: + get_header_key = key.lower().encode("latin-1") + for header_key, header_value in self._list: + if header_key == get_header_key: + return header_value.decode("latin-1") + raise KeyError(key) + + def __contains__(self, key: Any) -> bool: + get_header_key = key.lower().encode("latin-1") + for header_key, header_value in self._list: + if header_key == get_header_key: + return True + return False + + def __iter__(self) -> Iterator[Any]: + return iter(self.keys()) + + def __len__(self) -> int: + return len(self._list) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, Headers): + return False + return sorted(self._list) == sorted(other._list) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + as_dict = dict(self.items()) + if len(as_dict) == len(self): + return f"{class_name}({as_dict!r})" + return f"{class_name}(raw={self.raw!r})" + + +class MutableHeaders(Headers): + def __setitem__(self, key: str, value: str) -> None: + """ + Set the header `key` to `value`, removing any duplicate entries. + Retains insertion order. + """ + set_key = key.lower().encode("latin-1") + set_value = value.encode("latin-1") + + found_indexes: list[int] = [] + for idx, (item_key, item_value) in enumerate(self._list): + if item_key == set_key: + found_indexes.append(idx) + + for idx in reversed(found_indexes[1:]): + del self._list[idx] + + if found_indexes: + idx = found_indexes[0] + self._list[idx] = (set_key, set_value) + else: + self._list.append((set_key, set_value)) + + def __delitem__(self, key: str) -> None: + """ + Remove the header `key`. 
+ """ + del_key = key.lower().encode("latin-1") + + pop_indexes: list[int] = [] + for idx, (item_key, item_value) in enumerate(self._list): + if item_key == del_key: + pop_indexes.append(idx) + + for idx in reversed(pop_indexes): + del self._list[idx] + + def __ior__(self, other: Mapping[str, str]) -> MutableHeaders: + if not isinstance(other, Mapping): + raise TypeError(f"Expected a mapping but got {other.__class__.__name__}") + self.update(other) + return self + + def __or__(self, other: Mapping[str, str]) -> MutableHeaders: + if not isinstance(other, Mapping): + raise TypeError(f"Expected a mapping but got {other.__class__.__name__}") + new = self.mutablecopy() + new.update(other) + return new + + @property + def raw(self) -> list[tuple[bytes, bytes]]: + return self._list + + def setdefault(self, key: str, value: str) -> str: + """ + If the header `key` does not exist, then set it to `value`. + Returns the header value. + """ + set_key = key.lower().encode("latin-1") + set_value = value.encode("latin-1") + + for idx, (item_key, item_value) in enumerate(self._list): + if item_key == set_key: + return item_value.decode("latin-1") + self._list.append((set_key, set_value)) + return value + + def update(self, other: Mapping[str, str]) -> None: + for key, val in other.items(): + self[key] = val + + def append(self, key: str, value: str) -> None: + """ + Append a header, preserving any duplicate entries. + """ + append_key = key.lower().encode("latin-1") + append_value = value.encode("latin-1") + self._list.append((append_key, append_value)) + + def add_vary_header(self, vary: str) -> None: + existing = self.get("vary") + if existing is not None: + vary = ", ".join([existing, vary]) + self["vary"] = vary + + +class State: + """ + An object that can be used to store arbitrary state. + + Used for `request.state` and `app.state`. 
+ """ + + _state: dict[str, Any] + + def __init__(self, state: dict[str, Any] | None = None): + if state is None: + state = {} + super().__setattr__("_state", state) + + def __setattr__(self, key: Any, value: Any) -> None: + self._state[key] = value + + def __getattr__(self, key: Any) -> Any: + try: + return self._state[key] + except KeyError: + message = "'{}' object has no attribute '{}'" + raise AttributeError(message.format(self.__class__.__name__, key)) + + def __delattr__(self, key: Any) -> None: + del self._state[key] diff --git a/venv/lib/python3.10/site-packages/starlette/endpoints.py b/venv/lib/python3.10/site-packages/starlette/endpoints.py new file mode 100644 index 0000000000000000000000000000000000000000..2cdbeb116e2d21cf84a7ba1d658f710e63a577b4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/endpoints.py @@ -0,0 +1,123 @@ +from __future__ import annotations + +import json +from collections.abc import Generator +from typing import Any, Callable + +from starlette import status +from starlette._utils import is_async_callable +from starlette.concurrency import run_in_threadpool +from starlette.exceptions import HTTPException +from starlette.requests import Request +from starlette.responses import PlainTextResponse, Response +from starlette.types import Message, Receive, Scope, Send +from starlette.websockets import WebSocket + + +class HTTPEndpoint: + def __init__(self, scope: Scope, receive: Receive, send: Send) -> None: + assert scope["type"] == "http" + self.scope = scope + self.receive = receive + self.send = send + self._allowed_methods = [ + method + for method in ("GET", "HEAD", "POST", "PUT", "PATCH", "DELETE", "OPTIONS") + if getattr(self, method.lower(), None) is not None + ] + + def __await__(self) -> Generator[Any, None, None]: + return self.dispatch().__await__() + + async def dispatch(self) -> None: + request = Request(self.scope, receive=self.receive) + handler_name = "get" if request.method == "HEAD" and not hasattr(self, 
"head") else request.method.lower() + + handler: Callable[[Request], Any] = getattr(self, handler_name, self.method_not_allowed) + is_async = is_async_callable(handler) + if is_async: + response = await handler(request) + else: + response = await run_in_threadpool(handler, request) + await response(self.scope, self.receive, self.send) + + async def method_not_allowed(self, request: Request) -> Response: + # If we're running inside a starlette application then raise an + # exception, so that the configurable exception handler can deal with + # returning the response. For plain ASGI apps, just return the response. + headers = {"Allow": ", ".join(self._allowed_methods)} + if "app" in self.scope: + raise HTTPException(status_code=405, headers=headers) + return PlainTextResponse("Method Not Allowed", status_code=405, headers=headers) + + +class WebSocketEndpoint: + encoding: str | None = None # May be "text", "bytes", or "json". + + def __init__(self, scope: Scope, receive: Receive, send: Send) -> None: + assert scope["type"] == "websocket" + self.scope = scope + self.receive = receive + self.send = send + + def __await__(self) -> Generator[Any, None, None]: + return self.dispatch().__await__() + + async def dispatch(self) -> None: + websocket = WebSocket(self.scope, receive=self.receive, send=self.send) + await self.on_connect(websocket) + + close_code = status.WS_1000_NORMAL_CLOSURE + + try: + while True: + message = await websocket.receive() + if message["type"] == "websocket.receive": + data = await self.decode(websocket, message) + await self.on_receive(websocket, data) + elif message["type"] == "websocket.disconnect": # pragma: no branch + close_code = int(message.get("code") or status.WS_1000_NORMAL_CLOSURE) + break + except Exception as exc: + close_code = status.WS_1011_INTERNAL_ERROR + raise exc + finally: + await self.on_disconnect(websocket, close_code) + + async def decode(self, websocket: WebSocket, message: Message) -> Any: + if self.encoding == "text": + 
if "text" not in message: + await websocket.close(code=status.WS_1003_UNSUPPORTED_DATA) + raise RuntimeError("Expected text websocket messages, but got bytes") + return message["text"] + + elif self.encoding == "bytes": + if "bytes" not in message: + await websocket.close(code=status.WS_1003_UNSUPPORTED_DATA) + raise RuntimeError("Expected bytes websocket messages, but got text") + return message["bytes"] + + elif self.encoding == "json": + if message.get("text") is not None: + text = message["text"] + else: + text = message["bytes"].decode("utf-8") + + try: + return json.loads(text) + except json.decoder.JSONDecodeError: + await websocket.close(code=status.WS_1003_UNSUPPORTED_DATA) + raise RuntimeError("Malformed JSON data received.") + + assert self.encoding is None, f"Unsupported 'encoding' attribute {self.encoding}" + return message["text"] if message.get("text") else message["bytes"] + + async def on_connect(self, websocket: WebSocket) -> None: + """Override to handle an incoming websocket connection""" + await websocket.accept() + + async def on_receive(self, websocket: WebSocket, data: Any) -> None: + """Override to handle an incoming websocket message""" + + async def on_disconnect(self, websocket: WebSocket, close_code: int) -> None: + """Override to handle a disconnecting websocket""" diff --git a/venv/lib/python3.10/site-packages/starlette/exceptions.py b/venv/lib/python3.10/site-packages/starlette/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..9ad3527b94ce034bb3967b3bf0969403118d4284 --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/exceptions.py @@ -0,0 +1,33 @@ +from __future__ import annotations + +import http +from collections.abc import Mapping + + +class HTTPException(Exception): + def __init__(self, status_code: int, detail: str | None = None, headers: Mapping[str, str] | None = None) -> None: + if detail is None: + detail = http.HTTPStatus(status_code).phrase + self.status_code = status_code + 
self.detail = detail + self.headers = headers + + def __str__(self) -> str: + return f"{self.status_code}: {self.detail}" + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + return f"{class_name}(status_code={self.status_code!r}, detail={self.detail!r})" + + +class WebSocketException(Exception): + def __init__(self, code: int, reason: str | None = None) -> None: + self.code = code + self.reason = reason or "" + + def __str__(self) -> str: + return f"{self.code}: {self.reason}" + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + return f"{class_name}(code={self.code!r}, reason={self.reason!r})" diff --git a/venv/lib/python3.10/site-packages/starlette/formparsers.py b/venv/lib/python3.10/site-packages/starlette/formparsers.py new file mode 100644 index 0000000000000000000000000000000000000000..8e389dec7f380c72fc0d9ee7d986f7b4522160ab --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/formparsers.py @@ -0,0 +1,276 @@ +from __future__ import annotations + +from collections.abc import AsyncGenerator +from dataclasses import dataclass, field +from enum import Enum +from tempfile import SpooledTemporaryFile +from typing import TYPE_CHECKING +from urllib.parse import unquote_plus + +from starlette.datastructures import FormData, Headers, UploadFile + +if TYPE_CHECKING: + import python_multipart as multipart + from python_multipart.multipart import MultipartCallbacks, QuerystringCallbacks, parse_options_header +else: + try: + try: + import python_multipart as multipart + from python_multipart.multipart import parse_options_header + except ModuleNotFoundError: # pragma: no cover + import multipart + from multipart.multipart import parse_options_header + except ModuleNotFoundError: # pragma: no cover + multipart = None + parse_options_header = None + + +class FormMessage(Enum): + FIELD_START = 1 + FIELD_NAME = 2 + FIELD_DATA = 3 + FIELD_END = 4 + END = 5 + + +@dataclass +class MultipartPart: + content_disposition: bytes | 
None = None + field_name: str = "" + data: bytearray = field(default_factory=bytearray) + file: UploadFile | None = None + item_headers: list[tuple[bytes, bytes]] = field(default_factory=list) + + +def _user_safe_decode(src: bytes | bytearray, codec: str) -> str: + try: + return src.decode(codec) + except (UnicodeDecodeError, LookupError): + return src.decode("latin-1") + + +class MultiPartException(Exception): + def __init__(self, message: str) -> None: + self.message = message + + +class FormParser: + def __init__(self, headers: Headers, stream: AsyncGenerator[bytes, None]) -> None: + assert multipart is not None, "The `python-multipart` library must be installed to use form parsing." + self.headers = headers + self.stream = stream + self.messages: list[tuple[FormMessage, bytes]] = [] + + def on_field_start(self) -> None: + message = (FormMessage.FIELD_START, b"") + self.messages.append(message) + + def on_field_name(self, data: bytes, start: int, end: int) -> None: + message = (FormMessage.FIELD_NAME, data[start:end]) + self.messages.append(message) + + def on_field_data(self, data: bytes, start: int, end: int) -> None: + message = (FormMessage.FIELD_DATA, data[start:end]) + self.messages.append(message) + + def on_field_end(self) -> None: + message = (FormMessage.FIELD_END, b"") + self.messages.append(message) + + def on_end(self) -> None: + message = (FormMessage.END, b"") + self.messages.append(message) + + async def parse(self) -> FormData: + # Callbacks dictionary. + callbacks: QuerystringCallbacks = { + "on_field_start": self.on_field_start, + "on_field_name": self.on_field_name, + "on_field_data": self.on_field_data, + "on_field_end": self.on_field_end, + "on_end": self.on_end, + } + + # Create the parser. + parser = multipart.QuerystringParser(callbacks) + field_name = b"" + field_value = b"" + + items: list[tuple[str, str | UploadFile]] = [] + + # Feed the parser with data from the request. 
+ async for chunk in self.stream: + if chunk: + parser.write(chunk) + else: + parser.finalize() + messages = list(self.messages) + self.messages.clear() + for message_type, message_bytes in messages: + if message_type == FormMessage.FIELD_START: + field_name = b"" + field_value = b"" + elif message_type == FormMessage.FIELD_NAME: + field_name += message_bytes + elif message_type == FormMessage.FIELD_DATA: + field_value += message_bytes + elif message_type == FormMessage.FIELD_END: + name = unquote_plus(field_name.decode("latin-1")) + value = unquote_plus(field_value.decode("latin-1")) + items.append((name, value)) + + return FormData(items) + + +class MultiPartParser: + spool_max_size = 1024 * 1024 # 1MB + """The maximum size of the spooled temporary file used to store file data.""" + max_part_size = 1024 * 1024 # 1MB + """The maximum size of a part in the multipart request.""" + + def __init__( + self, + headers: Headers, + stream: AsyncGenerator[bytes, None], + *, + max_files: int | float = 1000, + max_fields: int | float = 1000, + max_part_size: int = 1024 * 1024, # 1MB + ) -> None: + assert multipart is not None, "The `python-multipart` library must be installed to use form parsing." 
+ self.headers = headers + self.stream = stream + self.max_files = max_files + self.max_fields = max_fields + self.items: list[tuple[str, str | UploadFile]] = [] + self._current_files = 0 + self._current_fields = 0 + self._current_partial_header_name: bytes = b"" + self._current_partial_header_value: bytes = b"" + self._current_part = MultipartPart() + self._charset = "" + self._file_parts_to_write: list[tuple[MultipartPart, bytes]] = [] + self._file_parts_to_finish: list[MultipartPart] = [] + self._files_to_close_on_error: list[SpooledTemporaryFile[bytes]] = [] + self.max_part_size = max_part_size + + def on_part_begin(self) -> None: + self._current_part = MultipartPart() + + def on_part_data(self, data: bytes, start: int, end: int) -> None: + message_bytes = data[start:end] + if self._current_part.file is None: + if len(self._current_part.data) + len(message_bytes) > self.max_part_size: + raise MultiPartException(f"Part exceeded maximum size of {int(self.max_part_size / 1024)}KB.") + self._current_part.data.extend(message_bytes) + else: + self._file_parts_to_write.append((self._current_part, message_bytes)) + + def on_part_end(self) -> None: + if self._current_part.file is None: + self.items.append( + ( + self._current_part.field_name, + _user_safe_decode(self._current_part.data, self._charset), + ) + ) + else: + self._file_parts_to_finish.append(self._current_part) + # The file can be added to the items right now even though it's not + # finished yet, because it will be finished in the `parse()` method, before + # self.items is used in the return value. 
+ self.items.append((self._current_part.field_name, self._current_part.file)) + + def on_header_field(self, data: bytes, start: int, end: int) -> None: + self._current_partial_header_name += data[start:end] + + def on_header_value(self, data: bytes, start: int, end: int) -> None: + self._current_partial_header_value += data[start:end] + + def on_header_end(self) -> None: + field = self._current_partial_header_name.lower() + if field == b"content-disposition": + self._current_part.content_disposition = self._current_partial_header_value + self._current_part.item_headers.append((field, self._current_partial_header_value)) + self._current_partial_header_name = b"" + self._current_partial_header_value = b"" + + def on_headers_finished(self) -> None: + disposition, options = parse_options_header(self._current_part.content_disposition) + try: + self._current_part.field_name = _user_safe_decode(options[b"name"], self._charset) + except KeyError: + raise MultiPartException('The Content-Disposition header field "name" must be provided.') + if b"filename" in options: + self._current_files += 1 + if self._current_files > self.max_files: + raise MultiPartException(f"Too many files. Maximum number of files is {self.max_files}.") + filename = _user_safe_decode(options[b"filename"], self._charset) + tempfile = SpooledTemporaryFile(max_size=self.spool_max_size) + self._files_to_close_on_error.append(tempfile) + self._current_part.file = UploadFile( + file=tempfile, # type: ignore[arg-type] + size=0, + filename=filename, + headers=Headers(raw=self._current_part.item_headers), + ) + else: + self._current_fields += 1 + if self._current_fields > self.max_fields: + raise MultiPartException(f"Too many fields. Maximum number of fields is {self.max_fields}.") + self._current_part.file = None + + def on_end(self) -> None: + pass + + async def parse(self) -> FormData: + # Parse the Content-Type header to get the multipart boundary. 
+ _, params = parse_options_header(self.headers["Content-Type"]) + charset = params.get(b"charset", "utf-8") + if isinstance(charset, bytes): + charset = charset.decode("latin-1") + self._charset = charset + try: + boundary = params[b"boundary"] + except KeyError: + raise MultiPartException("Missing boundary in multipart.") + + # Callbacks dictionary. + callbacks: MultipartCallbacks = { + "on_part_begin": self.on_part_begin, + "on_part_data": self.on_part_data, + "on_part_end": self.on_part_end, + "on_header_field": self.on_header_field, + "on_header_value": self.on_header_value, + "on_header_end": self.on_header_end, + "on_headers_finished": self.on_headers_finished, + "on_end": self.on_end, + } + + # Create the parser. + parser = multipart.MultipartParser(boundary, callbacks) + try: + # Feed the parser with data from the request. + async for chunk in self.stream: + parser.write(chunk) + # Write file data, it needs to use await with the UploadFile methods + # that call the corresponding file methods *in a threadpool*, + # otherwise, if they were called directly in the callback methods above + # (regular, non-async functions), that would block the event loop in + # the main thread. + for part, data in self._file_parts_to_write: + assert part.file # for type checkers + await part.file.write(data) + for part in self._file_parts_to_finish: + assert part.file # for type checkers + await part.file.seek(0) + self._file_parts_to_write.clear() + self._file_parts_to_finish.clear() + except MultiPartException as exc: + # Close all the files if there was an error. 
+ for file in self._files_to_close_on_error: + file.close() + raise exc + + parser.finalize() + return FormData(self.items) diff --git a/venv/lib/python3.10/site-packages/starlette/middleware/__init__.py b/venv/lib/python3.10/site-packages/starlette/middleware/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b99538a272d985e5288faff04d2e7971b938d613 --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/middleware/__init__.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +import sys +from collections.abc import Iterator +from typing import Any, Protocol + +if sys.version_info >= (3, 10): # pragma: no cover + from typing import ParamSpec +else: # pragma: no cover + from typing_extensions import ParamSpec + +from starlette.types import ASGIApp + +P = ParamSpec("P") + + +class _MiddlewareFactory(Protocol[P]): + def __call__(self, app: ASGIApp, /, *args: P.args, **kwargs: P.kwargs) -> ASGIApp: ... # pragma: no cover + + +class Middleware: + def __init__( + self, + cls: _MiddlewareFactory[P], + *args: P.args, + **kwargs: P.kwargs, + ) -> None: + self.cls = cls + self.args = args + self.kwargs = kwargs + + def __iter__(self) -> Iterator[Any]: + as_tuple = (self.cls, self.args, self.kwargs) + return iter(as_tuple) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + args_strings = [f"{value!r}" for value in self.args] + option_strings = [f"{key}={value!r}" for key, value in self.kwargs.items()] + name = getattr(self.cls, "__name__", "") + args_repr = ", ".join([name] + args_strings + option_strings) + return f"{class_name}({args_repr})" diff --git a/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b646f0593da182063d712f3c9040a0b49bf33ba Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/authentication.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/authentication.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49574f134aade0b2f5313620c1201cd73f5fcbd6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/authentication.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/base.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1692229cc009f9feb3b51c41259610ad495caf6c Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/cors.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/cors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae8f627e957f4ac47bc3e8c4fde7e30b8e7096a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/cors.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/errors.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/errors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e30d53eb3b6392a5b49ef00ece4a403ff3ccd49f Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/errors.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/exceptions.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f4ef5de3536f571be44600434fd48711e71439e Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/exceptions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/gzip.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/gzip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..085c7bca3699a5837844b1fed58fc53f3c6db452 Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/gzip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/httpsredirect.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/httpsredirect.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2802afd45f116969dc722c5250f69607da43399f Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/httpsredirect.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/sessions.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/sessions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61b692782ce6021d3502e009a839fbb8b3b71314 Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/sessions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/trustedhost.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/trustedhost.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a7860a60c94a80e4b7cfbad49f937945e40f349 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/trustedhost.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/wsgi.cpython-310.pyc b/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/wsgi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b0b701011215228bfc627e415feff78c190b740 Binary files /dev/null and b/venv/lib/python3.10/site-packages/starlette/middleware/__pycache__/wsgi.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/starlette/middleware/authentication.py b/venv/lib/python3.10/site-packages/starlette/middleware/authentication.py new file mode 100644 index 0000000000000000000000000000000000000000..77fc742daf444e759fe31de5d16ac2978f797580 --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/middleware/authentication.py @@ -0,0 +1,52 @@ +from __future__ import annotations + +from typing import Callable + +from starlette.authentication import ( + AuthCredentials, + AuthenticationBackend, + AuthenticationError, + UnauthenticatedUser, +) +from starlette.requests import HTTPConnection +from starlette.responses import PlainTextResponse, Response +from starlette.types import ASGIApp, Receive, Scope, Send + + +class AuthenticationMiddleware: + def __init__( + self, + app: ASGIApp, + backend: AuthenticationBackend, + on_error: Callable[[HTTPConnection, AuthenticationError], Response] | None = None, + ) -> None: + self.app = app + self.backend = backend + self.on_error: Callable[[HTTPConnection, AuthenticationError], Response] = ( + on_error if on_error is not None else self.default_on_error + ) + + async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: + if scope["type"] not in ["http", "websocket"]: + await self.app(scope, receive, send) + return + + conn = HTTPConnection(scope) + try: + auth_result = await self.backend.authenticate(conn) + except AuthenticationError as exc: + response = 
self.on_error(conn, exc) + if scope["type"] == "websocket": + await send({"type": "websocket.close", "code": 1000}) + else: + await response(scope, receive, send) + return + + if auth_result is None: + auth_result = AuthCredentials(), UnauthenticatedUser() + scope["auth"], scope["user"] = auth_result + await self.app(scope, receive, send) + + @staticmethod + def default_on_error(conn: HTTPConnection, exc: Exception) -> Response: + return PlainTextResponse(str(exc), status_code=400) diff --git a/venv/lib/python3.10/site-packages/starlette/middleware/base.py b/venv/lib/python3.10/site-packages/starlette/middleware/base.py new file mode 100644 index 0000000000000000000000000000000000000000..577918eb91b18183018b5e48078257e4ae695502 --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/middleware/base.py @@ -0,0 +1,235 @@ +from __future__ import annotations + +from collections.abc import AsyncGenerator, AsyncIterable, Awaitable, Mapping, MutableMapping +from typing import Any, Callable, TypeVar, Union + +import anyio + +from starlette._utils import collapse_excgroups +from starlette.requests import ClientDisconnect, Request +from starlette.responses import Response +from starlette.types import ASGIApp, Message, Receive, Scope, Send + +RequestResponseEndpoint = Callable[[Request], Awaitable[Response]] +DispatchFunction = Callable[[Request, RequestResponseEndpoint], Awaitable[Response]] +BodyStreamGenerator = AsyncGenerator[Union[bytes, MutableMapping[str, Any]], None] +AsyncContentStream = AsyncIterable[Union[str, bytes, memoryview, MutableMapping[str, Any]]] +T = TypeVar("T") + + +class _CachedRequest(Request): + """ + If the user calls Request.body() from their dispatch function + we cache the entire request body in memory and pass that to downstream middlewares, + but if they call Request.stream() then all we do is send an + empty body so that downstream things don't hang forever. 
+ """ + + def __init__(self, scope: Scope, receive: Receive): + super().__init__(scope, receive) + self._wrapped_rcv_disconnected = False + self._wrapped_rcv_consumed = False + self._wrapped_rc_stream = self.stream() + + async def wrapped_receive(self) -> Message: + # wrapped_rcv state 1: disconnected + if self._wrapped_rcv_disconnected: + # we've already sent a disconnect to the downstream app + # we don't need to wait to get another one + # (although most ASGI servers will just keep sending it) + return {"type": "http.disconnect"} + # wrapped_rcv state 1: consumed but not yet disconnected + if self._wrapped_rcv_consumed: + # since the downstream app has consumed us all that is left + # is to send it a disconnect + if self._is_disconnected: + # the middleware has already seen the disconnect + # since we know the client is disconnected no need to wait + # for the message + self._wrapped_rcv_disconnected = True + return {"type": "http.disconnect"} + # we don't know yet if the client is disconnected or not + # so we'll wait until we get that message + msg = await self.receive() + if msg["type"] != "http.disconnect": # pragma: no cover + # at this point a disconnect is all that we should be receiving + # if we get something else, things went wrong somewhere + raise RuntimeError(f"Unexpected message received: {msg['type']}") + self._wrapped_rcv_disconnected = True + return msg + + # wrapped_rcv state 3: not yet consumed + if getattr(self, "_body", None) is not None: + # body() was called, we return it even if the client disconnected + self._wrapped_rcv_consumed = True + return { + "type": "http.request", + "body": self._body, + "more_body": False, + } + elif self._stream_consumed: + # stream() was called to completion + # return an empty body so that downstream apps don't hang + # waiting for a disconnect + self._wrapped_rcv_consumed = True + return { + "type": "http.request", + "body": b"", + "more_body": False, + } + else: + # body() was never called and stream() 
wasn't consumed + try: + stream = self.stream() + chunk = await stream.__anext__() + self._wrapped_rcv_consumed = self._stream_consumed + return { + "type": "http.request", + "body": chunk, + "more_body": not self._stream_consumed, + } + except ClientDisconnect: + self._wrapped_rcv_disconnected = True + return {"type": "http.disconnect"} + + +class BaseHTTPMiddleware: + def __init__(self, app: ASGIApp, dispatch: DispatchFunction | None = None) -> None: + self.app = app + self.dispatch_func = self.dispatch if dispatch is None else dispatch + + async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: + if scope["type"] != "http": + await self.app(scope, receive, send) + return + + request = _CachedRequest(scope, receive) + wrapped_receive = request.wrapped_receive + response_sent = anyio.Event() + app_exc: Exception | None = None + exception_already_raised = False + + async def call_next(request: Request) -> Response: + async def receive_or_disconnect() -> Message: + if response_sent.is_set(): + return {"type": "http.disconnect"} + + async with anyio.create_task_group() as task_group: + + async def wrap(func: Callable[[], Awaitable[T]]) -> T: + result = await func() + task_group.cancel_scope.cancel() + return result + + task_group.start_soon(wrap, response_sent.wait) + message = await wrap(wrapped_receive) + + if response_sent.is_set(): + return {"type": "http.disconnect"} + + return message + + async def send_no_error(message: Message) -> None: + try: + await send_stream.send(message) + except anyio.BrokenResourceError: + # recv_stream has been closed, i.e. response_sent has been set. 
+ return + + async def coro() -> None: + nonlocal app_exc + + with send_stream: + try: + await self.app(scope, receive_or_disconnect, send_no_error) + except Exception as exc: + app_exc = exc + + task_group.start_soon(coro) + + try: + message = await recv_stream.receive() + info = message.get("info", None) + if message["type"] == "http.response.debug" and info is not None: + message = await recv_stream.receive() + except anyio.EndOfStream: + if app_exc is not None: + nonlocal exception_already_raised + exception_already_raised = True + raise app_exc + raise RuntimeError("No response returned.") + + assert message["type"] == "http.response.start" + + async def body_stream() -> BodyStreamGenerator: + async for message in recv_stream: + if message["type"] == "http.response.pathsend": + yield message + break + assert message["type"] == "http.response.body", f"Unexpected message: {message}" + body = message.get("body", b"") + if body: + yield body + if not message.get("more_body", False): + break + + response = _StreamingResponse(status_code=message["status"], content=body_stream(), info=info) + response.raw_headers = message["headers"] + return response + + streams: anyio.create_memory_object_stream[Message] = anyio.create_memory_object_stream() + send_stream, recv_stream = streams + with recv_stream, send_stream, collapse_excgroups(): + async with anyio.create_task_group() as task_group: + response = await self.dispatch_func(request, call_next) + await response(scope, wrapped_receive, send) + response_sent.set() + recv_stream.close() + if app_exc is not None and not exception_already_raised: + raise app_exc + + async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response: + raise NotImplementedError() # pragma: no cover + + +class _StreamingResponse(Response): + def __init__( + self, + content: AsyncContentStream, + status_code: int = 200, + headers: Mapping[str, str] | None = None, + media_type: str | None = None, + info: Mapping[str, 
Any] | None = None, + ) -> None: + self.info = info + self.body_iterator = content + self.status_code = status_code + self.media_type = media_type + self.init_headers(headers) + self.background = None + + async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: + if self.info is not None: + await send({"type": "http.response.debug", "info": self.info}) + await send( + { + "type": "http.response.start", + "status": self.status_code, + "headers": self.raw_headers, + } + ) + + should_close_body = True + async for chunk in self.body_iterator: + if isinstance(chunk, dict): + # We got an ASGI message which is not response body (eg: pathsend) + should_close_body = False + await send(chunk) + continue + await send({"type": "http.response.body", "body": chunk, "more_body": True}) + + if should_close_body: + await send({"type": "http.response.body", "body": b"", "more_body": False}) + + if self.background: + await self.background() diff --git a/venv/lib/python3.10/site-packages/starlette/middleware/cors.py b/venv/lib/python3.10/site-packages/starlette/middleware/cors.py new file mode 100644 index 0000000000000000000000000000000000000000..ffd8aefc60cedf17b87748cb11daeeb284c6685b --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/middleware/cors.py @@ -0,0 +1,172 @@ +from __future__ import annotations + +import functools +import re +from collections.abc import Sequence + +from starlette.datastructures import Headers, MutableHeaders +from starlette.responses import PlainTextResponse, Response +from starlette.types import ASGIApp, Message, Receive, Scope, Send + +ALL_METHODS = ("DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT") +SAFELISTED_HEADERS = {"Accept", "Accept-Language", "Content-Language", "Content-Type"} + + +class CORSMiddleware: + def __init__( + self, + app: ASGIApp, + allow_origins: Sequence[str] = (), + allow_methods: Sequence[str] = ("GET",), + allow_headers: Sequence[str] = (), + allow_credentials: bool = False, + 
allow_origin_regex: str | None = None, + expose_headers: Sequence[str] = (), + max_age: int = 600, + ) -> None: + if "*" in allow_methods: + allow_methods = ALL_METHODS + + compiled_allow_origin_regex = None + if allow_origin_regex is not None: + compiled_allow_origin_regex = re.compile(allow_origin_regex) + + allow_all_origins = "*" in allow_origins + allow_all_headers = "*" in allow_headers + preflight_explicit_allow_origin = not allow_all_origins or allow_credentials + + simple_headers = {} + if allow_all_origins: + simple_headers["Access-Control-Allow-Origin"] = "*" + if allow_credentials: + simple_headers["Access-Control-Allow-Credentials"] = "true" + if expose_headers: + simple_headers["Access-Control-Expose-Headers"] = ", ".join(expose_headers) + + preflight_headers = {} + if preflight_explicit_allow_origin: + # The origin value will be set in preflight_response() if it is allowed. + preflight_headers["Vary"] = "Origin" + else: + preflight_headers["Access-Control-Allow-Origin"] = "*" + preflight_headers.update( + { + "Access-Control-Allow-Methods": ", ".join(allow_methods), + "Access-Control-Max-Age": str(max_age), + } + ) + allow_headers = sorted(SAFELISTED_HEADERS | set(allow_headers)) + if allow_headers and not allow_all_headers: + preflight_headers["Access-Control-Allow-Headers"] = ", ".join(allow_headers) + if allow_credentials: + preflight_headers["Access-Control-Allow-Credentials"] = "true" + + self.app = app + self.allow_origins = allow_origins + self.allow_methods = allow_methods + self.allow_headers = [h.lower() for h in allow_headers] + self.allow_all_origins = allow_all_origins + self.allow_all_headers = allow_all_headers + self.preflight_explicit_allow_origin = preflight_explicit_allow_origin + self.allow_origin_regex = compiled_allow_origin_regex + self.simple_headers = simple_headers + self.preflight_headers = preflight_headers + + async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: + if scope["type"] != "http": # 
pragma: no cover + await self.app(scope, receive, send) + return + + method = scope["method"] + headers = Headers(scope=scope) + origin = headers.get("origin") + + if origin is None: + await self.app(scope, receive, send) + return + + if method == "OPTIONS" and "access-control-request-method" in headers: + response = self.preflight_response(request_headers=headers) + await response(scope, receive, send) + return + + await self.simple_response(scope, receive, send, request_headers=headers) + + def is_allowed_origin(self, origin: str) -> bool: + if self.allow_all_origins: + return True + + if self.allow_origin_regex is not None and self.allow_origin_regex.fullmatch(origin): + return True + + return origin in self.allow_origins + + def preflight_response(self, request_headers: Headers) -> Response: + requested_origin = request_headers["origin"] + requested_method = request_headers["access-control-request-method"] + requested_headers = request_headers.get("access-control-request-headers") + + headers = dict(self.preflight_headers) + failures = [] + + if self.is_allowed_origin(origin=requested_origin): + if self.preflight_explicit_allow_origin: + # The "else" case is already accounted for in self.preflight_headers + # and the value would be "*". + headers["Access-Control-Allow-Origin"] = requested_origin + else: + failures.append("origin") + + if requested_method not in self.allow_methods: + failures.append("method") + + # If we allow all headers, then we have to mirror back any requested + # headers in the response. 
+ if self.allow_all_headers and requested_headers is not None: + headers["Access-Control-Allow-Headers"] = requested_headers + elif requested_headers is not None: + for header in [h.lower() for h in requested_headers.split(",")]: + if header.strip() not in self.allow_headers: + failures.append("headers") + break + + # We don't strictly need to use 400 responses here, since its up to + # the browser to enforce the CORS policy, but its more informative + # if we do. + if failures: + failure_text = "Disallowed CORS " + ", ".join(failures) + return PlainTextResponse(failure_text, status_code=400, headers=headers) + + return PlainTextResponse("OK", status_code=200, headers=headers) + + async def simple_response(self, scope: Scope, receive: Receive, send: Send, request_headers: Headers) -> None: + send = functools.partial(self.send, send=send, request_headers=request_headers) + await self.app(scope, receive, send) + + async def send(self, message: Message, send: Send, request_headers: Headers) -> None: + if message["type"] != "http.response.start": + await send(message) + return + + message.setdefault("headers", []) + headers = MutableHeaders(scope=message) + headers.update(self.simple_headers) + origin = request_headers["Origin"] + has_cookie = "cookie" in request_headers + + # If request includes any cookie headers, then we must respond + # with the specific origin instead of '*'. + if self.allow_all_origins and has_cookie: + self.allow_explicit_origin(headers, origin) + + # If we only allow specific origins, then we have to mirror back + # the Origin header in the response. 
+ elif not self.allow_all_origins and self.is_allowed_origin(origin=origin): + self.allow_explicit_origin(headers, origin) + + await send(message) + + @staticmethod + def allow_explicit_origin(headers: MutableHeaders, origin: str) -> None: + headers["Access-Control-Allow-Origin"] = origin + headers.add_vary_header("Origin") diff --git a/venv/lib/python3.10/site-packages/starlette/middleware/errors.py b/venv/lib/python3.10/site-packages/starlette/middleware/errors.py new file mode 100644 index 0000000000000000000000000000000000000000..bbc10728af8f809c12e39d5016585922f66665cc --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/middleware/errors.py @@ -0,0 +1,259 @@ +from __future__ import annotations + +import html +import inspect +import sys +import traceback + +from starlette._utils import is_async_callable +from starlette.concurrency import run_in_threadpool +from starlette.requests import Request +from starlette.responses import HTMLResponse, PlainTextResponse, Response +from starlette.types import ASGIApp, ExceptionHandler, Message, Receive, Scope, Send + +STYLES = """ +p { + color: #211c1c; +} +.traceback-container { + border: 1px solid #038BB8; +} +.traceback-title { + background-color: #038BB8; + color: lemonchiffon; + padding: 12px; + font-size: 20px; + margin-top: 0px; +} +.frame-line { + padding-left: 10px; + font-family: monospace; +} +.frame-filename { + font-family: monospace; +} +.center-line { + background-color: #038BB8; + color: #f9f6e1; + padding: 5px 0px 5px 5px; +} +.lineno { + margin-right: 5px; +} +.frame-title { + font-weight: unset; + padding: 10px 10px 10px 10px; + background-color: #E4F4FD; + margin-right: 10px; + color: #191f21; + font-size: 17px; + border: 1px solid #c7dce8; +} +.collapse-btn { + float: right; + padding: 0px 5px 1px 5px; + border: solid 1px #96aebb; + cursor: pointer; +} +.collapsed { + display: none; +} +.source-code { + font-family: courier; + font-size: small; + padding-bottom: 10px; +} +""" + +JS = """ + 
+""" + +TEMPLATE = """ + + + + Starlette Debugger + + +

    500 Server Error

    +

    {error}

    +
    +

    Traceback

    +
    {exc_html}
    +
    + {js} + + +""" + +FRAME_TEMPLATE = """ +
    +

    File {frame_filename}, + line {frame_lineno}, + in {frame_name} + {collapse_button} +

    +
    {code_context}
    +
    +""" # noqa: E501 + +LINE = """ +

    +{lineno}. {line}

    +""" + +CENTER_LINE = """ +

    +{lineno}. {line}

    +""" + + +class ServerErrorMiddleware: + """ + Handles returning 500 responses when a server error occurs. + + If 'debug' is set, then traceback responses will be returned, + otherwise the designated 'handler' will be called. + + This middleware class should generally be used to wrap *everything* + else up, so that unhandled exceptions anywhere in the stack + always result in an appropriate 500 response. + """ + + def __init__( + self, + app: ASGIApp, + handler: ExceptionHandler | None = None, + debug: bool = False, + ) -> None: + self.app = app + self.handler = handler + self.debug = debug + + async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: + if scope["type"] != "http": + await self.app(scope, receive, send) + return + + response_started = False + + async def _send(message: Message) -> None: + nonlocal response_started, send + + if message["type"] == "http.response.start": + response_started = True + await send(message) + + try: + await self.app(scope, receive, _send) + except Exception as exc: + request = Request(scope) + if self.debug: + # In debug mode, return traceback responses. + response = self.debug_response(request, exc) + elif self.handler is None: + # Use our default 500 error handler. + response = self.error_response(request, exc) + else: + # Use an installed 500 error handler. + if is_async_callable(self.handler): + response = await self.handler(request, exc) + else: + response = await run_in_threadpool(self.handler, request, exc) + + if not response_started: + await response(scope, receive, send) + + # We always continue to raise the exception. + # This allows servers to log the error, or allows test clients + # to optionally raise the error within the test case. 
+ raise exc + + def format_line(self, index: int, line: str, frame_lineno: int, frame_index: int) -> str: + values = { + # HTML escape - line could contain < or > + "line": html.escape(line).replace(" ", " "), + "lineno": (frame_lineno - frame_index) + index, + } + + if index != frame_index: + return LINE.format(**values) + return CENTER_LINE.format(**values) + + def generate_frame_html(self, frame: inspect.FrameInfo, is_collapsed: bool) -> str: + code_context = "".join( + self.format_line( + index, + line, + frame.lineno, + frame.index, # type: ignore[arg-type] + ) + for index, line in enumerate(frame.code_context or []) + ) + + values = { + # HTML escape - filename could contain < or >, especially if it's a virtual + # file e.g. in the REPL + "frame_filename": html.escape(frame.filename), + "frame_lineno": frame.lineno, + # HTML escape - if you try very hard it's possible to name a function with < + # or > + "frame_name": html.escape(frame.function), + "code_context": code_context, + "collapsed": "collapsed" if is_collapsed else "", + "collapse_button": "+" if is_collapsed else "‒", + } + return FRAME_TEMPLATE.format(**values) + + def generate_html(self, exc: Exception, limit: int = 7) -> str: + traceback_obj = traceback.TracebackException.from_exception(exc, capture_locals=True) + + exc_html = "" + is_collapsed = False + exc_traceback = exc.__traceback__ + if exc_traceback is not None: + frames = inspect.getinnerframes(exc_traceback, limit) + for frame in reversed(frames): + exc_html += self.generate_frame_html(frame, is_collapsed) + is_collapsed = True + + if sys.version_info >= (3, 13): # pragma: no cover + exc_type_str = traceback_obj.exc_type_str + else: # pragma: no cover + exc_type_str = traceback_obj.exc_type.__name__ + + # escape error class and text + error = f"{html.escape(exc_type_str)}: {html.escape(str(traceback_obj))}" + + return TEMPLATE.format(styles=STYLES, js=JS, error=error, exc_html=exc_html) + + def generate_plain_text(self, exc: Exception) 
-> str: + return "".join(traceback.format_exception(type(exc), exc, exc.__traceback__)) + + def debug_response(self, request: Request, exc: Exception) -> Response: + accept = request.headers.get("accept", "") + + if "text/html" in accept: + content = self.generate_html(exc) + return HTMLResponse(content, status_code=500) + content = self.generate_plain_text(exc) + return PlainTextResponse(content, status_code=500) + + def error_response(self, request: Request, exc: Exception) -> Response: + return PlainTextResponse("Internal Server Error", status_code=500) diff --git a/venv/lib/python3.10/site-packages/starlette/middleware/exceptions.py b/venv/lib/python3.10/site-packages/starlette/middleware/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..6c98558f4db50007dd3ddee1892454a5a73f6abd --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/middleware/exceptions.py @@ -0,0 +1,73 @@ +from __future__ import annotations + +from collections.abc import Mapping +from typing import Any + +from starlette._exception_handler import ( + ExceptionHandlers, + StatusHandlers, + wrap_app_handling_exceptions, +) +from starlette.exceptions import HTTPException, WebSocketException +from starlette.requests import Request +from starlette.responses import PlainTextResponse, Response +from starlette.types import ASGIApp, ExceptionHandler, Receive, Scope, Send +from starlette.websockets import WebSocket + + +class ExceptionMiddleware: + def __init__( + self, + app: ASGIApp, + handlers: Mapping[Any, ExceptionHandler] | None = None, + debug: bool = False, + ) -> None: + self.app = app + self.debug = debug # TODO: We ought to handle 404 cases if debug is set. 
+ self._status_handlers: StatusHandlers = {} + self._exception_handlers: ExceptionHandlers = { + HTTPException: self.http_exception, + WebSocketException: self.websocket_exception, + } + if handlers is not None: # pragma: no branch + for key, value in handlers.items(): + self.add_exception_handler(key, value) + + def add_exception_handler( + self, + exc_class_or_status_code: int | type[Exception], + handler: ExceptionHandler, + ) -> None: + if isinstance(exc_class_or_status_code, int): + self._status_handlers[exc_class_or_status_code] = handler + else: + assert issubclass(exc_class_or_status_code, Exception) + self._exception_handlers[exc_class_or_status_code] = handler + + async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: + if scope["type"] not in ("http", "websocket"): + await self.app(scope, receive, send) + return + + scope["starlette.exception_handlers"] = ( + self._exception_handlers, + self._status_handlers, + ) + + conn: Request | WebSocket + if scope["type"] == "http": + conn = Request(scope, receive, send) + else: + conn = WebSocket(scope, receive, send) + + await wrap_app_handling_exceptions(self.app, conn)(scope, receive, send) + + async def http_exception(self, request: Request, exc: Exception) -> Response: + assert isinstance(exc, HTTPException) + if exc.status_code in {204, 304}: + return Response(status_code=exc.status_code, headers=exc.headers) + return PlainTextResponse(exc.detail, status_code=exc.status_code, headers=exc.headers) + + async def websocket_exception(self, websocket: WebSocket, exc: Exception) -> None: + assert isinstance(exc, WebSocketException) + await websocket.close(code=exc.code, reason=exc.reason) # pragma: no cover diff --git a/venv/lib/python3.10/site-packages/starlette/middleware/gzip.py b/venv/lib/python3.10/site-packages/starlette/middleware/gzip.py new file mode 100644 index 0000000000000000000000000000000000000000..abd898b2daffb4c1cc2b47b5931060ce34f2abfd --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/starlette/middleware/gzip.py @@ -0,0 +1,145 @@ +import gzip +import io +from typing import NoReturn + +from starlette.datastructures import Headers, MutableHeaders +from starlette.types import ASGIApp, Message, Receive, Scope, Send + +DEFAULT_EXCLUDED_CONTENT_TYPES = ("text/event-stream",) + + +class GZipMiddleware: + def __init__(self, app: ASGIApp, minimum_size: int = 500, compresslevel: int = 9) -> None: + self.app = app + self.minimum_size = minimum_size + self.compresslevel = compresslevel + + async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: + if scope["type"] != "http": # pragma: no cover + await self.app(scope, receive, send) + return + + headers = Headers(scope=scope) + responder: ASGIApp + if "gzip" in headers.get("Accept-Encoding", ""): + responder = GZipResponder(self.app, self.minimum_size, compresslevel=self.compresslevel) + else: + responder = IdentityResponder(self.app, self.minimum_size) + + await responder(scope, receive, send) + + +class IdentityResponder: + content_encoding: str + + def __init__(self, app: ASGIApp, minimum_size: int) -> None: + self.app = app + self.minimum_size = minimum_size + self.send: Send = unattached_send + self.initial_message: Message = {} + self.started = False + self.content_encoding_set = False + self.content_type_is_excluded = False + + async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: + self.send = send + await self.app(scope, receive, self.send_with_compression) + + async def send_with_compression(self, message: Message) -> None: + message_type = message["type"] + if message_type == "http.response.start": + # Don't send the initial message until we've determined how to + # modify the outgoing headers correctly. 
+ self.initial_message = message + headers = Headers(raw=self.initial_message["headers"]) + self.content_encoding_set = "content-encoding" in headers + self.content_type_is_excluded = headers.get("content-type", "").startswith(DEFAULT_EXCLUDED_CONTENT_TYPES) + elif message_type == "http.response.body" and (self.content_encoding_set or self.content_type_is_excluded): + if not self.started: + self.started = True + await self.send(self.initial_message) + await self.send(message) + elif message_type == "http.response.body" and not self.started: + self.started = True + body = message.get("body", b"") + more_body = message.get("more_body", False) + if len(body) < self.minimum_size and not more_body: + # Don't apply compression to small outgoing responses. + await self.send(self.initial_message) + await self.send(message) + elif not more_body: + # Standard response. + body = self.apply_compression(body, more_body=False) + + headers = MutableHeaders(raw=self.initial_message["headers"]) + headers.add_vary_header("Accept-Encoding") + if body != message["body"]: + headers["Content-Encoding"] = self.content_encoding + headers["Content-Length"] = str(len(body)) + message["body"] = body + + await self.send(self.initial_message) + await self.send(message) + else: + # Initial body in streaming response. + body = self.apply_compression(body, more_body=True) + + headers = MutableHeaders(raw=self.initial_message["headers"]) + headers.add_vary_header("Accept-Encoding") + if body != message["body"]: + headers["Content-Encoding"] = self.content_encoding + del headers["Content-Length"] + message["body"] = body + + await self.send(self.initial_message) + await self.send(message) + elif message_type == "http.response.body": + # Remaining body in streaming response. 
+ body = message.get("body", b"") + more_body = message.get("more_body", False) + + message["body"] = self.apply_compression(body, more_body=more_body) + + await self.send(message) + elif message_type == "http.response.pathsend": # pragma: no branch + # Don't apply GZip to pathsend responses + await self.send(self.initial_message) + await self.send(message) + + def apply_compression(self, body: bytes, *, more_body: bool) -> bytes: + """Apply compression on the response body. + + If more_body is False, any compression file should be closed. If it + isn't, it won't be closed automatically until all background tasks + complete. + """ + return body + + +class GZipResponder(IdentityResponder): + content_encoding = "gzip" + + def __init__(self, app: ASGIApp, minimum_size: int, compresslevel: int = 9) -> None: + super().__init__(app, minimum_size) + + self.gzip_buffer = io.BytesIO() + self.gzip_file = gzip.GzipFile(mode="wb", fileobj=self.gzip_buffer, compresslevel=compresslevel) + + async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: + with self.gzip_buffer, self.gzip_file: + await super().__call__(scope, receive, send) + + def apply_compression(self, body: bytes, *, more_body: bool) -> bytes: + self.gzip_file.write(body) + if not more_body: + self.gzip_file.close() + + body = self.gzip_buffer.getvalue() + self.gzip_buffer.seek(0) + self.gzip_buffer.truncate() + + return body + + +async def unattached_send(message: Message) -> NoReturn: + raise RuntimeError("send awaitable not set") # pragma: no cover diff --git a/venv/lib/python3.10/site-packages/starlette/middleware/httpsredirect.py b/venv/lib/python3.10/site-packages/starlette/middleware/httpsredirect.py new file mode 100644 index 0000000000000000000000000000000000000000..a8359067ff7afb80e979042077d5fa0fff119ddf --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/middleware/httpsredirect.py @@ -0,0 +1,19 @@ +from starlette.datastructures import URL +from starlette.responses import 
class HTTPSRedirectMiddleware:
    """Redirect insecure "http"/"ws" requests to their "https"/"wss" equivalents."""

    def __init__(self, app: ASGIApp) -> None:
        self.app = app

    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        insecure = scope["type"] in ("http", "websocket") and scope["scheme"] in ("http", "ws")
        if not insecure:
            await self.app(scope, receive, send)
            return

        current = URL(scope=scope)
        upgraded_scheme = {"http": "https", "ws": "wss"}[current.scheme]
        # An explicit non-default port is preserved; the default ports (80/443)
        # are dropped from the redirect target.
        target_netloc = current.netloc if current.port not in (80, 443) else current.hostname
        target = current.replace(scheme=upgraded_scheme, netloc=target_netloc)
        await RedirectResponse(target, status_code=307)(scope, receive, send)
        self.security_flags += "; secure"
        if domain is not None:
            self.security_flags += f"; domain={domain}"

    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        """ASGI entry: load the session from the signed cookie, persist it on response start."""
        if scope["type"] not in ("http", "websocket"):  # pragma: no cover
            await self.app(scope, receive, send)
            return

        connection = HTTPConnection(scope)
        initial_session_was_empty = True

        if self.session_cookie in connection.cookies:
            data = connection.cookies[self.session_cookie].encode("utf-8")
            try:
                # unsign() verifies the HMAC signature and the max_age timestamp;
                # the payload itself is base64-encoded JSON.
                data = self.signer.unsign(data, max_age=self.max_age)
                scope["session"] = json.loads(b64decode(data))
                initial_session_was_empty = False
            except BadSignature:
                # Tampered or expired cookie: fall back to an empty session.
                scope["session"] = {}
        else:
            scope["session"] = {}

        async def send_wrapper(message: Message) -> None:
            # Attach the Set-Cookie header to the response-start message only.
            if message["type"] == "http.response.start":
                if scope["session"]:
                    # We have session data to persist.
                    data = b64encode(json.dumps(scope["session"]).encode("utf-8"))
                    data = self.signer.sign(data)
                    headers = MutableHeaders(scope=message)
                    header_value = "{session_cookie}={data}; path={path}; {max_age}{security_flags}".format(
                        session_cookie=self.session_cookie,
                        data=data.decode("utf-8"),
                        path=self.path,
                        max_age=f"Max-Age={self.max_age}; " if self.max_age else "",
                        security_flags=self.security_flags,
                    )
                    headers.append("Set-Cookie", header_value)
                elif not initial_session_was_empty:
                    # The session has been cleared: expire the cookie.
                    headers = MutableHeaders(scope=message)
                    header_value = "{session_cookie}={data}; path={path}; {expires}{security_flags}".format(
                        session_cookie=self.session_cookie,
                        data="null",
                        path=self.path,
                        expires="expires=Thu, 01 Jan 1970 00:00:00 GMT; ",
                        security_flags=self.security_flags,
                    )
                    headers.append("Set-Cookie", header_value)
            await send(message)

        await self.app(scope, receive, send_wrapper)
class TrustedHostMiddleware:
    """Validate the incoming Host header against a configured allow-list."""

    def __init__(
        self,
        app: ASGIApp,
        allowed_hosts: Sequence[str] | None = None,
        www_redirect: bool = True,
    ) -> None:
        if allowed_hosts is None:
            # No restriction configured: allow every host.
            allowed_hosts = ["*"]

        for host_pattern in allowed_hosts:
            # A wildcard may only appear as a "*." prefix (or be the lone "*").
            assert "*" not in host_pattern[1:], ENFORCE_DOMAIN_WILDCARD
            if host_pattern.startswith("*") and host_pattern != "*":
                assert host_pattern.startswith("*."), ENFORCE_DOMAIN_WILDCARD
        self.app = app
        self.www_redirect = www_redirect
        self.allowed_hosts = list(allowed_hosts)
        self.allow_any = "*" in allowed_hosts
warnings.warn(
    "starlette.middleware.wsgi is deprecated and will be removed in a future release. "
    "Please refer to https://github.com/abersheeran/a2wsgi as a replacement.",
    DeprecationWarning,
)


def build_environ(scope: Scope, body: bytes) -> dict[str, Any]:
    """
    Build a WSGI environ dict (PEP 3333) from an ASGI HTTP scope and request body.

    Args:
        scope: The ASGI HTTP connection scope.
        body: The fully-buffered request body.

    Returns:
        A WSGI environ mapping suitable for passing to a WSGI application.
    """
    script_name = scope.get("root_path", "").encode("utf8").decode("latin1")
    path_info = scope["path"].encode("utf8").decode("latin1")
    if path_info.startswith(script_name):
        path_info = path_info[len(script_name) :]

    environ = {
        "REQUEST_METHOD": scope["method"],
        "SCRIPT_NAME": script_name,
        "PATH_INFO": path_info,
        "QUERY_STRING": scope["query_string"].decode("ascii"),
        "SERVER_PROTOCOL": f"HTTP/{scope['http_version']}",
        "wsgi.version": (1, 0),
        "wsgi.url_scheme": scope.get("scheme", "http"),
        "wsgi.input": io.BytesIO(body),
        "wsgi.errors": sys.stdout,
        "wsgi.multithread": True,
        "wsgi.multiprocess": True,
        "wsgi.run_once": False,
    }

    # Get server name and port - required in WSGI, not in ASGI.
    server = scope.get("server") or ("localhost", 80)
    environ["SERVER_NAME"] = server[0]
    # PEP 3333 requires all CGI variables, including SERVER_PORT, to be native
    # strings; the ASGI scope carries the port as an int (or None for unix
    # sockets), so convert it here instead of leaking the raw value through.
    environ["SERVER_PORT"] = str(server[1]) if server[1] is not None else "80"

    # Get client IP address.
    if scope.get("client"):
        environ["REMOTE_ADDR"] = scope["client"][0]

    # Go through headers and make them into environ entries.
    for name, value in scope.get("headers", []):
        name = name.decode("latin1")
        if name == "content-length":
            corrected_name = "CONTENT_LENGTH"
        elif name == "content-type":
            corrected_name = "CONTENT_TYPE"
        else:
            corrected_name = f"HTTP_{name}".upper().replace("-", "_")
        # HTTPbis say only ASCII chars are allowed in headers, but we latin1
        # just in case.
        value = value.decode("latin1")
        if corrected_name in environ:
            # Repeated headers are joined with commas, per the CGI convention.
            value = environ[corrected_name] + "," + value
        environ[corrected_name] = value
    return environ
    def __init__(self, app: Callable[..., Any], scope: Scope) -> None:
        self.app = app
        self.scope = scope
        self.status = None
        self.response_headers = None
        # Memory stream pair used to hand ASGI messages from the WSGI worker
        # thread back to the async sender task.
        self.stream_send, self.stream_receive = anyio.create_memory_object_stream(math.inf)
        self.response_started = False
        self.exc_info: Any = None

    async def __call__(self, receive: Receive, send: Send) -> None:
        """Buffer the request body, run the WSGI app in a thread, forward its output."""
        body = b""
        more_body = True
        while more_body:
            message = await receive()
            body += message.get("body", b"")
            more_body = message.get("more_body", False)
        environ = build_environ(self.scope, body)

        async with anyio.create_task_group() as task_group:
            task_group.start_soon(self.sender, send)
            async with self.stream_send:
                # The WSGI app runs synchronously in a worker thread; it feeds
                # messages back through stream_send via anyio.from_thread.run.
                await anyio.to_thread.run_sync(self.wsgi, environ, self.start_response)
        if self.exc_info is not None:
            # Re-raise the exception the WSGI app reported via start_response.
            raise self.exc_info[0].with_traceback(self.exc_info[1], self.exc_info[2])

    async def sender(self, send: Send) -> None:
        # Forward every ASGI message produced by the worker thread.
        async with self.stream_receive:
            async for message in self.stream_receive:
                await send(message)

    def start_response(
        self,
        status: str,
        response_headers: list[tuple[str, str]],
        exc_info: Any = None,
    ) -> None:
        """WSGI ``start_response`` callable; runs inside the worker thread."""
        self.exc_info = exc_info
        if not self.response_started:  # pragma: no branch
            self.response_started = True
            status_code_string, _ = status.split(" ", 1)
            status_code = int(status_code_string)
            headers = [
                (name.strip().encode("ascii").lower(), value.strip().encode("ascii"))
                for name, value in response_headers
            ]
            anyio.from_thread.run(
                self.stream_send.send,
                {
                    "type": "http.response.start",
                    "status": status_code,
                    "headers": headers,
                },
            )

    def wsgi(
        self,
        environ: dict[str, Any],
        start_response: Callable[..., Any],
    ) -> None:
        """Run the WSGI application and stream its body chunks (worker thread)."""
        for chunk in self.app(environ, start_response):
            anyio.from_thread.run(
                self.stream_send.send,
                {"type": "http.response.body", "body": chunk, "more_body": True},
            )

        # Terminate the response with an empty final body message.
        anyio.from_thread.run(self.stream_send.send, {"type": "http.response.body", "body": b""})
SERVER_PUSH_HEADERS_TO_COPY = {
    "accept",
    "accept-encoding",
    "accept-language",
    "cache-control",
    "user-agent",
}


def cookie_parser(cookie_string: str) -> dict[str, str]:
    """
    Parse a ``Cookie`` HTTP header into a dict of name/value pairs.

    Mirrors browser behavior rather than strict RFC 6265 parsing, since
    browsers and servers routinely deviate from the spec.  Adapted from
    Django 3.1.0.  ``SimpleCookie.load`` is deliberately avoided because it
    follows an outdated spec and rejects many real-world inputs.
    """
    parsed: dict[str, str] = {}
    for piece in cookie_string.split(";"):
        name, sep, value = piece.partition("=")
        if not sep:
            # No "=" present: treat the whole chunk as a value with an empty
            # name (see https://bugzilla.mozilla.org/show_bug.cgi?id=169091).
            name, value = "", piece
        name, value = name.strip(), value.strip()
        if name or value:
            # Unquote using Python's algorithm.
            parsed[name] = http_cookies._unquote(value)
    return parsed


class ClientDisconnect(Exception):
    pass
+ __eq__ = object.__eq__ + __hash__ = object.__hash__ + + @property + def app(self) -> Any: + return self.scope["app"] + + @property + def url(self) -> URL: + if not hasattr(self, "_url"): # pragma: no branch + self._url = URL(scope=self.scope) + return self._url + + @property + def base_url(self) -> URL: + if not hasattr(self, "_base_url"): + base_url_scope = dict(self.scope) + # This is used by request.url_for, it might be used inside a Mount which + # would have its own child scope with its own root_path, but the base URL + # for url_for should still be the top level app root path. + app_root_path = base_url_scope.get("app_root_path", base_url_scope.get("root_path", "")) + path = app_root_path + if not path.endswith("/"): + path += "/" + base_url_scope["path"] = path + base_url_scope["query_string"] = b"" + base_url_scope["root_path"] = app_root_path + self._base_url = URL(scope=base_url_scope) + return self._base_url + + @property + def headers(self) -> Headers: + if not hasattr(self, "_headers"): + self._headers = Headers(scope=self.scope) + return self._headers + + @property + def query_params(self) -> QueryParams: + if not hasattr(self, "_query_params"): # pragma: no branch + self._query_params = QueryParams(self.scope["query_string"]) + return self._query_params + + @property + def path_params(self) -> dict[str, Any]: + return self.scope.get("path_params", {}) + + @property + def cookies(self) -> dict[str, str]: + if not hasattr(self, "_cookies"): + cookies: dict[str, str] = {} + cookie_header = self.headers.get("cookie") + + if cookie_header: + cookies = cookie_parser(cookie_header) + self._cookies = cookies + return self._cookies + + @property + def client(self) -> Address | None: + # client is a 2 item tuple of (host, port), None if missing + host_port = self.scope.get("client") + if host_port is not None: + return Address(*host_port) + return None + + @property + def session(self) -> dict[str, Any]: + assert "session" in self.scope, "SessionMiddleware 
    @property
    def auth(self) -> Any:
        assert "auth" in self.scope, "AuthenticationMiddleware must be installed to access request.auth"
        return self.scope["auth"]

    @property
    def user(self) -> Any:
        assert "user" in self.scope, "AuthenticationMiddleware must be installed to access request.user"
        return self.scope["user"]

    @property
    def state(self) -> State:
        if not hasattr(self, "_state"):
            # Ensure 'state' has an empty dict if it's not already populated.
            self.scope.setdefault("state", {})
            # Create a state instance with a reference to the dict in which it
            # should store info.
            self._state = State(self.scope["state"])
        return self._state

    def url_for(self, name: str, /, **path_params: Any) -> URL:
        """Resolve a route name (plus path params) to an absolute URL."""
        url_path_provider: Router | Starlette | None = self.scope.get("router") or self.scope.get("app")
        if url_path_provider is None:
            raise RuntimeError("The `url_for` method can only be used inside a Starlette application or with a router.")
        url_path = url_path_provider.url_path_for(name, **path_params)
        return url_path.make_absolute_url(base_url=self.base_url)


async def empty_receive() -> NoReturn:
    """Placeholder receive channel: always raises."""
    raise RuntimeError("Receive channel has not been made available")


async def empty_send(message: Message) -> NoReturn:
    """Placeholder send channel: always raises."""
    raise RuntimeError("Send channel has not been made available")


class Request(HTTPConnection):
    # Parsed form data, populated lazily by `_get_form()`.
    _form: FormData | None

    def __init__(self, scope: Scope, receive: Receive = empty_receive, send: Send = empty_send):
        super().__init__(scope)
        assert scope["type"] == "http"
        self._receive = receive
        self._send = send
        self._stream_consumed = False
        self._is_disconnected = False
        self._form = None

    @property
    def method(self) -> str:
        return cast(str, self.scope["method"])

    @property
    def receive(self) -> Receive:
        return self._receive
if hasattr(self, "_body"): + yield self._body + yield b"" + return + if self._stream_consumed: + raise RuntimeError("Stream consumed") + while not self._stream_consumed: + message = await self._receive() + if message["type"] == "http.request": + body = message.get("body", b"") + if not message.get("more_body", False): + self._stream_consumed = True + if body: + yield body + elif message["type"] == "http.disconnect": # pragma: no branch + self._is_disconnected = True + raise ClientDisconnect() + yield b"" + + async def body(self) -> bytes: + if not hasattr(self, "_body"): + chunks: list[bytes] = [] + async for chunk in self.stream(): + chunks.append(chunk) + self._body = b"".join(chunks) + return self._body + + async def json(self) -> Any: + if not hasattr(self, "_json"): # pragma: no branch + body = await self.body() + self._json = json.loads(body) + return self._json + + async def _get_form( + self, + *, + max_files: int | float = 1000, + max_fields: int | float = 1000, + max_part_size: int = 1024 * 1024, + ) -> FormData: + if self._form is None: # pragma: no branch + assert parse_options_header is not None, ( + "The `python-multipart` library must be installed to use form parsing." 
    def form(
        self,
        *,
        max_files: int | float = 1000,
        max_fields: int | float = 1000,
        max_part_size: int = 1024 * 1024,
    ) -> AwaitableOrContextManager[FormData]:
        """Return the parsed form data; usable awaited or as an async context manager."""
        return AwaitableOrContextManagerWrapper(
            self._get_form(max_files=max_files, max_fields=max_fields, max_part_size=max_part_size)
        )

    async def close(self) -> None:
        # Close any resources opened while parsing form data.
        if self._form is not None:  # pragma: no branch
            await self._form.close()

    async def is_disconnected(self) -> bool:
        """Check (without blocking) whether an http.disconnect message has arrived."""
        if not self._is_disconnected:
            message: Message = {}

            # If message isn't immediately available, move on: the scope is
            # cancelled before awaiting, so the receive cannot block.
            with anyio.CancelScope() as cs:
                cs.cancel()
                message = await self._receive()

            if message.get("type") == "http.disconnect":
                self._is_disconnected = True

        return self._is_disconnected

    async def send_push_promise(self, path: str) -> None:
        """Send an HTTP/2 push promise for `path` if the server advertises the extension."""
        if "http.response.push" in self.scope.get("extensions", {}):
            raw_headers: list[tuple[bytes, bytes]] = []
            # Only a whitelisted subset of request headers is copied to the push.
            for name in SERVER_PUSH_HEADERS_TO_COPY:
                for value in self.headers.getlist(name):
                    raw_headers.append((name.encode("latin-1"), value.encode("latin-1")))
            await self._send({"type": "http.response.push", "path": path, "headers": raw_headers})
class Response:
    """Base HTTP response: renders `content` to bytes and initializes headers."""

    # Default content type; subclasses override (e.g. "text/html").
    media_type = None
    charset = "utf-8"

    def __init__(
        self,
        content: Any = None,
        status_code: int = 200,
        headers: Mapping[str, str] | None = None,
        media_type: str | None = None,
        background: BackgroundTask | None = None,
    ) -> None:
        self.status_code = status_code
        if media_type is not None:
            # An explicit media type overrides the class-level default.
            self.media_type = media_type
        self.background = background
        self.body = self.render(content)
        self.init_headers(headers)

    def render(self, content: Any) -> bytes | memoryview:
        """Encode `content` to bytes; bytes-like content passes through unchanged."""
        if content is None:
            return b""
        if isinstance(content, (bytes, memoryview)):
            return content
        return content.encode(self.charset)  # type: ignore
True + else: + raw_headers = [(k.lower().encode("latin-1"), v.encode("latin-1")) for k, v in headers.items()] + keys = [h[0] for h in raw_headers] + populate_content_length = b"content-length" not in keys + populate_content_type = b"content-type" not in keys + + body = getattr(self, "body", None) + if ( + body is not None + and populate_content_length + and not (self.status_code < 200 or self.status_code in (204, 304)) + ): + content_length = str(len(body)) + raw_headers.append((b"content-length", content_length.encode("latin-1"))) + + content_type = self.media_type + if content_type is not None and populate_content_type: + if content_type.startswith("text/") and "charset=" not in content_type.lower(): + content_type += "; charset=" + self.charset + raw_headers.append((b"content-type", content_type.encode("latin-1"))) + + self.raw_headers = raw_headers + + @property + def headers(self) -> MutableHeaders: + if not hasattr(self, "_headers"): + self._headers = MutableHeaders(raw=self.raw_headers) + return self._headers + + def set_cookie( + self, + key: str, + value: str = "", + max_age: int | None = None, + expires: datetime | str | int | None = None, + path: str | None = "/", + domain: str | None = None, + secure: bool = False, + httponly: bool = False, + samesite: Literal["lax", "strict", "none"] | None = "lax", + partitioned: bool = False, + ) -> None: + cookie: http.cookies.BaseCookie[str] = http.cookies.SimpleCookie() + cookie[key] = value + if max_age is not None: + cookie[key]["max-age"] = max_age + if expires is not None: + if isinstance(expires, datetime): + cookie[key]["expires"] = format_datetime(expires, usegmt=True) + else: + cookie[key]["expires"] = expires + if path is not None: + cookie[key]["path"] = path + if domain is not None: + cookie[key]["domain"] = domain + if secure: + cookie[key]["secure"] = True + if httponly: + cookie[key]["httponly"] = True + if samesite is not None: + assert samesite.lower() in [ + "strict", + "lax", + "none", + ], 
            cookie[key]["samesite"] = samesite
        if partitioned:
            if sys.version_info < (3, 14):
                raise ValueError("Partitioned cookies are only supported in Python 3.14 and above.")  # pragma: no cover
            cookie[key]["partitioned"] = True  # pragma: no cover

        cookie_val = cookie.output(header="").strip()
        self.raw_headers.append((b"set-cookie", cookie_val.encode("latin-1")))

    def delete_cookie(
        self,
        key: str,
        path: str = "/",
        domain: str | None = None,
        secure: bool = False,
        httponly: bool = False,
        samesite: Literal["lax", "strict", "none"] | None = "lax",
    ) -> None:
        """Expire a cookie by re-setting it with Max-Age=0 and an epoch expiry."""
        self.set_cookie(
            key,
            max_age=0,
            expires=0,
            path=path,
            domain=domain,
            secure=secure,
            httponly=httponly,
            samesite=samesite,
        )

    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        """ASGI entry: send start message, body, then run any background task."""
        # For websocket scopes the messages use the "websocket.http.response.*" prefix.
        prefix = "websocket." if scope["type"] == "websocket" else ""
        await send(
            {
                "type": prefix + "http.response.start",
                "status": self.status_code,
                "headers": self.raw_headers,
            }
        )
        await send({"type": prefix + "http.response.body", "body": self.body})

        if self.background is not None:
            await self.background()


class HTMLResponse(Response):
    media_type = "text/html"


class PlainTextResponse(Response):
    media_type = "text/plain"


class JSONResponse(Response):
    media_type = "application/json"

    def __init__(
        self,
        content: Any,
        status_code: int = 200,
        headers: Mapping[str, str] | None = None,
        media_type: str | None = None,
        background: BackgroundTask | None = None,
    ) -> None:
        super().__init__(content, status_code, headers, media_type, background)

    def render(self, content: Any) -> bytes:
        """Serialize `content` as compact JSON (non-ASCII preserved, NaN rejected)."""
        return json.dumps(
            content,
            ensure_ascii=False,
            allow_nan=False,
            indent=None,
            separators=(",", ":"),
        ).encode("utf-8")
Content = Union[str, bytes, memoryview]
SyncContentStream = Iterable[Content]
AsyncContentStream = AsyncIterable[Content]
ContentStream = Union[AsyncContentStream, SyncContentStream]


class StreamingResponse(Response):
    # The normalized (always async) iterator over body chunks.
    body_iterator: AsyncContentStream

    def __init__(
        self,
        content: ContentStream,
        status_code: int = 200,
        headers: Mapping[str, str] | None = None,
        media_type: str | None = None,
        background: BackgroundTask | None = None,
    ) -> None:
        """Stream `content` chunk by chunk; sync iterables are iterated in a threadpool."""
        if isinstance(content, AsyncIterable):
            self.body_iterator = content
        else:
            self.body_iterator = iterate_in_threadpool(content)
        self.status_code = status_code
        self.media_type = self.media_type if media_type is None else media_type
        self.background = background
        self.init_headers(headers)

    async def listen_for_disconnect(self, receive: Receive) -> None:
        # Consume incoming messages until the client disconnects.
        while True:
            message = await receive()
            if message["type"] == "http.disconnect":
                break

    async def stream_response(self, send: Send) -> None:
        """Send the response start message followed by each body chunk."""
        await send(
            {
                "type": "http.response.start",
                "status": self.status_code,
                "headers": self.raw_headers,
            }
        )
        async for chunk in self.body_iterator:
            if not isinstance(chunk, (bytes, memoryview)):
                chunk = chunk.encode(self.charset)
            await send({"type": "http.response.body", "body": chunk, "more_body": True})

        # Terminate the response with an empty final body message.
        await send({"type": "http.response.body", "body": b"", "more_body": False})
as task_group: + + async def wrap(func: Callable[[], Awaitable[None]]) -> None: + await func() + task_group.cancel_scope.cancel() + + task_group.start_soon(wrap, partial(self.stream_response, send)) + await wrap(partial(self.listen_for_disconnect, receive)) + + if self.background is not None: + await self.background() + + +class MalformedRangeHeader(Exception): + def __init__(self, content: str = "Malformed range header.") -> None: + self.content = content + + +class RangeNotSatisfiable(Exception): + def __init__(self, max_size: int) -> None: + self.max_size = max_size + + +_RANGE_PATTERN = re.compile(r"(\d*)-(\d*)") + + +class FileResponse(Response): + chunk_size = 64 * 1024 + + def __init__( + self, + path: str | os.PathLike[str], + status_code: int = 200, + headers: Mapping[str, str] | None = None, + media_type: str | None = None, + background: BackgroundTask | None = None, + filename: str | None = None, + stat_result: os.stat_result | None = None, + method: str | None = None, + content_disposition_type: str = "attachment", + ) -> None: + self.path = path + self.status_code = status_code + self.filename = filename + if method is not None: + warnings.warn( + "The 'method' parameter is not used, and it will be removed.", + DeprecationWarning, + ) + if media_type is None: + media_type = guess_type(filename or path)[0] or "text/plain" + self.media_type = media_type + self.background = background + self.init_headers(headers) + self.headers.setdefault("accept-ranges", "bytes") + if self.filename is not None: + content_disposition_filename = quote(self.filename) + if content_disposition_filename != self.filename: + content_disposition = f"{content_disposition_type}; filename*=utf-8''{content_disposition_filename}" + else: + content_disposition = f'{content_disposition_type}; filename="{self.filename}"' + self.headers.setdefault("content-disposition", content_disposition) + self.stat_result = stat_result + if stat_result is not None: + self.set_stat_headers(stat_result) 
+ + def set_stat_headers(self, stat_result: os.stat_result) -> None: + content_length = str(stat_result.st_size) + last_modified = formatdate(stat_result.st_mtime, usegmt=True) + etag_base = str(stat_result.st_mtime) + "-" + str(stat_result.st_size) + etag = f'"{hashlib.md5(etag_base.encode(), usedforsecurity=False).hexdigest()}"' + + self.headers.setdefault("content-length", content_length) + self.headers.setdefault("last-modified", last_modified) + self.headers.setdefault("etag", etag) + + async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: + send_header_only: bool = scope["method"].upper() == "HEAD" + send_pathsend: bool = "http.response.pathsend" in scope.get("extensions", {}) + + if self.stat_result is None: + try: + stat_result = await anyio.to_thread.run_sync(os.stat, self.path) + self.set_stat_headers(stat_result) + except FileNotFoundError: + raise RuntimeError(f"File at path {self.path} does not exist.") + else: + mode = stat_result.st_mode + if not stat.S_ISREG(mode): + raise RuntimeError(f"File at path {self.path} is not a file.") + else: + stat_result = self.stat_result + + headers = Headers(scope=scope) + http_range = headers.get("range") + http_if_range = headers.get("if-range") + + if http_range is None or (http_if_range is not None and not self._should_use_range(http_if_range)): + await self._handle_simple(send, send_header_only, send_pathsend) + else: + try: + ranges = self._parse_range_header(http_range, stat_result.st_size) + except MalformedRangeHeader as exc: + return await PlainTextResponse(exc.content, status_code=400)(scope, receive, send) + except RangeNotSatisfiable as exc: + response = PlainTextResponse(status_code=416, headers={"Content-Range": f"*/{exc.max_size}"}) + return await response(scope, receive, send) + + if len(ranges) == 1: + start, end = ranges[0] + await self._handle_single_range(send, start, end, stat_result.st_size, send_header_only) + else: + await self._handle_multiple_ranges(send, ranges, 
stat_result.st_size, send_header_only) + + if self.background is not None: + await self.background() + + async def _handle_simple(self, send: Send, send_header_only: bool, send_pathsend: bool) -> None: + await send({"type": "http.response.start", "status": self.status_code, "headers": self.raw_headers}) + if send_header_only: + await send({"type": "http.response.body", "body": b"", "more_body": False}) + elif send_pathsend: + await send({"type": "http.response.pathsend", "path": str(self.path)}) + else: + async with await anyio.open_file(self.path, mode="rb") as file: + more_body = True + while more_body: + chunk = await file.read(self.chunk_size) + more_body = len(chunk) == self.chunk_size + await send({"type": "http.response.body", "body": chunk, "more_body": more_body}) + + async def _handle_single_range( + self, send: Send, start: int, end: int, file_size: int, send_header_only: bool + ) -> None: + self.headers["content-range"] = f"bytes {start}-{end - 1}/{file_size}" + self.headers["content-length"] = str(end - start) + await send({"type": "http.response.start", "status": 206, "headers": self.raw_headers}) + if send_header_only: + await send({"type": "http.response.body", "body": b"", "more_body": False}) + else: + async with await anyio.open_file(self.path, mode="rb") as file: + await file.seek(start) + more_body = True + while more_body: + chunk = await file.read(min(self.chunk_size, end - start)) + start += len(chunk) + more_body = len(chunk) == self.chunk_size and start < end + await send({"type": "http.response.body", "body": chunk, "more_body": more_body}) + + async def _handle_multiple_ranges( + self, + send: Send, + ranges: list[tuple[int, int]], + file_size: int, + send_header_only: bool, + ) -> None: + # In firefox and chrome, they use boundary with 95-96 bits entropy (that's roughly 13 bytes). 
+ boundary = token_hex(13) + content_length, header_generator = self.generate_multipart( + ranges, boundary, file_size, self.headers["content-type"] + ) + self.headers["content-range"] = f"multipart/byteranges; boundary={boundary}" + self.headers["content-length"] = str(content_length) + await send({"type": "http.response.start", "status": 206, "headers": self.raw_headers}) + if send_header_only: + await send({"type": "http.response.body", "body": b"", "more_body": False}) + else: + async with await anyio.open_file(self.path, mode="rb") as file: + for start, end in ranges: + await send({"type": "http.response.body", "body": header_generator(start, end), "more_body": True}) + await file.seek(start) + while start < end: + chunk = await file.read(min(self.chunk_size, end - start)) + start += len(chunk) + await send({"type": "http.response.body", "body": chunk, "more_body": True}) + await send({"type": "http.response.body", "body": b"\n", "more_body": True}) + await send( + { + "type": "http.response.body", + "body": f"\n--{boundary}--\n".encode("latin-1"), + "more_body": False, + } + ) + + def _should_use_range(self, http_if_range: str) -> bool: + return http_if_range == self.headers["last-modified"] or http_if_range == self.headers["etag"] + + @staticmethod + def _parse_range_header(http_range: str, file_size: int) -> list[tuple[int, int]]: + ranges: list[tuple[int, int]] = [] + try: + units, range_ = http_range.split("=", 1) + except ValueError: + raise MalformedRangeHeader() + + units = units.strip().lower() + + if units != "bytes": + raise MalformedRangeHeader("Only support bytes range") + + ranges = [ + ( + int(_[0]) if _[0] else file_size - int(_[1]), + int(_[1]) + 1 if _[0] and _[1] and int(_[1]) < file_size else file_size, + ) + for _ in _RANGE_PATTERN.findall(range_) + if _ != ("", "") + ] + + if len(ranges) == 0: + raise MalformedRangeHeader("Range header: range must be requested") + + if any(not (0 <= start < file_size) for start, _ in ranges): + raise 
RangeNotSatisfiable(file_size) + + if any(start > end for start, end in ranges): + raise MalformedRangeHeader("Range header: start must be less than end") + + if len(ranges) == 1: + return ranges + + # Merge ranges + result: list[tuple[int, int]] = [] + for start, end in ranges: + for p in range(len(result)): + p_start, p_end = result[p] + if start > p_end: + continue + elif end < p_start: + result.insert(p, (start, end)) # THIS IS NOT REACHED! + break + else: + result[p] = (min(start, p_start), max(end, p_end)) + break + else: + result.append((start, end)) + + return result + + def generate_multipart( + self, + ranges: Sequence[tuple[int, int]], + boundary: str, + max_size: int, + content_type: str, + ) -> tuple[int, Callable[[int, int], bytes]]: + r""" + Multipart response headers generator. + + ``` + --{boundary}\n + Content-Type: {content_type}\n + Content-Range: bytes {start}-{end-1}/{max_size}\n + \n + ..........content...........\n + --{boundary}\n + Content-Type: {content_type}\n + Content-Range: bytes {start}-{end-1}/{max_size}\n + \n + ..........content...........\n + --{boundary}--\n + ``` + """ + boundary_len = len(boundary) + static_header_part_len = 44 + boundary_len + len(content_type) + len(str(max_size)) + content_length = sum( + (len(str(start)) + len(str(end - 1)) + static_header_part_len) # Headers + + (end - start) # Content + for start, end in ranges + ) + ( + 5 + boundary_len # --boundary--\n + ) + return ( + content_length, + lambda start, end: ( + f"--{boundary}\nContent-Type: {content_type}\nContent-Range: bytes {start}-{end - 1}/{max_size}\n\n" + ).encode("latin-1"), + ) diff --git a/venv/lib/python3.10/site-packages/starlette/routing.py b/venv/lib/python3.10/site-packages/starlette/routing.py new file mode 100644 index 0000000000000000000000000000000000000000..96c2df929fcfc6431a716f43de76b4d029737ccb --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/routing.py @@ -0,0 +1,876 @@ +from __future__ import annotations + +import 
contextlib +import functools +import inspect +import re +import traceback +import types +import warnings +from collections.abc import Awaitable, Collection, Generator, Sequence +from contextlib import AbstractAsyncContextManager, AbstractContextManager, asynccontextmanager +from enum import Enum +from re import Pattern +from typing import Any, Callable, TypeVar + +from starlette._exception_handler import wrap_app_handling_exceptions +from starlette._utils import get_route_path, is_async_callable +from starlette.concurrency import run_in_threadpool +from starlette.convertors import CONVERTOR_TYPES, Convertor +from starlette.datastructures import URL, Headers, URLPath +from starlette.exceptions import HTTPException +from starlette.middleware import Middleware +from starlette.requests import Request +from starlette.responses import PlainTextResponse, RedirectResponse, Response +from starlette.types import ASGIApp, Lifespan, Receive, Scope, Send +from starlette.websockets import WebSocket, WebSocketClose + + +class NoMatchFound(Exception): + """ + Raised by `.url_for(name, **path_params)` and `.url_path_for(name, **path_params)` + if no matching route exists. + """ + + def __init__(self, name: str, path_params: dict[str, Any]) -> None: + params = ", ".join(list(path_params.keys())) + super().__init__(f'No route exists for name "{name}" and params "{params}".') + + +class Match(Enum): + NONE = 0 + PARTIAL = 1 + FULL = 2 + + +def iscoroutinefunction_or_partial(obj: Any) -> bool: # pragma: no cover + """ + Correctly determines if an object is a coroutine function, + including those wrapped in functools.partial objects. 
+ """ + warnings.warn( + "iscoroutinefunction_or_partial is deprecated, and will be removed in a future release.", + DeprecationWarning, + ) + while isinstance(obj, functools.partial): + obj = obj.func + return inspect.iscoroutinefunction(obj) + + +def request_response( + func: Callable[[Request], Awaitable[Response] | Response], +) -> ASGIApp: + """ + Takes a function or coroutine `func(request) -> response`, + and returns an ASGI application. + """ + f: Callable[[Request], Awaitable[Response]] = ( + func if is_async_callable(func) else functools.partial(run_in_threadpool, func) + ) + + async def app(scope: Scope, receive: Receive, send: Send) -> None: + request = Request(scope, receive, send) + + async def app(scope: Scope, receive: Receive, send: Send) -> None: + response = await f(request) + await response(scope, receive, send) + + await wrap_app_handling_exceptions(app, request)(scope, receive, send) + + return app + + +def websocket_session( + func: Callable[[WebSocket], Awaitable[None]], +) -> ASGIApp: + """ + Takes a coroutine `func(session)`, and returns an ASGI application. 
+ """ + # assert asyncio.iscoroutinefunction(func), "WebSocket endpoints must be async" + + async def app(scope: Scope, receive: Receive, send: Send) -> None: + session = WebSocket(scope, receive=receive, send=send) + + async def app(scope: Scope, receive: Receive, send: Send) -> None: + await func(session) + + await wrap_app_handling_exceptions(app, session)(scope, receive, send) + + return app + + +def get_name(endpoint: Callable[..., Any]) -> str: + return getattr(endpoint, "__name__", endpoint.__class__.__name__) + + +def replace_params( + path: str, + param_convertors: dict[str, Convertor[Any]], + path_params: dict[str, str], +) -> tuple[str, dict[str, str]]: + for key, value in list(path_params.items()): + if "{" + key + "}" in path: + convertor = param_convertors[key] + value = convertor.to_string(value) + path = path.replace("{" + key + "}", value) + path_params.pop(key) + return path, path_params + + +# Match parameters in URL paths, eg. '{param}', and '{param:int}' +PARAM_REGEX = re.compile("{([a-zA-Z_][a-zA-Z0-9_]*)(:[a-zA-Z_][a-zA-Z0-9_]*)?}") + + +def compile_path( + path: str, +) -> tuple[Pattern[str], str, dict[str, Convertor[Any]]]: + """ + Given a path string, like: "/{username:str}", + or a host string, like: "{subdomain}.mydomain.org", return a three-tuple + of (regex, format, {param_name:convertor}). 
+ + regex: "/(?P[^/]+)" + format: "/{username}" + convertors: {"username": StringConvertor()} + """ + is_host = not path.startswith("/") + + path_regex = "^" + path_format = "" + duplicated_params = set() + + idx = 0 + param_convertors = {} + for match in PARAM_REGEX.finditer(path): + param_name, convertor_type = match.groups("str") + convertor_type = convertor_type.lstrip(":") + assert convertor_type in CONVERTOR_TYPES, f"Unknown path convertor '{convertor_type}'" + convertor = CONVERTOR_TYPES[convertor_type] + + path_regex += re.escape(path[idx : match.start()]) + path_regex += f"(?P<{param_name}>{convertor.regex})" + + path_format += path[idx : match.start()] + path_format += "{%s}" % param_name + + if param_name in param_convertors: + duplicated_params.add(param_name) + + param_convertors[param_name] = convertor + + idx = match.end() + + if duplicated_params: + names = ", ".join(sorted(duplicated_params)) + ending = "s" if len(duplicated_params) > 1 else "" + raise ValueError(f"Duplicated param name{ending} {names} at path {path}") + + if is_host: + # Align with `Host.matches()` behavior, which ignores port. + hostname = path[idx:].split(":")[0] + path_regex += re.escape(hostname) + "$" + else: + path_regex += re.escape(path[idx:]) + "$" + + path_format += path[idx:] + + return re.compile(path_regex), path_format, param_convertors + + +class BaseRoute: + def matches(self, scope: Scope) -> tuple[Match, Scope]: + raise NotImplementedError() # pragma: no cover + + def url_path_for(self, name: str, /, **path_params: Any) -> URLPath: + raise NotImplementedError() # pragma: no cover + + async def handle(self, scope: Scope, receive: Receive, send: Send) -> None: + raise NotImplementedError() # pragma: no cover + + async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: + """ + A route may be used in isolation as a stand-alone ASGI app. 
+ This is a somewhat contrived case, as they'll almost always be used + within a Router, but could be useful for some tooling and minimal apps. + """ + match, child_scope = self.matches(scope) + if match == Match.NONE: + if scope["type"] == "http": + response = PlainTextResponse("Not Found", status_code=404) + await response(scope, receive, send) + elif scope["type"] == "websocket": # pragma: no branch + websocket_close = WebSocketClose() + await websocket_close(scope, receive, send) + return + + scope.update(child_scope) + await self.handle(scope, receive, send) + + +class Route(BaseRoute): + def __init__( + self, + path: str, + endpoint: Callable[..., Any], + *, + methods: Collection[str] | None = None, + name: str | None = None, + include_in_schema: bool = True, + middleware: Sequence[Middleware] | None = None, + ) -> None: + assert path.startswith("/"), "Routed paths must start with '/'" + self.path = path + self.endpoint = endpoint + self.name = get_name(endpoint) if name is None else name + self.include_in_schema = include_in_schema + + endpoint_handler = endpoint + while isinstance(endpoint_handler, functools.partial): + endpoint_handler = endpoint_handler.func + if inspect.isfunction(endpoint_handler) or inspect.ismethod(endpoint_handler): + # Endpoint is function or method. Treat it as `func(request) -> response`. + self.app = request_response(endpoint) + if methods is None: + methods = ["GET"] + else: + # Endpoint is a class. Treat it as ASGI. 
+ self.app = endpoint + + if middleware is not None: + for cls, args, kwargs in reversed(middleware): + self.app = cls(self.app, *args, **kwargs) + + if methods is None: + self.methods = None + else: + self.methods = {method.upper() for method in methods} + if "GET" in self.methods: + self.methods.add("HEAD") + + self.path_regex, self.path_format, self.param_convertors = compile_path(path) + + def matches(self, scope: Scope) -> tuple[Match, Scope]: + path_params: dict[str, Any] + if scope["type"] == "http": + route_path = get_route_path(scope) + match = self.path_regex.match(route_path) + if match: + matched_params = match.groupdict() + for key, value in matched_params.items(): + matched_params[key] = self.param_convertors[key].convert(value) + path_params = dict(scope.get("path_params", {})) + path_params.update(matched_params) + child_scope = {"endpoint": self.endpoint, "path_params": path_params} + if self.methods and scope["method"] not in self.methods: + return Match.PARTIAL, child_scope + else: + return Match.FULL, child_scope + return Match.NONE, {} + + def url_path_for(self, name: str, /, **path_params: Any) -> URLPath: + seen_params = set(path_params.keys()) + expected_params = set(self.param_convertors.keys()) + + if name != self.name or seen_params != expected_params: + raise NoMatchFound(name, path_params) + + path, remaining_params = replace_params(self.path_format, self.param_convertors, path_params) + assert not remaining_params + return URLPath(path=path, protocol="http") + + async def handle(self, scope: Scope, receive: Receive, send: Send) -> None: + if self.methods and scope["method"] not in self.methods: + headers = {"Allow": ", ".join(self.methods)} + if "app" in scope: + raise HTTPException(status_code=405, headers=headers) + else: + response = PlainTextResponse("Method Not Allowed", status_code=405, headers=headers) + await response(scope, receive, send) + else: + await self.app(scope, receive, send) + + def __eq__(self, other: Any) -> bool: 
+ return ( + isinstance(other, Route) + and self.path == other.path + and self.endpoint == other.endpoint + and self.methods == other.methods + ) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + methods = sorted(self.methods or []) + path, name = self.path, self.name + return f"{class_name}(path={path!r}, name={name!r}, methods={methods!r})" + + +class WebSocketRoute(BaseRoute): + def __init__( + self, + path: str, + endpoint: Callable[..., Any], + *, + name: str | None = None, + middleware: Sequence[Middleware] | None = None, + ) -> None: + assert path.startswith("/"), "Routed paths must start with '/'" + self.path = path + self.endpoint = endpoint + self.name = get_name(endpoint) if name is None else name + + endpoint_handler = endpoint + while isinstance(endpoint_handler, functools.partial): + endpoint_handler = endpoint_handler.func + if inspect.isfunction(endpoint_handler) or inspect.ismethod(endpoint_handler): + # Endpoint is function or method. Treat it as `func(websocket)`. + self.app = websocket_session(endpoint) + else: + # Endpoint is a class. Treat it as ASGI. 
+ self.app = endpoint + + if middleware is not None: + for cls, args, kwargs in reversed(middleware): + self.app = cls(self.app, *args, **kwargs) + + self.path_regex, self.path_format, self.param_convertors = compile_path(path) + + def matches(self, scope: Scope) -> tuple[Match, Scope]: + path_params: dict[str, Any] + if scope["type"] == "websocket": + route_path = get_route_path(scope) + match = self.path_regex.match(route_path) + if match: + matched_params = match.groupdict() + for key, value in matched_params.items(): + matched_params[key] = self.param_convertors[key].convert(value) + path_params = dict(scope.get("path_params", {})) + path_params.update(matched_params) + child_scope = {"endpoint": self.endpoint, "path_params": path_params} + return Match.FULL, child_scope + return Match.NONE, {} + + def url_path_for(self, name: str, /, **path_params: Any) -> URLPath: + seen_params = set(path_params.keys()) + expected_params = set(self.param_convertors.keys()) + + if name != self.name or seen_params != expected_params: + raise NoMatchFound(name, path_params) + + path, remaining_params = replace_params(self.path_format, self.param_convertors, path_params) + assert not remaining_params + return URLPath(path=path, protocol="websocket") + + async def handle(self, scope: Scope, receive: Receive, send: Send) -> None: + await self.app(scope, receive, send) + + def __eq__(self, other: Any) -> bool: + return isinstance(other, WebSocketRoute) and self.path == other.path and self.endpoint == other.endpoint + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(path={self.path!r}, name={self.name!r})" + + +class Mount(BaseRoute): + def __init__( + self, + path: str, + app: ASGIApp | None = None, + routes: Sequence[BaseRoute] | None = None, + name: str | None = None, + *, + middleware: Sequence[Middleware] | None = None, + ) -> None: + assert path == "" or path.startswith("/"), "Routed paths must start with '/'" + assert app is not None or routes is not None, 
"Either 'app=...', or 'routes=' must be specified" + self.path = path.rstrip("/") + if app is not None: + self._base_app: ASGIApp = app + else: + self._base_app = Router(routes=routes) + self.app = self._base_app + if middleware is not None: + for cls, args, kwargs in reversed(middleware): + self.app = cls(self.app, *args, **kwargs) + self.name = name + self.path_regex, self.path_format, self.param_convertors = compile_path(self.path + "/{path:path}") + + @property + def routes(self) -> list[BaseRoute]: + return getattr(self._base_app, "routes", []) + + def matches(self, scope: Scope) -> tuple[Match, Scope]: + path_params: dict[str, Any] + if scope["type"] in ("http", "websocket"): # pragma: no branch + root_path = scope.get("root_path", "") + route_path = get_route_path(scope) + match = self.path_regex.match(route_path) + if match: + matched_params = match.groupdict() + for key, value in matched_params.items(): + matched_params[key] = self.param_convertors[key].convert(value) + remaining_path = "/" + matched_params.pop("path") + matched_path = route_path[: -len(remaining_path)] + path_params = dict(scope.get("path_params", {})) + path_params.update(matched_params) + child_scope = { + "path_params": path_params, + # app_root_path will only be set at the top level scope, + # initialized with the (optional) value of a root_path + # set above/before Starlette. And even though any + # mount will have its own child scope with its own respective + # root_path, the app_root_path will always be available in all + # the child scopes with the same top level value because it's + # set only once here with a default, any other child scope will + # just inherit that app_root_path default value stored in the + # scope. All this is needed to support Request.url_for(), as it + # uses the app_root_path to build the URL path. 
+ "app_root_path": scope.get("app_root_path", root_path), + "root_path": root_path + matched_path, + "endpoint": self.app, + } + return Match.FULL, child_scope + return Match.NONE, {} + + def url_path_for(self, name: str, /, **path_params: Any) -> URLPath: + if self.name is not None and name == self.name and "path" in path_params: + # 'name' matches "". + path_params["path"] = path_params["path"].lstrip("/") + path, remaining_params = replace_params(self.path_format, self.param_convertors, path_params) + if not remaining_params: + return URLPath(path=path) + elif self.name is None or name.startswith(self.name + ":"): + if self.name is None: + # No mount name. + remaining_name = name + else: + # 'name' matches ":". + remaining_name = name[len(self.name) + 1 :] + path_kwarg = path_params.get("path") + path_params["path"] = "" + path_prefix, remaining_params = replace_params(self.path_format, self.param_convertors, path_params) + if path_kwarg is not None: + remaining_params["path"] = path_kwarg + for route in self.routes or []: + try: + url = route.url_path_for(remaining_name, **remaining_params) + return URLPath(path=path_prefix.rstrip("/") + str(url), protocol=url.protocol) + except NoMatchFound: + pass + raise NoMatchFound(name, path_params) + + async def handle(self, scope: Scope, receive: Receive, send: Send) -> None: + await self.app(scope, receive, send) + + def __eq__(self, other: Any) -> bool: + return isinstance(other, Mount) and self.path == other.path and self.app == other.app + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + name = self.name or "" + return f"{class_name}(path={self.path!r}, name={name!r}, app={self.app!r})" + + +class Host(BaseRoute): + def __init__(self, host: str, app: ASGIApp, name: str | None = None) -> None: + assert not host.startswith("/"), "Host must not start with '/'" + self.host = host + self.app = app + self.name = name + self.host_regex, self.host_format, self.param_convertors = compile_path(host) + + 
@property + def routes(self) -> list[BaseRoute]: + return getattr(self.app, "routes", []) + + def matches(self, scope: Scope) -> tuple[Match, Scope]: + if scope["type"] in ("http", "websocket"): # pragma:no branch + headers = Headers(scope=scope) + host = headers.get("host", "").split(":")[0] + match = self.host_regex.match(host) + if match: + matched_params = match.groupdict() + for key, value in matched_params.items(): + matched_params[key] = self.param_convertors[key].convert(value) + path_params = dict(scope.get("path_params", {})) + path_params.update(matched_params) + child_scope = {"path_params": path_params, "endpoint": self.app} + return Match.FULL, child_scope + return Match.NONE, {} + + def url_path_for(self, name: str, /, **path_params: Any) -> URLPath: + if self.name is not None and name == self.name and "path" in path_params: + # 'name' matches "". + path = path_params.pop("path") + host, remaining_params = replace_params(self.host_format, self.param_convertors, path_params) + if not remaining_params: + return URLPath(path=path, host=host) + elif self.name is None or name.startswith(self.name + ":"): + if self.name is None: + # No mount name. + remaining_name = name + else: + # 'name' matches ":". 
+ remaining_name = name[len(self.name) + 1 :] + host, remaining_params = replace_params(self.host_format, self.param_convertors, path_params) + for route in self.routes or []: + try: + url = route.url_path_for(remaining_name, **remaining_params) + return URLPath(path=str(url), protocol=url.protocol, host=host) + except NoMatchFound: + pass + raise NoMatchFound(name, path_params) + + async def handle(self, scope: Scope, receive: Receive, send: Send) -> None: + await self.app(scope, receive, send) + + def __eq__(self, other: Any) -> bool: + return isinstance(other, Host) and self.host == other.host and self.app == other.app + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + name = self.name or "" + return f"{class_name}(host={self.host!r}, name={name!r}, app={self.app!r})" + + +_T = TypeVar("_T") + + +class _AsyncLiftContextManager(AbstractAsyncContextManager[_T]): + def __init__(self, cm: AbstractContextManager[_T]): + self._cm = cm + + async def __aenter__(self) -> _T: + return self._cm.__enter__() + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: types.TracebackType | None, + ) -> bool | None: + return self._cm.__exit__(exc_type, exc_value, traceback) + + +def _wrap_gen_lifespan_context( + lifespan_context: Callable[[Any], Generator[Any, Any, Any]], +) -> Callable[[Any], AbstractAsyncContextManager[Any]]: + cmgr = contextlib.contextmanager(lifespan_context) + + @functools.wraps(cmgr) + def wrapper(app: Any) -> _AsyncLiftContextManager[Any]: + return _AsyncLiftContextManager(cmgr(app)) + + return wrapper + + +class _DefaultLifespan: + def __init__(self, router: Router): + self._router = router + + async def __aenter__(self) -> None: + await self._router.startup() + + async def __aexit__(self, *exc_info: object) -> None: + await self._router.shutdown() + + def __call__(self: _T, app: object) -> _T: + return self + + +class Router: + def __init__( + self, + routes: 
Sequence[BaseRoute] | None = None, + redirect_slashes: bool = True, + default: ASGIApp | None = None, + on_startup: Sequence[Callable[[], Any]] | None = None, + on_shutdown: Sequence[Callable[[], Any]] | None = None, + # the generic to Lifespan[AppType] is the type of the top level application + # which the router cannot know statically, so we use Any + lifespan: Lifespan[Any] | None = None, + *, + middleware: Sequence[Middleware] | None = None, + ) -> None: + self.routes = [] if routes is None else list(routes) + self.redirect_slashes = redirect_slashes + self.default = self.not_found if default is None else default + self.on_startup = [] if on_startup is None else list(on_startup) + self.on_shutdown = [] if on_shutdown is None else list(on_shutdown) + + if on_startup or on_shutdown: + warnings.warn( + "The on_startup and on_shutdown parameters are deprecated, and they " + "will be removed on version 1.0. Use the lifespan parameter instead. " + "See more about it on https://www.starlette.io/lifespan/.", + DeprecationWarning, + ) + if lifespan: + warnings.warn( + "The `lifespan` parameter cannot be used with `on_startup` or " + "`on_shutdown`. Both `on_startup` and `on_shutdown` will be " + "ignored." 
+ ) + + if lifespan is None: + self.lifespan_context: Lifespan[Any] = _DefaultLifespan(self) + + elif inspect.isasyncgenfunction(lifespan): + warnings.warn( + "async generator function lifespans are deprecated, " + "use an @contextlib.asynccontextmanager function instead", + DeprecationWarning, + ) + self.lifespan_context = asynccontextmanager( + lifespan, + ) + elif inspect.isgeneratorfunction(lifespan): + warnings.warn( + "generator function lifespans are deprecated, use an @contextlib.asynccontextmanager function instead", + DeprecationWarning, + ) + self.lifespan_context = _wrap_gen_lifespan_context( + lifespan, + ) + else: + self.lifespan_context = lifespan + + self.middleware_stack = self.app + if middleware: + for cls, args, kwargs in reversed(middleware): + self.middleware_stack = cls(self.middleware_stack, *args, **kwargs) + + async def not_found(self, scope: Scope, receive: Receive, send: Send) -> None: + if scope["type"] == "websocket": + websocket_close = WebSocketClose() + await websocket_close(scope, receive, send) + return + + # If we're running inside a starlette application then raise an + # exception, so that the configurable exception handler can deal with + # returning the response. For plain ASGI apps, just return the response. + if "app" in scope: + raise HTTPException(status_code=404) + else: + response = PlainTextResponse("Not Found", status_code=404) + await response(scope, receive, send) + + def url_path_for(self, name: str, /, **path_params: Any) -> URLPath: + for route in self.routes: + try: + return route.url_path_for(name, **path_params) + except NoMatchFound: + pass + raise NoMatchFound(name, path_params) + + async def startup(self) -> None: + """ + Run any `.on_startup` event handlers. + """ + for handler in self.on_startup: + if is_async_callable(handler): + await handler() + else: + handler() + + async def shutdown(self) -> None: + """ + Run any `.on_shutdown` event handlers. 
+ """ + for handler in self.on_shutdown: + if is_async_callable(handler): + await handler() + else: + handler() + + async def lifespan(self, scope: Scope, receive: Receive, send: Send) -> None: + """ + Handle ASGI lifespan messages, which allows us to manage application + startup and shutdown events. + """ + started = False + app: Any = scope.get("app") + await receive() + try: + async with self.lifespan_context(app) as maybe_state: + if maybe_state is not None: + if "state" not in scope: + raise RuntimeError('The server does not support "state" in the lifespan scope.') + scope["state"].update(maybe_state) + await send({"type": "lifespan.startup.complete"}) + started = True + await receive() + except BaseException: + exc_text = traceback.format_exc() + if started: + await send({"type": "lifespan.shutdown.failed", "message": exc_text}) + else: + await send({"type": "lifespan.startup.failed", "message": exc_text}) + raise + else: + await send({"type": "lifespan.shutdown.complete"}) + + async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: + """ + The main entry point to the Router class. + """ + await self.middleware_stack(scope, receive, send) + + async def app(self, scope: Scope, receive: Receive, send: Send) -> None: + assert scope["type"] in ("http", "websocket", "lifespan") + + if "router" not in scope: + scope["router"] = self + + if scope["type"] == "lifespan": + await self.lifespan(scope, receive, send) + return + + partial = None + + for route in self.routes: + # Determine if any route matches the incoming scope, + # and hand over to the matching route if found. + match, child_scope = route.matches(scope) + if match == Match.FULL: + scope.update(child_scope) + await route.handle(scope, receive, send) + return + elif match == Match.PARTIAL and partial is None: + partial = route + partial_scope = child_scope + + if partial is not None: + #  Handle partial matches. 
These are cases where an endpoint is + # able to handle the request, but is not a preferred option. + # We use this in particular to deal with "405 Method Not Allowed". + scope.update(partial_scope) + await partial.handle(scope, receive, send) + return + + route_path = get_route_path(scope) + if scope["type"] == "http" and self.redirect_slashes and route_path != "/": + redirect_scope = dict(scope) + if route_path.endswith("/"): + redirect_scope["path"] = redirect_scope["path"].rstrip("/") + else: + redirect_scope["path"] = redirect_scope["path"] + "/" + + for route in self.routes: + match, child_scope = route.matches(redirect_scope) + if match != Match.NONE: + redirect_url = URL(scope=redirect_scope) + response = RedirectResponse(url=str(redirect_url)) + await response(scope, receive, send) + return + + await self.default(scope, receive, send) + + def __eq__(self, other: Any) -> bool: + return isinstance(other, Router) and self.routes == other.routes + + def mount(self, path: str, app: ASGIApp, name: str | None = None) -> None: # pragma: no cover + route = Mount(path, app=app, name=name) + self.routes.append(route) + + def host(self, host: str, app: ASGIApp, name: str | None = None) -> None: # pragma: no cover + route = Host(host, app=app, name=name) + self.routes.append(route) + + def add_route( + self, + path: str, + endpoint: Callable[[Request], Awaitable[Response] | Response], + methods: Collection[str] | None = None, + name: str | None = None, + include_in_schema: bool = True, + ) -> None: # pragma: no cover + route = Route( + path, + endpoint=endpoint, + methods=methods, + name=name, + include_in_schema=include_in_schema, + ) + self.routes.append(route) + + def add_websocket_route( + self, + path: str, + endpoint: Callable[[WebSocket], Awaitable[None]], + name: str | None = None, + ) -> None: # pragma: no cover + route = WebSocketRoute(path, endpoint=endpoint, name=name) + self.routes.append(route) + + def route( + self, + path: str, + methods: 
Collection[str] | None = None, + name: str | None = None, + include_in_schema: bool = True, + ) -> Callable: # type: ignore[type-arg] + """ + We no longer document this decorator style API, and its usage is discouraged. + Instead you should use the following approach: + + >>> routes = [Route(path, endpoint=...), ...] + >>> app = Starlette(routes=routes) + """ + warnings.warn( + "The `route` decorator is deprecated, and will be removed in version 1.0.0." + "Refer to https://www.starlette.io/routing/#http-routing for the recommended approach.", + DeprecationWarning, + ) + + def decorator(func: Callable) -> Callable: # type: ignore[type-arg] + self.add_route( + path, + func, + methods=methods, + name=name, + include_in_schema=include_in_schema, + ) + return func + + return decorator + + def websocket_route(self, path: str, name: str | None = None) -> Callable: # type: ignore[type-arg] + """ + We no longer document this decorator style API, and its usage is discouraged. + Instead you should use the following approach: + + >>> routes = [WebSocketRoute(path, endpoint=...), ...] + >>> app = Starlette(routes=routes) + """ + warnings.warn( + "The `websocket_route` decorator is deprecated, and will be removed in version 1.0.0. Refer to " + "https://www.starlette.io/routing/#websocket-routing for the recommended approach.", + DeprecationWarning, + ) + + def decorator(func: Callable) -> Callable: # type: ignore[type-arg] + self.add_websocket_route(path, func, name=name) + return func + + return decorator + + def add_event_handler(self, event_type: str, func: Callable[[], Any]) -> None: # pragma: no cover + assert event_type in ("startup", "shutdown") + + if event_type == "startup": + self.on_startup.append(func) + else: + self.on_shutdown.append(func) + + def on_event(self, event_type: str) -> Callable: # type: ignore[type-arg] + warnings.warn( + "The `on_event` decorator is deprecated, and will be removed in version 1.0.0. 
" + "Refer to https://www.starlette.io/lifespan/ for recommended approach.", + DeprecationWarning, + ) + + def decorator(func: Callable) -> Callable: # type: ignore[type-arg] + self.add_event_handler(event_type, func) + return func + + return decorator diff --git a/venv/lib/python3.10/site-packages/starlette/schemas.py b/venv/lib/python3.10/site-packages/starlette/schemas.py new file mode 100644 index 0000000000000000000000000000000000000000..e97f83ecf86c78b5cb6e1945c905417b107df910 --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/schemas.py @@ -0,0 +1,147 @@ +from __future__ import annotations + +import inspect +import re +from typing import Any, Callable, NamedTuple + +from starlette.requests import Request +from starlette.responses import Response +from starlette.routing import BaseRoute, Host, Mount, Route + +try: + import yaml +except ModuleNotFoundError: # pragma: no cover + yaml = None # type: ignore[assignment] + + +class OpenAPIResponse(Response): + media_type = "application/vnd.oai.openapi" + + def render(self, content: Any) -> bytes: + assert yaml is not None, "`pyyaml` must be installed to use OpenAPIResponse." + assert isinstance(content, dict), "The schema passed to OpenAPIResponse should be a dictionary." 
+ return yaml.dump(content, default_flow_style=False).encode("utf-8") + + +class EndpointInfo(NamedTuple): + path: str + http_method: str + func: Callable[..., Any] + + +_remove_converter_pattern = re.compile(r":\w+}") + + +class BaseSchemaGenerator: + def get_schema(self, routes: list[BaseRoute]) -> dict[str, Any]: + raise NotImplementedError() # pragma: no cover + + def get_endpoints(self, routes: list[BaseRoute]) -> list[EndpointInfo]: + """ + Given the routes, yields the following information: + + - path + eg: /users/ + - http_method + one of 'get', 'post', 'put', 'patch', 'delete', 'options' + - func + method ready to extract the docstring + """ + endpoints_info: list[EndpointInfo] = [] + + for route in routes: + if isinstance(route, (Mount, Host)): + routes = route.routes or [] + if isinstance(route, Mount): + path = self._remove_converter(route.path) + else: + path = "" + sub_endpoints = [ + EndpointInfo( + path="".join((path, sub_endpoint.path)), + http_method=sub_endpoint.http_method, + func=sub_endpoint.func, + ) + for sub_endpoint in self.get_endpoints(routes) + ] + endpoints_info.extend(sub_endpoints) + + elif not isinstance(route, Route) or not route.include_in_schema: + continue + + elif inspect.isfunction(route.endpoint) or inspect.ismethod(route.endpoint): + path = self._remove_converter(route.path) + for method in route.methods or ["GET"]: + if method == "HEAD": + continue + endpoints_info.append(EndpointInfo(path, method.lower(), route.endpoint)) + else: + path = self._remove_converter(route.path) + for method in ["get", "post", "put", "patch", "delete", "options"]: + if not hasattr(route.endpoint, method): + continue + func = getattr(route.endpoint, method) + endpoints_info.append(EndpointInfo(path, method.lower(), func)) + + return endpoints_info + + def _remove_converter(self, path: str) -> str: + """ + Remove the converter from the path. 
+ For example, a route like this: + Route("/users/{id:int}", endpoint=get_user, methods=["GET"]) + Should be represented as `/users/{id}` in the OpenAPI schema. + """ + return _remove_converter_pattern.sub("}", path) + + def parse_docstring(self, func_or_method: Callable[..., Any]) -> dict[str, Any]: + """ + Given a function, parse the docstring as YAML and return a dictionary of info. + """ + docstring = func_or_method.__doc__ + if not docstring: + return {} + + assert yaml is not None, "`pyyaml` must be installed to use parse_docstring." + + # We support having regular docstrings before the schema + # definition. Here we return just the schema part from + # the docstring. + docstring = docstring.split("---")[-1] + + parsed = yaml.safe_load(docstring) + + if not isinstance(parsed, dict): + # A regular docstring (not yaml formatted) can return + # a simple string here, which wouldn't follow the schema. + return {} + + return parsed + + def OpenAPIResponse(self, request: Request) -> Response: + routes = request.app.routes + schema = self.get_schema(routes=routes) + return OpenAPIResponse(schema) + + +class SchemaGenerator(BaseSchemaGenerator): + def __init__(self, base_schema: dict[str, Any]) -> None: + self.base_schema = base_schema + + def get_schema(self, routes: list[BaseRoute]) -> dict[str, Any]: + schema = dict(self.base_schema) + schema.setdefault("paths", {}) + endpoints_info = self.get_endpoints(routes) + + for endpoint in endpoints_info: + parsed = self.parse_docstring(endpoint.func) + + if not parsed: + continue + + if endpoint.path not in schema["paths"]: + schema["paths"][endpoint.path] = {} + + schema["paths"][endpoint.path][endpoint.http_method] = parsed + + return schema diff --git a/venv/lib/python3.10/site-packages/starlette/staticfiles.py b/venv/lib/python3.10/site-packages/starlette/staticfiles.py new file mode 100644 index 0000000000000000000000000000000000000000..7fba9aa95b51787f848e9a7be2bff1c7a85ab5c1 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/starlette/staticfiles.py @@ -0,0 +1,220 @@ +from __future__ import annotations + +import errno +import importlib.util +import os +import stat +from email.utils import parsedate +from typing import Union + +import anyio +import anyio.to_thread + +from starlette._utils import get_route_path +from starlette.datastructures import URL, Headers +from starlette.exceptions import HTTPException +from starlette.responses import FileResponse, RedirectResponse, Response +from starlette.types import Receive, Scope, Send + +PathLike = Union[str, "os.PathLike[str]"] + + +class NotModifiedResponse(Response): + NOT_MODIFIED_HEADERS = ( + "cache-control", + "content-location", + "date", + "etag", + "expires", + "vary", + ) + + def __init__(self, headers: Headers): + super().__init__( + status_code=304, + headers={name: value for name, value in headers.items() if name in self.NOT_MODIFIED_HEADERS}, + ) + + +class StaticFiles: + def __init__( + self, + *, + directory: PathLike | None = None, + packages: list[str | tuple[str, str]] | None = None, + html: bool = False, + check_dir: bool = True, + follow_symlink: bool = False, + ) -> None: + self.directory = directory + self.packages = packages + self.all_directories = self.get_directories(directory, packages) + self.html = html + self.config_checked = False + self.follow_symlink = follow_symlink + if check_dir and directory is not None and not os.path.isdir(directory): + raise RuntimeError(f"Directory '{directory}' does not exist") + + def get_directories( + self, + directory: PathLike | None = None, + packages: list[str | tuple[str, str]] | None = None, + ) -> list[PathLike]: + """ + Given `directory` and `packages` arguments, return a list of all the + directories that should be used for serving static files from. 
+ """ + directories = [] + if directory is not None: + directories.append(directory) + + for package in packages or []: + if isinstance(package, tuple): + package, statics_dir = package + else: + statics_dir = "statics" + spec = importlib.util.find_spec(package) + assert spec is not None, f"Package {package!r} could not be found." + assert spec.origin is not None, f"Package {package!r} could not be found." + package_directory = os.path.normpath(os.path.join(spec.origin, "..", statics_dir)) + assert os.path.isdir(package_directory), ( + f"Directory '{statics_dir!r}' in package {package!r} could not be found." + ) + directories.append(package_directory) + + return directories + + async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: + """ + The ASGI entry point. + """ + assert scope["type"] == "http" + + if not self.config_checked: + await self.check_config() + self.config_checked = True + + path = self.get_path(scope) + response = await self.get_response(path, scope) + await response(scope, receive, send) + + def get_path(self, scope: Scope) -> str: + """ + Given the ASGI scope, return the `path` string to serve up, + with OS specific path separators, and any '..', '.' components removed. + """ + route_path = get_route_path(scope) + return os.path.normpath(os.path.join(*route_path.split("/"))) + + async def get_response(self, path: str, scope: Scope) -> Response: + """ + Returns an HTTP response, given the incoming path, method and request headers. + """ + if scope["method"] not in ("GET", "HEAD"): + raise HTTPException(status_code=405) + + try: + full_path, stat_result = await anyio.to_thread.run_sync(self.lookup_path, path) + except PermissionError: + raise HTTPException(status_code=401) + except OSError as exc: + # Filename is too long, so it can't be a valid static file. 
+ if exc.errno == errno.ENAMETOOLONG: + raise HTTPException(status_code=404) + + raise exc + + if stat_result and stat.S_ISREG(stat_result.st_mode): + # We have a static file to serve. + return self.file_response(full_path, stat_result, scope) + + elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html: + # We're in HTML mode, and have got a directory URL. + # Check if we have 'index.html' file to serve. + index_path = os.path.join(path, "index.html") + full_path, stat_result = await anyio.to_thread.run_sync(self.lookup_path, index_path) + if stat_result is not None and stat.S_ISREG(stat_result.st_mode): + if not scope["path"].endswith("/"): + # Directory URLs should redirect to always end in "/". + url = URL(scope=scope) + url = url.replace(path=url.path + "/") + return RedirectResponse(url=url) + return self.file_response(full_path, stat_result, scope) + + if self.html: + # Check for '404.html' if we're in HTML mode. + full_path, stat_result = await anyio.to_thread.run_sync(self.lookup_path, "404.html") + if stat_result and stat.S_ISREG(stat_result.st_mode): + return FileResponse(full_path, stat_result=stat_result, status_code=404) + raise HTTPException(status_code=404) + + def lookup_path(self, path: str) -> tuple[str, os.stat_result | None]: + for directory in self.all_directories: + joined_path = os.path.join(directory, path) + if self.follow_symlink: + full_path = os.path.abspath(joined_path) + directory = os.path.abspath(directory) + else: + full_path = os.path.realpath(joined_path) + directory = os.path.realpath(directory) + if os.path.commonpath([full_path, directory]) != str(directory): + # Don't allow misbehaving clients to break out of the static files directory. 
+ continue + try: + return full_path, os.stat(full_path) + except (FileNotFoundError, NotADirectoryError): + continue + return "", None + + def file_response( + self, + full_path: PathLike, + stat_result: os.stat_result, + scope: Scope, + status_code: int = 200, + ) -> Response: + request_headers = Headers(scope=scope) + + response = FileResponse(full_path, status_code=status_code, stat_result=stat_result) + if self.is_not_modified(response.headers, request_headers): + return NotModifiedResponse(response.headers) + return response + + async def check_config(self) -> None: + """ + Perform a one-off configuration check that StaticFiles is actually + pointed at a directory, so that we can raise loud errors rather than + just returning 404 responses. + """ + if self.directory is None: + return + + try: + stat_result = await anyio.to_thread.run_sync(os.stat, self.directory) + except FileNotFoundError: + raise RuntimeError(f"StaticFiles directory '{self.directory}' does not exist.") + if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)): + raise RuntimeError(f"StaticFiles path '{self.directory}' is not a directory.") + + def is_not_modified(self, response_headers: Headers, request_headers: Headers) -> bool: + """ + Given the request and response headers, return `True` if an HTTP + "Not Modified" response could be returned instead. 
+ """ + try: + if_none_match = request_headers["if-none-match"] + etag = response_headers["etag"] + if etag in [tag.strip(" W/") for tag in if_none_match.split(",")]: + return True + except KeyError: + pass + + try: + if_modified_since = parsedate(request_headers["if-modified-since"]) + last_modified = parsedate(response_headers["last-modified"]) + if if_modified_since is not None and last_modified is not None and if_modified_since >= last_modified: + return True + except KeyError: + pass + + return False diff --git a/venv/lib/python3.10/site-packages/starlette/status.py b/venv/lib/python3.10/site-packages/starlette/status.py new file mode 100644 index 0000000000000000000000000000000000000000..54c1fb7d0df128bfe16fb4cb03cdf23b8225af94 --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/status.py @@ -0,0 +1,95 @@ +""" +HTTP codes +See HTTP Status Code Registry: +https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml + +And RFC 2324 - https://tools.ietf.org/html/rfc2324 +""" + +from __future__ import annotations + +HTTP_100_CONTINUE = 100 +HTTP_101_SWITCHING_PROTOCOLS = 101 +HTTP_102_PROCESSING = 102 +HTTP_103_EARLY_HINTS = 103 +HTTP_200_OK = 200 +HTTP_201_CREATED = 201 +HTTP_202_ACCEPTED = 202 +HTTP_203_NON_AUTHORITATIVE_INFORMATION = 203 +HTTP_204_NO_CONTENT = 204 +HTTP_205_RESET_CONTENT = 205 +HTTP_206_PARTIAL_CONTENT = 206 +HTTP_207_MULTI_STATUS = 207 +HTTP_208_ALREADY_REPORTED = 208 +HTTP_226_IM_USED = 226 +HTTP_300_MULTIPLE_CHOICES = 300 +HTTP_301_MOVED_PERMANENTLY = 301 +HTTP_302_FOUND = 302 +HTTP_303_SEE_OTHER = 303 +HTTP_304_NOT_MODIFIED = 304 +HTTP_305_USE_PROXY = 305 +HTTP_306_RESERVED = 306 +HTTP_307_TEMPORARY_REDIRECT = 307 +HTTP_308_PERMANENT_REDIRECT = 308 +HTTP_400_BAD_REQUEST = 400 +HTTP_401_UNAUTHORIZED = 401 +HTTP_402_PAYMENT_REQUIRED = 402 +HTTP_403_FORBIDDEN = 403 +HTTP_404_NOT_FOUND = 404 +HTTP_405_METHOD_NOT_ALLOWED = 405 +HTTP_406_NOT_ACCEPTABLE = 406 +HTTP_407_PROXY_AUTHENTICATION_REQUIRED = 407 
+HTTP_408_REQUEST_TIMEOUT = 408 +HTTP_409_CONFLICT = 409 +HTTP_410_GONE = 410 +HTTP_411_LENGTH_REQUIRED = 411 +HTTP_412_PRECONDITION_FAILED = 412 +HTTP_413_REQUEST_ENTITY_TOO_LARGE = 413 +HTTP_414_REQUEST_URI_TOO_LONG = 414 +HTTP_415_UNSUPPORTED_MEDIA_TYPE = 415 +HTTP_416_REQUESTED_RANGE_NOT_SATISFIABLE = 416 +HTTP_417_EXPECTATION_FAILED = 417 +HTTP_418_IM_A_TEAPOT = 418 +HTTP_421_MISDIRECTED_REQUEST = 421 +HTTP_422_UNPROCESSABLE_ENTITY = 422 +HTTP_423_LOCKED = 423 +HTTP_424_FAILED_DEPENDENCY = 424 +HTTP_425_TOO_EARLY = 425 +HTTP_426_UPGRADE_REQUIRED = 426 +HTTP_428_PRECONDITION_REQUIRED = 428 +HTTP_429_TOO_MANY_REQUESTS = 429 +HTTP_431_REQUEST_HEADER_FIELDS_TOO_LARGE = 431 +HTTP_451_UNAVAILABLE_FOR_LEGAL_REASONS = 451 +HTTP_500_INTERNAL_SERVER_ERROR = 500 +HTTP_501_NOT_IMPLEMENTED = 501 +HTTP_502_BAD_GATEWAY = 502 +HTTP_503_SERVICE_UNAVAILABLE = 503 +HTTP_504_GATEWAY_TIMEOUT = 504 +HTTP_505_HTTP_VERSION_NOT_SUPPORTED = 505 +HTTP_506_VARIANT_ALSO_NEGOTIATES = 506 +HTTP_507_INSUFFICIENT_STORAGE = 507 +HTTP_508_LOOP_DETECTED = 508 +HTTP_510_NOT_EXTENDED = 510 +HTTP_511_NETWORK_AUTHENTICATION_REQUIRED = 511 + + +""" +WebSocket codes +https://www.iana.org/assignments/websocket/websocket.xml#close-code-number +https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent +""" +WS_1000_NORMAL_CLOSURE = 1000 +WS_1001_GOING_AWAY = 1001 +WS_1002_PROTOCOL_ERROR = 1002 +WS_1003_UNSUPPORTED_DATA = 1003 +WS_1005_NO_STATUS_RCVD = 1005 +WS_1006_ABNORMAL_CLOSURE = 1006 +WS_1007_INVALID_FRAME_PAYLOAD_DATA = 1007 +WS_1008_POLICY_VIOLATION = 1008 +WS_1009_MESSAGE_TOO_BIG = 1009 +WS_1010_MANDATORY_EXT = 1010 +WS_1011_INTERNAL_ERROR = 1011 +WS_1012_SERVICE_RESTART = 1012 +WS_1013_TRY_AGAIN_LATER = 1013 +WS_1014_BAD_GATEWAY = 1014 +WS_1015_TLS_HANDSHAKE = 1015 diff --git a/venv/lib/python3.10/site-packages/starlette/templating.py b/venv/lib/python3.10/site-packages/starlette/templating.py new file mode 100644 index 
0000000000000000000000000000000000000000..10fa02710acf3adcea62d8b10812c077c03a9280 --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/templating.py @@ -0,0 +1,217 @@ +from __future__ import annotations + +import warnings +from collections.abc import Mapping, Sequence +from os import PathLike +from typing import Any, Callable, cast, overload + +from starlette.background import BackgroundTask +from starlette.datastructures import URL +from starlette.requests import Request +from starlette.responses import HTMLResponse +from starlette.types import Receive, Scope, Send + +try: + import jinja2 + + # @contextfunction was renamed to @pass_context in Jinja 3.0, and was removed in 3.1 + # hence we try to get pass_context (most installs will be >=3.1) + # and fall back to contextfunction, + # adding a type ignore for mypy to let us access an attribute that may not exist + if hasattr(jinja2, "pass_context"): + pass_context = jinja2.pass_context + else: # pragma: no cover + pass_context = jinja2.contextfunction # type: ignore[attr-defined] +except ModuleNotFoundError: # pragma: no cover + jinja2 = None # type: ignore[assignment] + + +class _TemplateResponse(HTMLResponse): + def __init__( + self, + template: Any, + context: dict[str, Any], + status_code: int = 200, + headers: Mapping[str, str] | None = None, + media_type: str | None = None, + background: BackgroundTask | None = None, + ): + self.template = template + self.context = context + content = template.render(context) + super().__init__(content, status_code, headers, media_type, background) + + async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: + request = self.context.get("request", {}) + extensions = request.get("extensions", {}) + if "http.response.debug" in extensions: # pragma: no branch + await send( + { + "type": "http.response.debug", + "info": { + "template": self.template, + "context": self.context, + }, + } + ) + await super().__call__(scope, receive, send) + + +class 
Jinja2Templates: + """ + templates = Jinja2Templates("templates") + + return templates.TemplateResponse("index.html", {"request": request}) + """ + + @overload + def __init__( + self, + directory: str | PathLike[str] | Sequence[str | PathLike[str]], + *, + context_processors: list[Callable[[Request], dict[str, Any]]] | None = None, + **env_options: Any, + ) -> None: ... + + @overload + def __init__( + self, + *, + env: jinja2.Environment, + context_processors: list[Callable[[Request], dict[str, Any]]] | None = None, + ) -> None: ... + + def __init__( + self, + directory: str | PathLike[str] | Sequence[str | PathLike[str]] | None = None, + *, + context_processors: list[Callable[[Request], dict[str, Any]]] | None = None, + env: jinja2.Environment | None = None, + **env_options: Any, + ) -> None: + if env_options: + warnings.warn( + "Extra environment options are deprecated. Use a preconfigured jinja2.Environment instead.", + DeprecationWarning, + ) + assert jinja2 is not None, "jinja2 must be installed to use Jinja2Templates" + assert bool(directory) ^ bool(env), "either 'directory' or 'env' arguments must be passed" + self.context_processors = context_processors or [] + if directory is not None: + self.env = self._create_env(directory, **env_options) + elif env is not None: # pragma: no branch + self.env = env + + self._setup_env_defaults(self.env) + + def _create_env( + self, + directory: str | PathLike[str] | Sequence[str | PathLike[str]], + **env_options: Any, + ) -> jinja2.Environment: + loader = jinja2.FileSystemLoader(directory) + env_options.setdefault("loader", loader) + env_options.setdefault("autoescape", True) + + return jinja2.Environment(**env_options) + + def _setup_env_defaults(self, env: jinja2.Environment) -> None: + @pass_context + def url_for( + context: dict[str, Any], + name: str, + /, + **path_params: Any, + ) -> URL: + request: Request = context["request"] + return request.url_for(name, **path_params) + + env.globals.setdefault("url_for", 
url_for) + + def get_template(self, name: str) -> jinja2.Template: + return self.env.get_template(name) + + @overload + def TemplateResponse( + self, + request: Request, + name: str, + context: dict[str, Any] | None = None, + status_code: int = 200, + headers: Mapping[str, str] | None = None, + media_type: str | None = None, + background: BackgroundTask | None = None, + ) -> _TemplateResponse: ... + + @overload + def TemplateResponse( + self, + name: str, + context: dict[str, Any] | None = None, + status_code: int = 200, + headers: Mapping[str, str] | None = None, + media_type: str | None = None, + background: BackgroundTask | None = None, + ) -> _TemplateResponse: + # Deprecated usage + ... + + def TemplateResponse(self, *args: Any, **kwargs: Any) -> _TemplateResponse: + if args: + if isinstance(args[0], str): # the first argument is template name (old style) + warnings.warn( + "The `name` is not the first parameter anymore. " + "The first parameter should be the `Request` instance.\n" + 'Replace `TemplateResponse(name, {"request": request})` by `TemplateResponse(request, name)`.', + DeprecationWarning, + ) + + name = args[0] + context = args[1] if len(args) > 1 else kwargs.get("context", {}) + status_code = args[2] if len(args) > 2 else kwargs.get("status_code", 200) + headers = args[3] if len(args) > 3 else kwargs.get("headers") + media_type = args[4] if len(args) > 4 else kwargs.get("media_type") + background = args[5] if len(args) > 5 else kwargs.get("background") + + if "request" not in context: + raise ValueError('context must include a "request" key') + request = context["request"] + else: # the first argument is a request instance (new style) + request = args[0] + name = args[1] if len(args) > 1 else kwargs["name"] + context = args[2] if len(args) > 2 else kwargs.get("context", {}) + status_code = args[3] if len(args) > 3 else kwargs.get("status_code", 200) + headers = args[4] if len(args) > 4 else kwargs.get("headers") + media_type = args[5] if len(args) 
> 5 else kwargs.get("media_type") + background = args[6] if len(args) > 6 else kwargs.get("background") + else: # all arguments are kwargs + if "request" not in kwargs: + warnings.warn( + "The `TemplateResponse` now requires the `request` argument.\n" + 'Replace `TemplateResponse(name, {"context": context})` by `TemplateResponse(request, name)`.', + DeprecationWarning, + ) + if "request" not in kwargs.get("context", {}): + raise ValueError('context must include a "request" key') + + context = kwargs.get("context", {}) + request = kwargs.get("request", context.get("request")) + name = cast(str, kwargs["name"]) + status_code = kwargs.get("status_code", 200) + headers = kwargs.get("headers") + media_type = kwargs.get("media_type") + background = kwargs.get("background") + + context.setdefault("request", request) + for context_processor in self.context_processors: + context.update(context_processor(request)) + + template = self.get_template(name) + return _TemplateResponse( + template, + context, + status_code=status_code, + headers=headers, + media_type=media_type, + background=background, + ) diff --git a/venv/lib/python3.10/site-packages/starlette/testclient.py b/venv/lib/python3.10/site-packages/starlette/testclient.py new file mode 100644 index 0000000000000000000000000000000000000000..575b1a71f22bea0976f2fa22b62e041e934ec8ba --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/testclient.py @@ -0,0 +1,745 @@ +from __future__ import annotations + +import contextlib +import inspect +import io +import json +import math +import sys +import warnings +from collections.abc import Awaitable, Generator, Iterable, Mapping, MutableMapping, Sequence +from concurrent.futures import Future +from contextlib import AbstractContextManager +from types import GeneratorType +from typing import ( + Any, + Callable, + Literal, + TypedDict, + Union, + cast, +) +from urllib.parse import unquote, urljoin + +import anyio +import anyio.abc +import anyio.from_thread +from 
anyio.streams.stapled import StapledObjectStream + +from starlette._utils import is_async_callable +from starlette.types import ASGIApp, Message, Receive, Scope, Send +from starlette.websockets import WebSocketDisconnect + +if sys.version_info >= (3, 10): # pragma: no cover + from typing import TypeGuard +else: # pragma: no cover + from typing_extensions import TypeGuard + +if sys.version_info >= (3, 11): # pragma: no cover + from typing import Self +else: # pragma: no cover + from typing_extensions import Self + +try: + import httpx +except ModuleNotFoundError: # pragma: no cover + raise RuntimeError( + "The starlette.testclient module requires the httpx package to be installed.\n" + "You can install this with:\n" + " $ pip install httpx\n" + ) +_PortalFactoryType = Callable[[], AbstractContextManager[anyio.abc.BlockingPortal]] + +ASGIInstance = Callable[[Receive, Send], Awaitable[None]] +ASGI2App = Callable[[Scope], ASGIInstance] +ASGI3App = Callable[[Scope, Receive, Send], Awaitable[None]] + + +_RequestData = Mapping[str, Union[str, Iterable[str], bytes]] + + +def _is_asgi3(app: ASGI2App | ASGI3App) -> TypeGuard[ASGI3App]: + if inspect.isclass(app): + return hasattr(app, "__await__") + return is_async_callable(app) + + +class _WrapASGI2: + """ + Provide an ASGI3 interface onto an ASGI2 app. + """ + + def __init__(self, app: ASGI2App) -> None: + self.app = app + + async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: + instance = self.app(scope) + await instance(receive, send) + + +class _AsyncBackend(TypedDict): + backend: str + backend_options: dict[str, Any] + + +class _Upgrade(Exception): + def __init__(self, session: WebSocketTestSession) -> None: + self.session = session + + +class WebSocketDenialResponse( # type: ignore[misc] + httpx.Response, + WebSocketDisconnect, +): + """ + A special case of `WebSocketDisconnect`, raised in the `TestClient` if the + `WebSocket` is closed before being accepted with a `send_denial_response()`. 
+ """ + + +class WebSocketTestSession: + def __init__( + self, + app: ASGI3App, + scope: Scope, + portal_factory: _PortalFactoryType, + ) -> None: + self.app = app + self.scope = scope + self.accepted_subprotocol = None + self.portal_factory = portal_factory + self.extra_headers = None + + def __enter__(self) -> WebSocketTestSession: + with contextlib.ExitStack() as stack: + self.portal = portal = stack.enter_context(self.portal_factory()) + fut, cs = portal.start_task(self._run) + stack.callback(fut.result) + stack.callback(portal.call, cs.cancel) + self.send({"type": "websocket.connect"}) + message = self.receive() + self._raise_on_close(message) + self.accepted_subprotocol = message.get("subprotocol", None) + self.extra_headers = message.get("headers", None) + stack.callback(self.close, 1000) + self.exit_stack = stack.pop_all() + return self + + def __exit__(self, *args: Any) -> bool | None: + return self.exit_stack.__exit__(*args) + + async def _run(self, *, task_status: anyio.abc.TaskStatus[anyio.CancelScope]) -> None: + """ + The sub-thread in which the websocket session runs. 
+ """ + send: anyio.create_memory_object_stream[Message] = anyio.create_memory_object_stream(math.inf) + send_tx, send_rx = send + receive: anyio.create_memory_object_stream[Message] = anyio.create_memory_object_stream(math.inf) + receive_tx, receive_rx = receive + with send_tx, send_rx, receive_tx, receive_rx, anyio.CancelScope() as cs: + self._receive_tx = receive_tx + self._send_rx = send_rx + task_status.started(cs) + await self.app(self.scope, receive_rx.receive, send_tx.send) + + # wait for cs.cancel to be called before closing streams + await anyio.sleep_forever() + + def _raise_on_close(self, message: Message) -> None: + if message["type"] == "websocket.close": + raise WebSocketDisconnect(code=message.get("code", 1000), reason=message.get("reason", "")) + elif message["type"] == "websocket.http.response.start": + status_code: int = message["status"] + headers: list[tuple[bytes, bytes]] = message["headers"] + body: list[bytes] = [] + while True: + message = self.receive() + assert message["type"] == "websocket.http.response.body" + body.append(message["body"]) + if not message.get("more_body", False): + break + raise WebSocketDenialResponse(status_code=status_code, headers=headers, content=b"".join(body)) + + def send(self, message: Message) -> None: + self.portal.call(self._receive_tx.send, message) + + def send_text(self, data: str) -> None: + self.send({"type": "websocket.receive", "text": data}) + + def send_bytes(self, data: bytes) -> None: + self.send({"type": "websocket.receive", "bytes": data}) + + def send_json(self, data: Any, mode: Literal["text", "binary"] = "text") -> None: + text = json.dumps(data, separators=(",", ":"), ensure_ascii=False) + if mode == "text": + self.send({"type": "websocket.receive", "text": text}) + else: + self.send({"type": "websocket.receive", "bytes": text.encode("utf-8")}) + + def close(self, code: int = 1000, reason: str | None = None) -> None: + self.send({"type": "websocket.disconnect", "code": code, "reason": 
reason}) + + def receive(self) -> Message: + return self.portal.call(self._send_rx.receive) + + def receive_text(self) -> str: + message = self.receive() + self._raise_on_close(message) + return cast(str, message["text"]) + + def receive_bytes(self) -> bytes: + message = self.receive() + self._raise_on_close(message) + return cast(bytes, message["bytes"]) + + def receive_json(self, mode: Literal["text", "binary"] = "text") -> Any: + message = self.receive() + self._raise_on_close(message) + if mode == "text": + text = message["text"] + else: + text = message["bytes"].decode("utf-8") + return json.loads(text) + + +class _TestClientTransport(httpx.BaseTransport): + def __init__( + self, + app: ASGI3App, + portal_factory: _PortalFactoryType, + raise_server_exceptions: bool = True, + root_path: str = "", + *, + client: tuple[str, int], + app_state: dict[str, Any], + ) -> None: + self.app = app + self.raise_server_exceptions = raise_server_exceptions + self.root_path = root_path + self.portal_factory = portal_factory + self.app_state = app_state + self.client = client + + def handle_request(self, request: httpx.Request) -> httpx.Response: + scheme = request.url.scheme + netloc = request.url.netloc.decode(encoding="ascii") + path = request.url.path + raw_path = request.url.raw_path + query = request.url.query.decode(encoding="ascii") + + default_port = {"http": 80, "ws": 80, "https": 443, "wss": 443}[scheme] + + if ":" in netloc: + host, port_string = netloc.split(":", 1) + port = int(port_string) + else: + host = netloc + port = default_port + + # Include the 'host' header. + if "host" in request.headers: + headers: list[tuple[bytes, bytes]] = [] + elif port == default_port: # pragma: no cover + headers = [(b"host", host.encode())] + else: # pragma: no cover + headers = [(b"host", (f"{host}:{port}").encode())] + + # Include other request headers. 
+ headers += [(key.lower().encode(), value.encode()) for key, value in request.headers.multi_items()] + + scope: dict[str, Any] + + if scheme in {"ws", "wss"}: + subprotocol = request.headers.get("sec-websocket-protocol", None) + if subprotocol is None: + subprotocols: Sequence[str] = [] + else: + subprotocols = [value.strip() for value in subprotocol.split(",")] + scope = { + "type": "websocket", + "path": unquote(path), + "raw_path": raw_path.split(b"?", 1)[0], + "root_path": self.root_path, + "scheme": scheme, + "query_string": query.encode(), + "headers": headers, + "client": self.client, + "server": [host, port], + "subprotocols": subprotocols, + "state": self.app_state.copy(), + "extensions": {"websocket.http.response": {}}, + } + session = WebSocketTestSession(self.app, scope, self.portal_factory) + raise _Upgrade(session) + + scope = { + "type": "http", + "http_version": "1.1", + "method": request.method, + "path": unquote(path), + "raw_path": raw_path.split(b"?", 1)[0], + "root_path": self.root_path, + "scheme": scheme, + "query_string": query.encode(), + "headers": headers, + "client": self.client, + "server": [host, port], + "extensions": {"http.response.debug": {}}, + "state": self.app_state.copy(), + } + + request_complete = False + response_started = False + response_complete: anyio.Event + raw_kwargs: dict[str, Any] = {"stream": io.BytesIO()} + template = None + context = None + + async def receive() -> Message: + nonlocal request_complete + + if request_complete: + if not response_complete.is_set(): + await response_complete.wait() + return {"type": "http.disconnect"} + + body = request.read() + if isinstance(body, str): + body_bytes: bytes = body.encode("utf-8") # pragma: no cover + elif body is None: + body_bytes = b"" # pragma: no cover + elif isinstance(body, GeneratorType): + try: # pragma: no cover + chunk = body.send(None) + if isinstance(chunk, str): + chunk = chunk.encode("utf-8") + return {"type": "http.request", "body": chunk, 
"more_body": True} + except StopIteration: # pragma: no cover + request_complete = True + return {"type": "http.request", "body": b""} + else: + body_bytes = body + + request_complete = True + return {"type": "http.request", "body": body_bytes} + + async def send(message: Message) -> None: + nonlocal raw_kwargs, response_started, template, context + + if message["type"] == "http.response.start": + assert not response_started, 'Received multiple "http.response.start" messages.' + raw_kwargs["status_code"] = message["status"] + raw_kwargs["headers"] = [(key.decode(), value.decode()) for key, value in message.get("headers", [])] + response_started = True + elif message["type"] == "http.response.body": + assert response_started, 'Received "http.response.body" without "http.response.start".' + assert not response_complete.is_set(), 'Received "http.response.body" after response completed.' + body = message.get("body", b"") + more_body = message.get("more_body", False) + if request.method != "HEAD": + raw_kwargs["stream"].write(body) + if not more_body: + raw_kwargs["stream"].seek(0) + response_complete.set() + elif message["type"] == "http.response.debug": + template = message["info"]["template"] + context = message["info"]["context"] + + try: + with self.portal_factory() as portal: + response_complete = portal.call(anyio.Event) + portal.call(self.app, scope, receive, send) + except BaseException as exc: + if self.raise_server_exceptions: + raise exc + + if self.raise_server_exceptions: + assert response_started, "TestClient did not receive any response." 
+ elif not response_started: + raw_kwargs = { + "status_code": 500, + "headers": [], + "stream": io.BytesIO(), + } + + raw_kwargs["stream"] = httpx.ByteStream(raw_kwargs["stream"].read()) + + response = httpx.Response(**raw_kwargs, request=request) + if template is not None: + response.template = template # type: ignore[attr-defined] + response.context = context # type: ignore[attr-defined] + return response + + +class TestClient(httpx.Client): + __test__ = False + task: Future[None] + portal: anyio.abc.BlockingPortal | None = None + + def __init__( + self, + app: ASGIApp, + base_url: str = "http://testserver", + raise_server_exceptions: bool = True, + root_path: str = "", + backend: Literal["asyncio", "trio"] = "asyncio", + backend_options: dict[str, Any] | None = None, + cookies: httpx._types.CookieTypes | None = None, + headers: dict[str, str] | None = None, + follow_redirects: bool = True, + client: tuple[str, int] = ("testclient", 50000), + ) -> None: + self.async_backend = _AsyncBackend(backend=backend, backend_options=backend_options or {}) + if _is_asgi3(app): + asgi_app = app + else: + app = cast(ASGI2App, app) # type: ignore[assignment] + asgi_app = _WrapASGI2(app) # type: ignore[arg-type] + self.app = asgi_app + self.app_state: dict[str, Any] = {} + transport = _TestClientTransport( + self.app, + portal_factory=self._portal_factory, + raise_server_exceptions=raise_server_exceptions, + root_path=root_path, + app_state=self.app_state, + client=client, + ) + if headers is None: + headers = {} + headers.setdefault("user-agent", "testclient") + super().__init__( + base_url=base_url, + headers=headers, + transport=transport, + follow_redirects=follow_redirects, + cookies=cookies, + ) + + @contextlib.contextmanager + def _portal_factory(self) -> Generator[anyio.abc.BlockingPortal, None, None]: + if self.portal is not None: + yield self.portal + else: + with anyio.from_thread.start_blocking_portal(**self.async_backend) as portal: + yield portal + + def request( 
# type: ignore[override] + self, + method: str, + url: httpx._types.URLTypes, + *, + content: httpx._types.RequestContent | None = None, + data: _RequestData | None = None, + files: httpx._types.RequestFiles | None = None, + json: Any = None, + params: httpx._types.QueryParamTypes | None = None, + headers: httpx._types.HeaderTypes | None = None, + cookies: httpx._types.CookieTypes | None = None, + auth: httpx._types.AuthTypes | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + follow_redirects: bool | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + timeout: httpx._types.TimeoutTypes | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + extensions: dict[str, Any] | None = None, + ) -> httpx.Response: + if timeout is not httpx.USE_CLIENT_DEFAULT: + warnings.warn( + "You should not use the 'timeout' argument with the TestClient. " + "See https://github.com/encode/starlette/issues/1108 for more information.", + DeprecationWarning, + ) + url = self._merge_url(url) + return super().request( + method, + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + def get( # type: ignore[override] + self, + url: httpx._types.URLTypes, + *, + params: httpx._types.QueryParamTypes | None = None, + headers: httpx._types.HeaderTypes | None = None, + cookies: httpx._types.CookieTypes | None = None, + auth: httpx._types.AuthTypes | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + follow_redirects: bool | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + timeout: httpx._types.TimeoutTypes | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + extensions: dict[str, Any] | None = None, + ) -> httpx.Response: + return super().get( + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + 
follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + def options( # type: ignore[override] + self, + url: httpx._types.URLTypes, + *, + params: httpx._types.QueryParamTypes | None = None, + headers: httpx._types.HeaderTypes | None = None, + cookies: httpx._types.CookieTypes | None = None, + auth: httpx._types.AuthTypes | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + follow_redirects: bool | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + timeout: httpx._types.TimeoutTypes | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + extensions: dict[str, Any] | None = None, + ) -> httpx.Response: + return super().options( + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + def head( # type: ignore[override] + self, + url: httpx._types.URLTypes, + *, + params: httpx._types.QueryParamTypes | None = None, + headers: httpx._types.HeaderTypes | None = None, + cookies: httpx._types.CookieTypes | None = None, + auth: httpx._types.AuthTypes | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + follow_redirects: bool | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + timeout: httpx._types.TimeoutTypes | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + extensions: dict[str, Any] | None = None, + ) -> httpx.Response: + return super().head( + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + def post( # type: ignore[override] + self, + url: httpx._types.URLTypes, + *, + content: httpx._types.RequestContent | None = None, + data: _RequestData | None = None, + files: httpx._types.RequestFiles | None = None, + json: Any = None, + params: httpx._types.QueryParamTypes | None = None, + headers: httpx._types.HeaderTypes | 
None = None, + cookies: httpx._types.CookieTypes | None = None, + auth: httpx._types.AuthTypes | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + follow_redirects: bool | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + timeout: httpx._types.TimeoutTypes | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + extensions: dict[str, Any] | None = None, + ) -> httpx.Response: + return super().post( + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + def put( # type: ignore[override] + self, + url: httpx._types.URLTypes, + *, + content: httpx._types.RequestContent | None = None, + data: _RequestData | None = None, + files: httpx._types.RequestFiles | None = None, + json: Any = None, + params: httpx._types.QueryParamTypes | None = None, + headers: httpx._types.HeaderTypes | None = None, + cookies: httpx._types.CookieTypes | None = None, + auth: httpx._types.AuthTypes | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + follow_redirects: bool | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + timeout: httpx._types.TimeoutTypes | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + extensions: dict[str, Any] | None = None, + ) -> httpx.Response: + return super().put( + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + def patch( # type: ignore[override] + self, + url: httpx._types.URLTypes, + *, + content: httpx._types.RequestContent | None = None, + data: _RequestData | None = None, + files: httpx._types.RequestFiles | None = None, + json: Any = None, + params: httpx._types.QueryParamTypes | None = None, + headers: 
httpx._types.HeaderTypes | None = None, + cookies: httpx._types.CookieTypes | None = None, + auth: httpx._types.AuthTypes | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + follow_redirects: bool | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + timeout: httpx._types.TimeoutTypes | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + extensions: dict[str, Any] | None = None, + ) -> httpx.Response: + return super().patch( + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + def delete( # type: ignore[override] + self, + url: httpx._types.URLTypes, + *, + params: httpx._types.QueryParamTypes | None = None, + headers: httpx._types.HeaderTypes | None = None, + cookies: httpx._types.CookieTypes | None = None, + auth: httpx._types.AuthTypes | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + follow_redirects: bool | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + timeout: httpx._types.TimeoutTypes | httpx._client.UseClientDefault = httpx._client.USE_CLIENT_DEFAULT, + extensions: dict[str, Any] | None = None, + ) -> httpx.Response: + return super().delete( + url, + params=params, + headers=headers, + cookies=cookies, + auth=auth, + follow_redirects=follow_redirects, + timeout=timeout, + extensions=extensions, + ) + + def websocket_connect( + self, + url: str, + subprotocols: Sequence[str] | None = None, + **kwargs: Any, + ) -> WebSocketTestSession: + url = urljoin("ws://testserver", url) + headers = kwargs.get("headers", {}) + headers.setdefault("connection", "upgrade") + headers.setdefault("sec-websocket-key", "testserver==") + headers.setdefault("sec-websocket-version", "13") + if subprotocols is not None: + headers.setdefault("sec-websocket-protocol", ", ".join(subprotocols)) + kwargs["headers"] = headers 
+ try: + super().request("GET", url, **kwargs) + except _Upgrade as exc: + session = exc.session + else: + raise RuntimeError("Expected WebSocket upgrade") # pragma: no cover + + return session + + def __enter__(self) -> Self: + with contextlib.ExitStack() as stack: + self.portal = portal = stack.enter_context(anyio.from_thread.start_blocking_portal(**self.async_backend)) + + @stack.callback + def reset_portal() -> None: + self.portal = None + + send: anyio.create_memory_object_stream[MutableMapping[str, Any] | None] = ( + anyio.create_memory_object_stream(math.inf) + ) + receive: anyio.create_memory_object_stream[MutableMapping[str, Any]] = anyio.create_memory_object_stream( + math.inf + ) + for channel in (*send, *receive): + stack.callback(channel.close) + self.stream_send = StapledObjectStream(*send) + self.stream_receive = StapledObjectStream(*receive) + self.task = portal.start_task_soon(self.lifespan) + portal.call(self.wait_startup) + + @stack.callback + def wait_shutdown() -> None: + portal.call(self.wait_shutdown) + + self.exit_stack = stack.pop_all() + + return self + + def __exit__(self, *args: Any) -> None: + self.exit_stack.close() + + async def lifespan(self) -> None: + scope = {"type": "lifespan", "state": self.app_state} + try: + await self.app(scope, self.stream_receive.receive, self.stream_send.send) + finally: + await self.stream_send.send(None) + + async def wait_startup(self) -> None: + await self.stream_receive.send({"type": "lifespan.startup"}) + + async def receive() -> Any: + message = await self.stream_send.receive() + if message is None: + self.task.result() + return message + + message = await receive() + assert message["type"] in ( + "lifespan.startup.complete", + "lifespan.startup.failed", + ) + if message["type"] == "lifespan.startup.failed": + await receive() + + async def wait_shutdown(self) -> None: + async def receive() -> Any: + message = await self.stream_send.receive() + if message is None: + self.task.result() + return 
message + + await self.stream_receive.send({"type": "lifespan.shutdown"}) + message = await receive() + assert message["type"] in ( + "lifespan.shutdown.complete", + "lifespan.shutdown.failed", + ) + if message["type"] == "lifespan.shutdown.failed": + await receive() diff --git a/venv/lib/python3.10/site-packages/starlette/types.py b/venv/lib/python3.10/site-packages/starlette/types.py new file mode 100644 index 0000000000000000000000000000000000000000..e1f478d78741df2af89e0d839a40bb1412a51bd4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/types.py @@ -0,0 +1,26 @@ +from collections.abc import Awaitable, Mapping, MutableMapping +from contextlib import AbstractAsyncContextManager +from typing import TYPE_CHECKING, Any, Callable, TypeVar, Union + +if TYPE_CHECKING: + from starlette.requests import Request + from starlette.responses import Response + from starlette.websockets import WebSocket + +AppType = TypeVar("AppType") + +Scope = MutableMapping[str, Any] +Message = MutableMapping[str, Any] + +Receive = Callable[[], Awaitable[Message]] +Send = Callable[[Message], Awaitable[None]] + +ASGIApp = Callable[[Scope, Receive, Send], Awaitable[None]] + +StatelessLifespan = Callable[[AppType], AbstractAsyncContextManager[None]] +StatefulLifespan = Callable[[AppType], AbstractAsyncContextManager[Mapping[str, Any]]] +Lifespan = Union[StatelessLifespan[AppType], StatefulLifespan[AppType]] + +HTTPExceptionHandler = Callable[["Request", Exception], "Response | Awaitable[Response]"] +WebSocketExceptionHandler = Callable[["WebSocket", Exception], Awaitable[None]] +ExceptionHandler = Union[HTTPExceptionHandler, WebSocketExceptionHandler] diff --git a/venv/lib/python3.10/site-packages/starlette/websockets.py b/venv/lib/python3.10/site-packages/starlette/websockets.py new file mode 100644 index 0000000000000000000000000000000000000000..fb76361c8a2210dc6c494549b0c8c3b9c2e01a18 --- /dev/null +++ b/venv/lib/python3.10/site-packages/starlette/websockets.py @@ -0,0 +1,196 
@@ +from __future__ import annotations + +import enum +import json +from collections.abc import AsyncIterator, Iterable +from typing import Any, cast + +from starlette.requests import HTTPConnection +from starlette.responses import Response +from starlette.types import Message, Receive, Scope, Send + + +class WebSocketState(enum.Enum): + CONNECTING = 0 + CONNECTED = 1 + DISCONNECTED = 2 + RESPONSE = 3 + + +class WebSocketDisconnect(Exception): + def __init__(self, code: int = 1000, reason: str | None = None) -> None: + self.code = code + self.reason = reason or "" + + +class WebSocket(HTTPConnection): + def __init__(self, scope: Scope, receive: Receive, send: Send) -> None: + super().__init__(scope) + assert scope["type"] == "websocket" + self._receive = receive + self._send = send + self.client_state = WebSocketState.CONNECTING + self.application_state = WebSocketState.CONNECTING + + async def receive(self) -> Message: + """ + Receive ASGI websocket messages, ensuring valid state transitions. + """ + if self.client_state == WebSocketState.CONNECTING: + message = await self._receive() + message_type = message["type"] + if message_type != "websocket.connect": + raise RuntimeError(f'Expected ASGI message "websocket.connect", but got {message_type!r}') + self.client_state = WebSocketState.CONNECTED + return message + elif self.client_state == WebSocketState.CONNECTED: + message = await self._receive() + message_type = message["type"] + if message_type not in {"websocket.receive", "websocket.disconnect"}: + raise RuntimeError( + f'Expected ASGI message "websocket.receive" or "websocket.disconnect", but got {message_type!r}' + ) + if message_type == "websocket.disconnect": + self.client_state = WebSocketState.DISCONNECTED + return message + else: + raise RuntimeError('Cannot call "receive" once a disconnect message has been received.') + + async def send(self, message: Message) -> None: + """ + Send ASGI websocket messages, ensuring valid state transitions. 
+ """ + if self.application_state == WebSocketState.CONNECTING: + message_type = message["type"] + if message_type not in {"websocket.accept", "websocket.close", "websocket.http.response.start"}: + raise RuntimeError( + 'Expected ASGI message "websocket.accept", "websocket.close" or "websocket.http.response.start", ' + f"but got {message_type!r}" + ) + if message_type == "websocket.close": + self.application_state = WebSocketState.DISCONNECTED + elif message_type == "websocket.http.response.start": + self.application_state = WebSocketState.RESPONSE + else: + self.application_state = WebSocketState.CONNECTED + await self._send(message) + elif self.application_state == WebSocketState.CONNECTED: + message_type = message["type"] + if message_type not in {"websocket.send", "websocket.close"}: + raise RuntimeError( + f'Expected ASGI message "websocket.send" or "websocket.close", but got {message_type!r}' + ) + if message_type == "websocket.close": + self.application_state = WebSocketState.DISCONNECTED + try: + await self._send(message) + except OSError: + self.application_state = WebSocketState.DISCONNECTED + raise WebSocketDisconnect(code=1006) + elif self.application_state == WebSocketState.RESPONSE: + message_type = message["type"] + if message_type != "websocket.http.response.body": + raise RuntimeError(f'Expected ASGI message "websocket.http.response.body", but got {message_type!r}') + if not message.get("more_body", False): + self.application_state = WebSocketState.DISCONNECTED + await self._send(message) + else: + raise RuntimeError('Cannot call "send" once a close message has been sent.') + + async def accept( + self, + subprotocol: str | None = None, + headers: Iterable[tuple[bytes, bytes]] | None = None, + ) -> None: + headers = headers or [] + + if self.client_state == WebSocketState.CONNECTING: # pragma: no branch + # If we haven't yet seen the 'connect' message, then wait for it first. 
+ await self.receive() + await self.send({"type": "websocket.accept", "subprotocol": subprotocol, "headers": headers}) + + def _raise_on_disconnect(self, message: Message) -> None: + if message["type"] == "websocket.disconnect": + raise WebSocketDisconnect(message["code"], message.get("reason")) + + async def receive_text(self) -> str: + if self.application_state != WebSocketState.CONNECTED: + raise RuntimeError('WebSocket is not connected. Need to call "accept" first.') + message = await self.receive() + self._raise_on_disconnect(message) + return cast(str, message["text"]) + + async def receive_bytes(self) -> bytes: + if self.application_state != WebSocketState.CONNECTED: + raise RuntimeError('WebSocket is not connected. Need to call "accept" first.') + message = await self.receive() + self._raise_on_disconnect(message) + return cast(bytes, message["bytes"]) + + async def receive_json(self, mode: str = "text") -> Any: + if mode not in {"text", "binary"}: + raise RuntimeError('The "mode" argument should be "text" or "binary".') + if self.application_state != WebSocketState.CONNECTED: + raise RuntimeError('WebSocket is not connected. 
Need to call "accept" first.') + message = await self.receive() + self._raise_on_disconnect(message) + + if mode == "text": + text = message["text"] + else: + text = message["bytes"].decode("utf-8") + return json.loads(text) + + async def iter_text(self) -> AsyncIterator[str]: + try: + while True: + yield await self.receive_text() + except WebSocketDisconnect: + pass + + async def iter_bytes(self) -> AsyncIterator[bytes]: + try: + while True: + yield await self.receive_bytes() + except WebSocketDisconnect: + pass + + async def iter_json(self) -> AsyncIterator[Any]: + try: + while True: + yield await self.receive_json() + except WebSocketDisconnect: + pass + + async def send_text(self, data: str) -> None: + await self.send({"type": "websocket.send", "text": data}) + + async def send_bytes(self, data: bytes) -> None: + await self.send({"type": "websocket.send", "bytes": data}) + + async def send_json(self, data: Any, mode: str = "text") -> None: + if mode not in {"text", "binary"}: + raise RuntimeError('The "mode" argument should be "text" or "binary".') + text = json.dumps(data, separators=(",", ":"), ensure_ascii=False) + if mode == "text": + await self.send({"type": "websocket.send", "text": text}) + else: + await self.send({"type": "websocket.send", "bytes": text.encode("utf-8")}) + + async def close(self, code: int = 1000, reason: str | None = None) -> None: + await self.send({"type": "websocket.close", "code": code, "reason": reason or ""}) + + async def send_denial_response(self, response: Response) -> None: + if "websocket.http.response" in self.scope.get("extensions", {}): + await response(self.scope, self.receive, self.send) + else: + raise RuntimeError("The server doesn't support the Websocket Denial Response extension.") + + +class WebSocketClose: + def __init__(self, code: int = 1000, reason: str | None = None) -> None: + self.code = code + self.reason = reason or "" + + async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: + 
await send({"type": "websocket.close", "code": self.code, "reason": self.reason}) diff --git a/venv/lib/python3.10/site-packages/torchaudio-2.6.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/torchaudio-2.6.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio-2.6.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/torchaudio-2.6.0.dist-info/LICENSE b/venv/lib/python3.10/site-packages/torchaudio-2.6.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..1bec23eaf1dd562ae3d3216420b1b1bbfbd39cbc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio-2.6.0.dist-info/LICENSE @@ -0,0 +1,25 @@ +BSD 2-Clause License + +Copyright (c) 2017 Facebook Inc. (Soumith Chintala), +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/venv/lib/python3.10/site-packages/torchaudio-2.6.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/torchaudio-2.6.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..88b509dd116a4c3396d57eca6af5d3b6bd126fa3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio-2.6.0.dist-info/METADATA @@ -0,0 +1,124 @@ +Metadata-Version: 2.2 +Name: torchaudio +Version: 2.6.0 +Summary: An audio package for PyTorch +Home-page: https://github.com/pytorch/audio +Author: Soumith Chintala, David Pollack, Sean Naren, Peter Goldsborough, Moto Hira, Caroline Chen, Jeff Hwang, Zhaoheng Ni, Xiaohui Zhang +Author-email: soumith@pytorch.org +Maintainer: Moto Hira, Caroline Chen, Jeff Hwang, Zhaoheng Ni, Xiaohui Zhang +Maintainer-email: moto@meta.com +Classifier: Environment :: Plugins +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Programming Language :: C++ +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 
Implementation :: CPython +Classifier: Topic :: Multimedia :: Sound/Audio +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: torch==2.6.0 +Dynamic: author +Dynamic: author-email +Dynamic: classifier +Dynamic: description +Dynamic: description-content-type +Dynamic: home-page +Dynamic: maintainer +Dynamic: maintainer-email +Dynamic: requires-dist +Dynamic: summary + +torchaudio: an audio library for PyTorch +======================================== + +[![Documentation](https://img.shields.io/badge/dynamic/json.svg?label=docs&url=https%3A%2F%2Fpypi.org%2Fpypi%2Ftorchaudio%2Fjson&query=%24.info.version&colorB=brightgreen&prefix=v)](https://pytorch.org/audio/main/) +[![Anaconda Badge](https://anaconda.org/pytorch/torchaudio/badges/downloads.svg)](https://anaconda.org/pytorch/torchaudio) +[![Anaconda-Server Badge](https://anaconda.org/pytorch/torchaudio/badges/platforms.svg)](https://anaconda.org/pytorch/torchaudio) + +![TorchAudio Logo](docs/source/_static/img/logo.png) + +The aim of torchaudio is to apply [PyTorch](https://github.com/pytorch/pytorch) to +the audio domain. By supporting PyTorch, torchaudio follows the same philosophy +of providing strong GPU acceleration, having a focus on trainable features through +the autograd system, and having consistent style (tensor names and dimension names). +Therefore, it is primarily a machine learning library and not a general signal +processing library. The benefits of PyTorch can be seen in torchaudio through +having all the computations be through PyTorch operations which makes it easy +to use and feel like a natural extension. 
+ +- [Support audio I/O (Load files, Save files)](http://pytorch.org/audio/main/) + - Load a variety of audio formats, such as `wav`, `mp3`, `ogg`, `flac`, `opus`, `sphere`, into a torch Tensor using SoX + - [Kaldi (ark/scp)](http://pytorch.org/audio/main/kaldi_io.html) +- [Dataloaders for common audio datasets](http://pytorch.org/audio/main/datasets.html) +- Audio and speech processing functions + - [forced_align](https://pytorch.org/audio/main/generated/torchaudio.functional.forced_align.html) +- Common audio transforms + - [Spectrogram, AmplitudeToDB, MelScale, MelSpectrogram, MFCC, MuLawEncoding, MuLawDecoding, Resample](http://pytorch.org/audio/main/transforms.html) +- Compliance interfaces: Run code using PyTorch that align with other libraries + - [Kaldi: spectrogram, fbank, mfcc](https://pytorch.org/audio/main/compliance.kaldi.html) + +Installation +------------ + +Please refer to https://pytorch.org/audio/main/installation.html for installation and build process of TorchAudio. + + +API Reference +------------- + +API Reference is located here: http://pytorch.org/audio/main/ + +Contributing Guidelines +----------------------- + +Please refer to [CONTRIBUTING.md](./CONTRIBUTING.md) + +Citation +-------- + +If you find this package useful, please cite as: + +```bibtex +@article{yang2021torchaudio, + title={TorchAudio: Building Blocks for Audio and Speech Processing}, + author={Yao-Yuan Yang and Moto Hira and Zhaoheng Ni and Anjali Chourdia and Artyom Astafurov and Caroline Chen and Ching-Feng Yeh and Christian Puhrsch and David Pollack and Dmitriy Genzel and Donny Greenberg and Edward Z. 
Yang and Jason Lian and Jay Mahadeokar and Jeff Hwang and Ji Chen and Peter Goldsborough and Prabhat Roy and Sean Narenthiran and Shinji Watanabe and Soumith Chintala and Vincent Quenneville-Bélair and Yangyang Shi}, + journal={arXiv preprint arXiv:2110.15018}, + year={2021} +} +``` + +```bibtex +@misc{hwang2023torchaudio, + title={TorchAudio 2.1: Advancing speech recognition, self-supervised learning, and audio processing components for PyTorch}, + author={Jeff Hwang and Moto Hira and Caroline Chen and Xiaohui Zhang and Zhaoheng Ni and Guangzhi Sun and Pingchuan Ma and Ruizhe Huang and Vineel Pratap and Yuekai Zhang and Anurag Kumar and Chin-Yun Yu and Chuang Zhu and Chunxi Liu and Jacob Kahn and Mirco Ravanelli and Peng Sun and Shinji Watanabe and Yangyang Shi and Yumeng Tao and Robin Scheibler and Samuele Cornell and Sean Kim and Stavros Petridis}, + year={2023}, + eprint={2310.17864}, + archivePrefix={arXiv}, + primaryClass={eess.AS} +} +``` + +Disclaimer on Datasets +---------------------- + +This is a utility library that downloads and prepares public datasets. We do not host or distribute these datasets, vouch for their quality or fairness, or claim that you have license to use the dataset. It is your responsibility to determine whether you have permission to use the dataset under the dataset's license. + +If you're a dataset owner and wish to update any part of it (description, citation, etc.), or do not want your dataset to be included in this library, please get in touch through a GitHub issue. Thanks for your contribution to the ML community! + +Pre-trained Model License +------------------------- + +The pre-trained models provided in this library may have their own licenses or terms and conditions derived from the dataset used for training. It is your responsibility to determine whether you have permission to use the models for your use case. 
+ +For instance, SquimSubjective model is released under the Creative Commons Attribution Non Commercial 4.0 International (CC-BY-NC 4.0) license. See [the link](https://zenodo.org/record/4660670#.ZBtWPOxuerN) for additional details. + +Other pre-trained models that have different license are noted in documentation. Please checkout the [documentation page](https://pytorch.org/audio/main/). diff --git a/venv/lib/python3.10/site-packages/torchaudio-2.6.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/torchaudio-2.6.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..deaa22a86a74a7b82e21de28739bac279556ea2f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio-2.6.0.dist-info/RECORD @@ -0,0 +1,280 @@ +torchaudio-2.6.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +torchaudio-2.6.0.dist-info/LICENSE,sha256=k6WIYahYzBCOa2uDPgjnbosqZjOeSoAHyKWowf-cQNY,1338 +torchaudio-2.6.0.dist-info/METADATA,sha256=6Mt_Z5EGlz4hs3HGs2ekPhnD1m_6FX7iXryUcguI_a8,6618 +torchaudio-2.6.0.dist-info/RECORD,, +torchaudio-2.6.0.dist-info/WHEEL,sha256=aJ4RvyGkNe3spInijG_tdeo2KZhSBTVwmh5LQ523pbk,104 +torchaudio-2.6.0.dist-info/top_level.txt,sha256=GT0MktEbHKoLnvd-6ii7_dhJVvshupOujk840BcHU4U,17 +torchaudio/__init__.py,sha256=VSnZ6s4e5clAj7f7aCXBZt9amskeXg1j19txAQBQ2Iw,892 +torchaudio/__pycache__/__init__.cpython-310.pyc,, +torchaudio/__pycache__/kaldi_io.cpython-310.pyc,, +torchaudio/__pycache__/version.cpython-310.pyc,, +torchaudio/_backend/__init__.py,sha256=6zMYGajHaaCXUE_U7HuGLp0fqcviYAjBZdFDI4E7C-0,1631 +torchaudio/_backend/__pycache__/__init__.cpython-310.pyc,, +torchaudio/_backend/__pycache__/backend.cpython-310.pyc,, +torchaudio/_backend/__pycache__/common.cpython-310.pyc,, +torchaudio/_backend/__pycache__/ffmpeg.cpython-310.pyc,, +torchaudio/_backend/__pycache__/soundfile.cpython-310.pyc,, +torchaudio/_backend/__pycache__/soundfile_backend.cpython-310.pyc,, +torchaudio/_backend/__pycache__/sox.cpython-310.pyc,, 
+torchaudio/_backend/__pycache__/utils.cpython-310.pyc,, +torchaudio/_backend/backend.py,sha256=hSrfZcj5FMzx5ZpwubN-LLMvBFb7ENyw7HvT_6pVYVU,1565 +torchaudio/_backend/common.py,sha256=55Y0r0MsdW6gvTOT_Zy60UGFXc60DfdJ7uvycJKK3is,1783 +torchaudio/_backend/ffmpeg.py,sha256=oL_whDjkPtHzo6HJLiEPlGHdrOqzjlu81g-vlaNkRBA,11294 +torchaudio/_backend/soundfile.py,sha256=n0Epw0J9rBb89xVJWTXaDfb96YFz0-i2xarXIdDd-Cw,1703 +torchaudio/_backend/soundfile_backend.py,sha256=qJHEEXU1egCkPJ2Y9uJWFvVhW3AqDZ7z7P7mkJjJJWM,17376 +torchaudio/_backend/sox.py,sha256=p_y9bXKz_6Hto5LORGrHXVbXNS7nsvWUc9iucbN-tCA,3360 +torchaudio/_backend/utils.py,sha256=Q_RgMaeKFvwOoVdWfdwnL0CmpQli_tmi4wPQ2RRHyRA,13299 +torchaudio/_extension/__init__.py,sha256=lQPB8K7VSxWmtTEiMmF-u7WVq1O10_t5nEghkjCf4Ks,2202 +torchaudio/_extension/__pycache__/__init__.cpython-310.pyc,, +torchaudio/_extension/__pycache__/utils.cpython-310.pyc,, +torchaudio/_extension/utils.py,sha256=4FTD6xwcSLqVJ3Kmpx5cvJp1oAUKmWwRjwuxpcbrmzw,6258 +torchaudio/_internal/__init__.py,sha256=gjU8g9HhVd9hHrHXJM0xOlZL6cT8ktO60MN8RHI6ZbA,241 +torchaudio/_internal/__pycache__/__init__.cpython-310.pyc,, +torchaudio/_internal/__pycache__/module_utils.cpython-310.pyc,, +torchaudio/_internal/module_utils.py,sha256=SJr-RS6hs6uJkIVx_WZwsFPKUjtuG6lsfw3uI0UItDE,3562 +torchaudio/backend/__init__.py,sha256=AL8njOL5hDhIGq5tjRxfFzZXxQdTGlz5gs9g4RToblY,281 +torchaudio/backend/__pycache__/__init__.cpython-310.pyc,, +torchaudio/backend/__pycache__/_no_backend.cpython-310.pyc,, +torchaudio/backend/__pycache__/_sox_io_backend.cpython-310.pyc,, +torchaudio/backend/__pycache__/common.cpython-310.pyc,, +torchaudio/backend/__pycache__/no_backend.cpython-310.pyc,, +torchaudio/backend/__pycache__/soundfile_backend.cpython-310.pyc,, +torchaudio/backend/__pycache__/sox_io_backend.cpython-310.pyc,, +torchaudio/backend/_no_backend.py,sha256=9Ss3b4BMFao5Kfdqh6S8JSLUoYCodbPgNQCiDHbNhDQ,757 +torchaudio/backend/_sox_io_backend.py,sha256=PnH-ClsiOy0ekOTY1RKB-cL6xrTPtrzmuXGX3ugATps,11456 
+torchaudio/backend/common.py,sha256=T_iYc4u_EzfIh7zbG_xW052fyJMXUXEpPfDOaAQ6sAY,443 +torchaudio/backend/no_backend.py,sha256=PBEQ9vFG5uVurktjxRAiEqSuVJxImnMyPQlt0reRpP0,469 +torchaudio/backend/soundfile_backend.py,sha256=2Tyh5yAn7LQqKzeqW-rx4o2QbmmUrocmh3iYPnuAds0,499 +torchaudio/backend/sox_io_backend.py,sha256=XsAB5HkRbI9-W2nXx-yUMUPJP2Ca5sd09TLywrQ2N-E,477 +torchaudio/compliance/__init__.py,sha256=hhNObUS0c-fS-VMudM7zl3-CvupvCDmESlikntSMn5g,48 +torchaudio/compliance/__pycache__/__init__.cpython-310.pyc,, +torchaudio/compliance/__pycache__/kaldi.cpython-310.pyc,, +torchaudio/compliance/kaldi.py,sha256=XL6hpYTd6nSPb2imIdeU4TM06I2fqh1AmG968y8ZbSk,36666 +torchaudio/datasets/__init__.py,sha256=taRr3duDaEK1Pfzj9N1dFuZpXfy8e4uFItcJiRLAQwQ,1171 +torchaudio/datasets/__pycache__/__init__.cpython-310.pyc,, +torchaudio/datasets/__pycache__/cmuarctic.cpython-310.pyc,, +torchaudio/datasets/__pycache__/cmudict.cpython-310.pyc,, +torchaudio/datasets/__pycache__/commonvoice.cpython-310.pyc,, +torchaudio/datasets/__pycache__/dr_vctk.cpython-310.pyc,, +torchaudio/datasets/__pycache__/fluentcommands.cpython-310.pyc,, +torchaudio/datasets/__pycache__/gtzan.cpython-310.pyc,, +torchaudio/datasets/__pycache__/iemocap.cpython-310.pyc,, +torchaudio/datasets/__pycache__/librilight_limited.cpython-310.pyc,, +torchaudio/datasets/__pycache__/librimix.cpython-310.pyc,, +torchaudio/datasets/__pycache__/librispeech.cpython-310.pyc,, +torchaudio/datasets/__pycache__/librispeech_biasing.cpython-310.pyc,, +torchaudio/datasets/__pycache__/libritts.cpython-310.pyc,, +torchaudio/datasets/__pycache__/ljspeech.cpython-310.pyc,, +torchaudio/datasets/__pycache__/musdb_hq.cpython-310.pyc,, +torchaudio/datasets/__pycache__/quesst14.cpython-310.pyc,, +torchaudio/datasets/__pycache__/snips.cpython-310.pyc,, +torchaudio/datasets/__pycache__/speechcommands.cpython-310.pyc,, +torchaudio/datasets/__pycache__/tedlium.cpython-310.pyc,, +torchaudio/datasets/__pycache__/utils.cpython-310.pyc,, 
+torchaudio/datasets/__pycache__/vctk.cpython-310.pyc,, +torchaudio/datasets/__pycache__/voxceleb1.cpython-310.pyc,, +torchaudio/datasets/__pycache__/yesno.cpython-310.pyc,, +torchaudio/datasets/cmuarctic.py,sha256=KAhTHUJ3g5RSlmsU5mCTcvutOCm3Oqcd3643u3HNqIg,7097 +torchaudio/datasets/cmudict.py,sha256=9OEpNDYpyqeEyinAnyGIU8FampDj7ziSOHRwJLIlq2M,5990 +torchaudio/datasets/commonvoice.py,sha256=9khedUCmdEkCKPU6_r8VWz6I2VdJokatuziZ6BxJMZs,2763 +torchaudio/datasets/dr_vctk.py,sha256=Km4-tKllAgnOKCuq66YRWhTlNWmC7D0Xz3dAttRRGSo,4377 +torchaudio/datasets/fluentcommands.py,sha256=u3tkO4-AAaTWdbRQi6lIvad4x2plZgXM39KljGtmRsw,3245 +torchaudio/datasets/gtzan.py,sha256=I5dRP_QGuQ1joXWRwZwtvpwi22uZTb8QZm9Mr2W55Mg,24357 +torchaudio/datasets/iemocap.py,sha256=X_WCoXOzRqcWRRRoUtY0AlD9SJcUUOACIcgbV0irt48,4930 +torchaudio/datasets/librilight_limited.py,sha256=fAwpX0hEMze5aV57BP7rjBLwRiZa3Aje_NXi_3o16wA,4179 +torchaudio/datasets/librimix.py,sha256=VtKOhf6VJc1ysWCvUvh0SbtjOkXJChmBM_BhoSkg_2A,5116 +torchaudio/datasets/librispeech.py,sha256=zkzJFWchWs4AktYAI-ghmWH4ZeJ84C0uDo9E1_pTgSI,6308 +torchaudio/datasets/librispeech_biasing.py,sha256=d-02tyrXI-CSGbXBFYFcnM_yT8WSGABHfpNiFxyadL0,6958 +torchaudio/datasets/libritts.py,sha256=EtWOoCDz7_qGLZF5YcZfnHaLxH4Y8QJCnopafLiqFno,5870 +torchaudio/datasets/ljspeech.py,sha256=92NeLQsC1iKpqfiMkKKbcJDpaYdZKVdVEBQJze1wmxY,3494 +torchaudio/datasets/musdb_hq.py,sha256=TYKjpat6JKr9bkFqUecu7_hRdshRfQP2UbknaYR3Q0U,5075 +torchaudio/datasets/quesst14.py,sha256=QyGd4fMS820ATbP8YgBtu7bSSK09pw5RZklsPJ8Jf0Y,4455 +torchaudio/datasets/snips.py,sha256=WaYUknGFM3rnLklOj5ZYHSX5mhlf_Ce4p3LBZdA9yJc,5008 +torchaudio/datasets/speechcommands.py,sha256=cLSgiVYlQjEOuYPpFeAtcXSGirraH4IMoP8p9WIvUoY,7481 +torchaudio/datasets/tedlium.py,sha256=a8Hf2QvOki7_chgXcMAFMk-piTjodktfnc3HRbUVJkU,8698 +torchaudio/datasets/utils.py,sha256=QaI02lOcesy6Dnvlof4BeTDIbiOqUcoVEPxL5_T8vwU,1689 +torchaudio/datasets/vctk.py,sha256=twR_n8LyQcT8A_HrJoMx3RkaVrRXXZAnIVU1d0E0npQ,5699 
+torchaudio/datasets/voxceleb1.py,sha256=9vU0ftB4-2usO8ZiEUKR_IQTEdHhA0M8l9scXCNehnw,11725 +torchaudio/datasets/yesno.py,sha256=4sgfMeSxz8HaRDk6A2UIFP-20q29MwEO_r8DoEtfbvE,3026 +torchaudio/functional/__init__.py,sha256=l-gC2WyY5COabU0lhkUS8EnwOYdEYR_6234OyoAIgnU,2357 +torchaudio/functional/__pycache__/__init__.cpython-310.pyc,, +torchaudio/functional/__pycache__/_alignment.cpython-310.pyc,, +torchaudio/functional/__pycache__/filtering.cpython-310.pyc,, +torchaudio/functional/__pycache__/functional.cpython-310.pyc,, +torchaudio/functional/_alignment.py,sha256=wmDeohWvuoYORYDeIRxnYUhUqv1uCUkaCZYLEK_ryUg,4695 +torchaudio/functional/filtering.py,sha256=EdYtv2z893Qi58BHIR1VGDfRaGCo0sIKl4k98-vwPkg,61554 +torchaudio/functional/functional.py,sha256=c8qr3mmPXLi40N4NCLcpHvQeUIuoNtbszksjtruC15g,96006 +torchaudio/io/__init__.py,sha256=8nd6s_xuBh5iVzIvQ-qNlforukZzuCx36DyvCmHK748,297 +torchaudio/io/__pycache__/__init__.cpython-310.pyc,, +torchaudio/io/__pycache__/_effector.cpython-310.pyc,, +torchaudio/io/__pycache__/_playback.cpython-310.pyc,, +torchaudio/io/_effector.py,sha256=APDrIU2biwFsSVmhrXjelmc4ndcmb0JL-H189Zp689g,11870 +torchaudio/io/_playback.py,sha256=70IxGrGPkI1h4rz8_04SFCGsbbGZkTiUdRhbPOMLLgQ,2321 +torchaudio/kaldi_io.py,sha256=TwS2YgSLlJwOXjNNsHBuXyxhKeKKpptVHLBV7QYZCas,5073 +torchaudio/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +torchaudio/lib/__pycache__/__init__.cpython-310.pyc,, +torchaudio/lib/_torchaudio.so,sha256=gXokHtvNK0UE1DQAp9pFu_7Wt0v-NPqIUzw5sjwqMJQ,133640 +torchaudio/lib/_torchaudio_sox.so,sha256=zajrtsPlVL1aqtaaJKBODBb_hNh3ef5-19glzyzRauo,261504 +torchaudio/lib/libctc_prefix_decoder.so,sha256=N5VUd3f-0dKu6E4NXrZwbmV8gCUAubo-1I3FXJyM3Ww,4943448 +torchaudio/lib/libtorchaudio.so,sha256=DoWtqEYaHDCY-TF0IaRlEAvmZWh4Il5CoerZO_JeVgI,2447512 +torchaudio/lib/libtorchaudio_sox.so,sha256=f9b5mPLCFABVFWHIqTqtbrrQvDcD0F0n5NdcncmqSqU,151736 +torchaudio/lib/pybind11_prefixctc.so,sha256=fSYRkthMWWibUS4MYNGNuI1EnrZJpfS_6OX71cht8Eo,213776 
+torchaudio/models/__init__.py,sha256=BNMNGuwpJAFRsdtwHYQ6slGClkrUTu31_7mXh7FjeV4,1995 +torchaudio/models/__pycache__/__init__.cpython-310.pyc,, +torchaudio/models/__pycache__/_hdemucs.cpython-310.pyc,, +torchaudio/models/__pycache__/conformer.cpython-310.pyc,, +torchaudio/models/__pycache__/conv_tasnet.cpython-310.pyc,, +torchaudio/models/__pycache__/deepspeech.cpython-310.pyc,, +torchaudio/models/__pycache__/emformer.cpython-310.pyc,, +torchaudio/models/__pycache__/rnnt.cpython-310.pyc,, +torchaudio/models/__pycache__/rnnt_decoder.cpython-310.pyc,, +torchaudio/models/__pycache__/tacotron2.cpython-310.pyc,, +torchaudio/models/__pycache__/wav2letter.cpython-310.pyc,, +torchaudio/models/__pycache__/wavernn.cpython-310.pyc,, +torchaudio/models/_hdemucs.py,sha256=VPnQ73lA9lfAxRjZ85NCGJYP36mPNwTjS-TU4qelu_k,38242 +torchaudio/models/conformer.py,sha256=5IceU-jcZKofkHTTqRKoytubQ75MzZPrPlfkLsIlxeA,10068 +torchaudio/models/conv_tasnet.py,sha256=v-DI_Ej9FCBBbSH-Spkh3tzq8rkBhbQNA-Wp52Uf32E,12540 +torchaudio/models/decoder/__init__.py,sha256=4IS_DyQageh2_uY3YE1aBCYEE3HArCFd8ZUfbgww-Tc,1206 +torchaudio/models/decoder/__pycache__/__init__.cpython-310.pyc,, +torchaudio/models/decoder/__pycache__/_ctc_decoder.cpython-310.pyc,, +torchaudio/models/decoder/__pycache__/_cuda_ctc_decoder.cpython-310.pyc,, +torchaudio/models/decoder/_ctc_decoder.py,sha256=K3gSsG9htU08fe7tKSuIJPDIs7ruY50pJ3eNdNhXSVY,20082 +torchaudio/models/decoder/_cuda_ctc_decoder.py,sha256=rtpN1Z_Xni1LlHgHx6jJ1Jap4TnQ0rRRMvwGWa-xnvA,7186 +torchaudio/models/deepspeech.py,sha256=kQW3B6YcjYuq7xRzWjRJFGr7ZNraY9gMYDTxII7Cgtg,2746 +torchaudio/models/emformer.py,sha256=ncDeEcYegUmIKQoDBoufUhVWj4dYpZAXxLX0qmEqt1A,37766 +torchaudio/models/rnnt.py,sha256=jz66nwDd1qGT6KQR1lbA_urPktygewhm0FH66T7P3Ek,35541 +torchaudio/models/rnnt_decoder.py,sha256=IwlDsuw1SA-uCRrXGMBqm05auGFSha2bZ-8BOImnK0c,12839 +torchaudio/models/squim/__init__.py,sha256=b98nAaL28Q4w3lrqd_6wUd0An-xNhhJn4Tj8oZlzQnc,346 
+torchaudio/models/squim/__pycache__/__init__.cpython-310.pyc,, +torchaudio/models/squim/__pycache__/objective.cpython-310.pyc,, +torchaudio/models/squim/__pycache__/subjective.cpython-310.pyc,, +torchaudio/models/squim/objective.py,sha256=YPkEWdDMyeP7GcR0OzUPHr2wKhIKFMjy4peYsABmZQk,12289 +torchaudio/models/squim/subjective.py,sha256=N00kILSPm0akWyNsrNYKmHgZmooo8gbyUm5IVLf7bx8,5797 +torchaudio/models/tacotron2.py,sha256=FimYhGSI8FKwWb87CLk4h3yKWatCU2HvFmU1t5WUn4E,45914 +torchaudio/models/wav2letter.py,sha256=KNcq4p0qZG2Bwfdakv7YwLCvi_yGT-qB4fJwGMuFQhg,3278 +torchaudio/models/wav2vec2/__init__.py,sha256=WlafukV6GwuSNh0CZifrYUt4V5l59kjvGX7AZNonjfk,927 +torchaudio/models/wav2vec2/__pycache__/__init__.cpython-310.pyc,, +torchaudio/models/wav2vec2/__pycache__/components.cpython-310.pyc,, +torchaudio/models/wav2vec2/__pycache__/model.cpython-310.pyc,, +torchaudio/models/wav2vec2/__pycache__/wavlm_attention.cpython-310.pyc,, +torchaudio/models/wav2vec2/components.py,sha256=DRmW-GHYf-JReCg_0l1ovNWJBnAavePO3S2vPY-1ze4,47077 +torchaudio/models/wav2vec2/model.py,sha256=Z2VN6KbDOOdq5JtP7lxPQebwYqsxKms1Eu4IjDJtZaQ,60092 +torchaudio/models/wav2vec2/utils/__init__.py,sha256=qmMbz4HAN5kEEyl4cSGm_JQZI47beyh4witydPC_qns,181 +torchaudio/models/wav2vec2/utils/__pycache__/__init__.cpython-310.pyc,, +torchaudio/models/wav2vec2/utils/__pycache__/import_fairseq.cpython-310.pyc,, +torchaudio/models/wav2vec2/utils/__pycache__/import_huggingface.cpython-310.pyc,, +torchaudio/models/wav2vec2/utils/import_fairseq.py,sha256=oCwG6qpG0bCXue2V56fjDcC8cA2rgy4b3O_nu_FI9ZY,9198 +torchaudio/models/wav2vec2/utils/import_huggingface.py,sha256=1nVCipp-lOUAyl_-P103DWLUeTOZi9X_ffX93bOXxEk,5946 +torchaudio/models/wav2vec2/wavlm_attention.py,sha256=1DU_pkoLCeHQwSF4lJ06cez0PsMVoXNxiYKP0Yv0qFQ,10844 +torchaudio/models/wavernn.py,sha256=5xUyao5g69jRXX4ReNi4mP_aTSIonJPP6XcPrqKybEk,15446 +torchaudio/pipelines/__init__.py,sha256=Xy8NmInKwTcNBHwLTTjHjrfczRLuQq8a67ENt1OTVXM,2745 
+torchaudio/pipelines/__pycache__/__init__.cpython-310.pyc,, +torchaudio/pipelines/__pycache__/_source_separation_pipeline.cpython-310.pyc,, +torchaudio/pipelines/__pycache__/_squim_pipeline.cpython-310.pyc,, +torchaudio/pipelines/__pycache__/rnnt_pipeline.cpython-310.pyc,, +torchaudio/pipelines/_source_separation_pipeline.py,sha256=WxngB1d13H5IVqs4RPqSL53ZGYsJ3tnfCpxgc5FNSOM,4224 +torchaudio/pipelines/_squim_pipeline.py,sha256=eCimTeoqNX8LilIQNGmb8UaRtnLIXa4LNShXFjodcZM,6280 +torchaudio/pipelines/_tts/__init__.py,sha256=PP7l8XzVURqelwuMJFgfOCv4fvzZunDiy90ZQlRkv7g,426 +torchaudio/pipelines/_tts/__pycache__/__init__.cpython-310.pyc,, +torchaudio/pipelines/_tts/__pycache__/impl.cpython-310.pyc,, +torchaudio/pipelines/_tts/__pycache__/interface.cpython-310.pyc,, +torchaudio/pipelines/_tts/__pycache__/utils.cpython-310.pyc,, +torchaudio/pipelines/_tts/impl.py,sha256=Tig4_5sITJADwxN5eZGek7Ath_-e3sV8CTM5t6UpeUU,15374 +torchaudio/pipelines/_tts/interface.py,sha256=yUaS0UK3PTRruYXRWFil7lAhr-1iYiyBaDBLmEnJPUQ,10224 +torchaudio/pipelines/_tts/utils.py,sha256=0rLyoFWS78n5jn9AC99pgIwAjaXSw-MVbj_pjSaOHiM,4616 +torchaudio/pipelines/_wav2vec2/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +torchaudio/pipelines/_wav2vec2/__pycache__/__init__.cpython-310.pyc,, +torchaudio/pipelines/_wav2vec2/__pycache__/aligner.cpython-310.pyc,, +torchaudio/pipelines/_wav2vec2/__pycache__/impl.cpython-310.pyc,, +torchaudio/pipelines/_wav2vec2/__pycache__/utils.cpython-310.pyc,, +torchaudio/pipelines/_wav2vec2/aligner.py,sha256=pIWRgQ-kdYUxtL8bdc0qk9wBjwRrHY1uSWL3L4e2vxs,2709 +torchaudio/pipelines/_wav2vec2/impl.py,sha256=zdXFjytJO5MvnB-3aygzUUFKxCTkQGU_OX_rhUh9c0k,65561 +torchaudio/pipelines/_wav2vec2/utils.py,sha256=Q8_fWOR2JDnHu0TTRmHzRjI3BOJa0hGIAl0cjtALgsQ,6971 +torchaudio/pipelines/rnnt_pipeline.py,sha256=Qy37z7v6d1jLOHd67zbRu21dgL6Fml1rTd7j4Jl1NsM,13749 +torchaudio/prototype/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+torchaudio/prototype/__pycache__/__init__.cpython-310.pyc,, +torchaudio/prototype/datasets/__init__.py,sha256=GSMcp2CykcBc-krhlHTrPm5DCvDFwnA7_6GFNCGwsaQ,47 +torchaudio/prototype/datasets/__pycache__/__init__.cpython-310.pyc,, +torchaudio/prototype/datasets/__pycache__/musan.cpython-310.pyc,, +torchaudio/prototype/datasets/musan.py,sha256=g68PIJCtJM_mXK8vngJ4PRzMqvp-YShPLN9qTMgeiKw,2096 +torchaudio/prototype/functional/__init__.py,sha256=GlbhnDHcNyUWdRd3R-ATzRkG2FXsbqjL56OptyTXpec,562 +torchaudio/prototype/functional/__pycache__/__init__.cpython-310.pyc,, +torchaudio/prototype/functional/__pycache__/_dsp.cpython-310.pyc,, +torchaudio/prototype/functional/__pycache__/_rir.cpython-310.pyc,, +torchaudio/prototype/functional/__pycache__/functional.cpython-310.pyc,, +torchaudio/prototype/functional/_dsp.py,sha256=H4IZgQYjrmV6ITb7iex3F4qwBSFDyPbdrb0e4ZXbkMY,16638 +torchaudio/prototype/functional/_rir.py,sha256=k-svDQK56U1WNpj4dNUxWArBiVM7sZ_BZ98oOop4NNg,17255 +torchaudio/prototype/functional/functional.py,sha256=xd8ZQe69Utl7HzC-VDyhniS0K-dment-Z7FrEwTrfYk,6464 +torchaudio/prototype/models/__init__.py,sha256=zrav5cgVlM51xvocGlL10GoR2r5UuQrenPDlYRUzv40,1254 +torchaudio/prototype/models/__pycache__/__init__.cpython-310.pyc,, +torchaudio/prototype/models/__pycache__/_conformer_wav2vec2.cpython-310.pyc,, +torchaudio/prototype/models/__pycache__/_emformer_hubert.cpython-310.pyc,, +torchaudio/prototype/models/__pycache__/conv_emformer.cpython-310.pyc,, +torchaudio/prototype/models/__pycache__/hifi_gan.cpython-310.pyc,, +torchaudio/prototype/models/__pycache__/rnnt.cpython-310.pyc,, +torchaudio/prototype/models/__pycache__/rnnt_decoder.cpython-310.pyc,, +torchaudio/prototype/models/_conformer_wav2vec2.py,sha256=J7ZJ0dPIFLj9RyPsnuSQC9Y5OVJ9xL6F4JS44zua9zA,29522 +torchaudio/prototype/models/_emformer_hubert.py,sha256=D1WlL1S5xNrN5zOWYnzGyRUtPWnFZOkJprhbkYln0fM,13498 +torchaudio/prototype/models/conv_emformer.py,sha256=tdUz8WwhNlmGXpmki4voZg5nrg749xi23rmfrq2XRCk,23076 
+torchaudio/prototype/models/hifi_gan.py,sha256=-ZMA722hoYabIbJl3OGqlxyhhqAHEL7UsTEkOyy8w5I,12480 +torchaudio/prototype/models/rnnt.py,sha256=MTsXxGGv8xOIlH_zhOeSUdedI29CHBIsJ0Pcr8D6yK0,30859 +torchaudio/prototype/models/rnnt_decoder.py,sha256=lIacC7qCjMxjAuBHpgrPXlNI3eERo11fYgaEwPDT7ms,15735 +torchaudio/prototype/pipelines/__init__.py,sha256=yo19xKvIW3XDdDo19thGSMkPRuR8xTwSe0qMWEPS9bE,382 +torchaudio/prototype/pipelines/__pycache__/__init__.cpython-310.pyc,, +torchaudio/prototype/pipelines/__pycache__/hifigan_pipeline.cpython-310.pyc,, +torchaudio/prototype/pipelines/__pycache__/rnnt_pipeline.cpython-310.pyc,, +torchaudio/prototype/pipelines/_vggish/__init__.py,sha256=yi9HO_14_YWFOEvQOhTXb9eqF3JGJ9FtM5-J-a3nEnA,89 +torchaudio/prototype/pipelines/_vggish/__pycache__/__init__.cpython-310.pyc,, +torchaudio/prototype/pipelines/_vggish/__pycache__/_vggish_impl.cpython-310.pyc,, +torchaudio/prototype/pipelines/_vggish/__pycache__/_vggish_pipeline.cpython-310.pyc,, +torchaudio/prototype/pipelines/_vggish/_vggish_impl.py,sha256=EvpbNx4oHk7Zr7LQo-c191XRDLuklj3dZLi2Lgrpe_0,8497 +torchaudio/prototype/pipelines/_vggish/_vggish_pipeline.py,sha256=M47A-KFargc9PxZUeu0zqaZsIapAV4XhJqj_P2IHs9g,2713 +torchaudio/prototype/pipelines/hifigan_pipeline.py,sha256=mEmTdVBn50Zyyy0hcjp-fGv9oIbvJFBYBjShXgjFvhU,9654 +torchaudio/prototype/pipelines/rnnt_pipeline.py,sha256=3wwlCg3refzmsAA69XulFRy4GIixA2EH5ZqFbNFKXhk,2184 +torchaudio/prototype/transforms/__init__.py,sha256=1PcKxc8AWlSRkmchT6IDYDseO7itWY7ueZrlcAUZlkY,225 +torchaudio/prototype/transforms/__pycache__/__init__.cpython-310.pyc,, +torchaudio/prototype/transforms/__pycache__/_transforms.cpython-310.pyc,, +torchaudio/prototype/transforms/_transforms.py,sha256=K2MjxUrTxlk0nO0NcJA07bDCoFolsQh-TabE84eKC6k,19144 +torchaudio/sox_effects/__init__.py,sha256=gCxdiwHK3ldlGCeYc9VatJW5HyzjWIgw_Sz_krp_rOw,262 +torchaudio/sox_effects/__pycache__/__init__.cpython-310.pyc,, +torchaudio/sox_effects/__pycache__/sox_effects.cpython-310.pyc,, 
+torchaudio/sox_effects/sox_effects.py,sha256=7cHpPFRJ_pZuohHMnX9JIhiVmIJGYntSmgT6QH5GNMA,10981 +torchaudio/transforms/__init__.py,sha256=Tp1o4haiJAV3MRJenmvGXFbmt-RE4qM_pd6U3Ghohqw,1270 +torchaudio/transforms/__pycache__/__init__.cpython-310.pyc,, +torchaudio/transforms/__pycache__/_multi_channel.cpython-310.pyc,, +torchaudio/transforms/__pycache__/_transforms.cpython-310.pyc,, +torchaudio/transforms/_multi_channel.py,sha256=GZ2rrwFt2KtSG7At7kS9Bqh1KmYYw0HwcUnEjc-AWr8,22221 +torchaudio/transforms/_transforms.py,sha256=QHrEsxxxm1bPd5dltPeTcNOsMBu0Ecxa2oe6GIX-nvk,86872 +torchaudio/utils/__init__.py,sha256=NCtfdIUxDi1u0zaamscSbiWzbxn2TOI-MHHWOKU0RnQ,174 +torchaudio/utils/__pycache__/__init__.cpython-310.pyc,, +torchaudio/utils/__pycache__/download.cpython-310.pyc,, +torchaudio/utils/__pycache__/ffmpeg_utils.cpython-310.pyc,, +torchaudio/utils/__pycache__/sox_utils.cpython-310.pyc,, +torchaudio/utils/download.py,sha256=2IFKD1rsWBFE31HTiyUgpE5y7AJh8_AUPdc-btNQuKw,2882 +torchaudio/utils/ffmpeg_utils.py,sha256=3I6YM95eNyOAg2K-ebEy9kjBzEDq3_OBqggXztPIDcU,319 +torchaudio/utils/sox_utils.py,sha256=WGSj_RfELpol8U2XPABGsAjO7yrPHS3_MgHkx7oHYQU,2421 +torchaudio/version.py,sha256=SF5cvuzciY5C8y2uV8NtCi5xDUUft5kIDdTyXGMI43k,85 +torio/__init__.py,sha256=aX9s0XAHxHhEXE1akQt74BZ0cMUDgBPhaYHQH1lCbXQ,111 +torio/__pycache__/__init__.cpython-310.pyc,, +torio/_extension/__init__.py,sha256=q5jjeOhSrzqn0WTEwrx61Fr13aCjb7IQCDGsBqAdGEU,313 +torio/_extension/__pycache__/__init__.cpython-310.pyc,, +torio/_extension/__pycache__/utils.cpython-310.pyc,, +torio/_extension/utils.py,sha256=ktE0L_z-RF1qkpLVGgdG4DEGHa2Zn6uokOAmwC7Evvo,4904 +torio/io/__init__.py,sha256=xz7REkkyfRhAASzVCAfoNruFtAGIx1I--usPAa2tMww,226 +torio/io/__pycache__/__init__.cpython-310.pyc,, +torio/io/__pycache__/_streaming_media_decoder.cpython-310.pyc,, +torio/io/__pycache__/_streaming_media_encoder.cpython-310.pyc,, +torio/io/_streaming_media_decoder.py,sha256=vSylEWAB_JXOW-0E1n0zDM3Q3Vf1jc1-CNpdUSs13XU,34376 
+torio/io/_streaming_media_encoder.py,sha256=rSTYmHdi7RPJ6YPgAyGJhbQvn4mcxLem3nlnr_ophTs,19722 +torio/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +torio/lib/__pycache__/__init__.cpython-310.pyc,, +torio/lib/_torio_ffmpeg4.so,sha256=SEdANNFdmym4nsCe6yHKMR2xbomyBMT1yozrbTjkgs8,491592 +torio/lib/_torio_ffmpeg5.so,sha256=_1Kna9G1qyyJ1LpiydqckJ2BTcTyZv7G8P016d1gOs8,491592 +torio/lib/_torio_ffmpeg6.so,sha256=xx-Iy_RdlvJjC0nmbKlSDk8BBiOKzXye0mkrfPVaHqE,491592 +torio/lib/libtorio_ffmpeg4.so,sha256=03A-zqG3-P6pukptuxcxrvDe1VKs0eU-6Kh7HrOALiI,652080 +torio/lib/libtorio_ffmpeg5.so,sha256=Y5ujutnM5y440vY_yuY2UpExSiUjaB7TuTqWJsABQJg,652080 +torio/lib/libtorio_ffmpeg6.so,sha256=lvIorEVbavPOF4G2njijwyForyajUacEm2mhNEaMAnU,652080 +torio/utils/__init__.py,sha256=ScHtnontymRDNn9qEIC0neue5mfG82yhB8bwETOb0Z4,56 +torio/utils/__pycache__/__init__.cpython-310.pyc,, +torio/utils/__pycache__/ffmpeg_utils.cpython-310.pyc,, +torio/utils/ffmpeg_utils.py,sha256=JsP2ptjQAE4U7Z_CSauQKH_k72wdu6nrBMfNHl9pIXQ,8026 diff --git a/venv/lib/python3.10/site-packages/torchaudio-2.6.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/torchaudio-2.6.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..94856657663aef513dc1fcecdf40a37eb27b1551 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio-2.6.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (75.8.0) +Root-Is-Purelib: false +Tag: cp310-cp310-linux_x86_64 + diff --git a/venv/lib/python3.10/site-packages/torchaudio-2.6.0.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/torchaudio-2.6.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..9f448fc64e7113394edf208556101c579616cc18 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio-2.6.0.dist-info/top_level.txt @@ -0,0 +1,2 @@ +torchaudio +torio diff --git a/venv/lib/python3.10/site-packages/torchaudio/__init__.py 
b/venv/lib/python3.10/site-packages/torchaudio/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..90b411eae1f27ff5dc8adf4917c1ea8725cd2c9a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/__init__.py @@ -0,0 +1,53 @@ +# Initialize extension and backend first +from . import _extension # noqa # usort: skip +from ._backend import ( # noqa # usort: skip + AudioMetaData, + get_audio_backend, + info, + list_audio_backends, + load, + save, + set_audio_backend, +) + +from . import ( # noqa: F401 + compliance, + datasets, + functional, + io, + kaldi_io, + models, + pipelines, + sox_effects, + transforms, + utils, +) + +# For BC +from . import backend # noqa # usort: skip + +try: + from .version import __version__, git_version # noqa: F401 +except ImportError: + pass + + +__all__ = [ + "AudioMetaData", + "load", + "info", + "save", + "io", + "compliance", + "datasets", + "functional", + "models", + "pipelines", + "kaldi_io", + "utils", + "sox_effects", + "transforms", + "list_audio_backends", + "get_audio_backend", + "set_audio_backend", +] diff --git a/venv/lib/python3.10/site-packages/torchaudio/datasets/yesno.py b/venv/lib/python3.10/site-packages/torchaudio/datasets/yesno.py new file mode 100644 index 0000000000000000000000000000000000000000..baad08f1593a49af5f95658e8d4b67be6d3deeb9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/datasets/yesno.py @@ -0,0 +1,89 @@ +import os +from pathlib import Path +from typing import List, Tuple, Union + +import torchaudio +from torch import Tensor +from torch.utils.data import Dataset +from torchaudio._internal import download_url_to_file +from torchaudio.datasets.utils import _extract_tar + + +_RELEASE_CONFIGS = { + "release1": { + "folder_in_archive": "waves_yesno", + "url": "http://www.openslr.org/resources/1/waves_yesno.tar.gz", + "checksum": "c3f49e0cca421f96b75b41640749167b52118f232498667ca7a5f9416aef8e73", + } +} + + +class YESNO(Dataset): + """*YesNo* 
:cite:`YesNo` dataset. + + Args: + root (str or Path): Path to the directory where the dataset is found or downloaded. + url (str, optional): The URL to download the dataset from. + (default: ``"http://www.openslr.org/resources/1/waves_yesno.tar.gz"``) + folder_in_archive (str, optional): + The top-level directory of the dataset. (default: ``"waves_yesno"``) + download (bool, optional): + Whether to download the dataset if it is not found at root path. (default: ``False``). + """ + + def __init__( + self, + root: Union[str, Path], + url: str = _RELEASE_CONFIGS["release1"]["url"], + folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"], + download: bool = False, + ) -> None: + + self._parse_filesystem(root, url, folder_in_archive, download) + + def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None: + root = Path(root) + archive = os.path.basename(url) + archive = root / archive + + self._path = root / folder_in_archive + if download: + if not os.path.isdir(self._path): + if not os.path.isfile(archive): + checksum = _RELEASE_CONFIGS["release1"]["checksum"] + download_url_to_file(url, archive, hash_prefix=checksum) + _extract_tar(archive) + + if not os.path.isdir(self._path): + raise RuntimeError("Dataset not found. Please use `download=True` to download it.") + + self._walker = sorted(str(p.stem) for p in Path(self._path).glob("*.wav")) + + def _load_item(self, fileid: str, path: str): + labels = [int(c) for c in fileid.split("_")] + file_audio = os.path.join(path, fileid + ".wav") + waveform, sample_rate = torchaudio.load(file_audio) + return waveform, sample_rate, labels + + def __getitem__(self, n: int) -> Tuple[Tensor, int, List[int]]: + """Load the n-th sample from the dataset. 
+ + Args: + n (int): The index of the sample to be loaded + + Returns: + Tuple of the following items; + + Tensor: + Waveform + int: + Sample rate + List[int]: + labels + """ + fileid = self._walker[n] + item = self._load_item(fileid, self._path) + return item + + def __len__(self) -> int: + return len(self._walker) diff --git a/venv/lib/python3.10/site-packages/torchaudio/functional/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/functional/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e795e2ff89bab28463259c9508e136271bf19a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/functional/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/functional/__pycache__/_alignment.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/functional/__pycache__/_alignment.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8da36eb7f68da9579aaea8aa4ce16d78e9973b72 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/functional/__pycache__/_alignment.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/functional/filtering.py b/venv/lib/python3.10/site-packages/torchaudio/functional/filtering.py new file mode 100644 index 0000000000000000000000000000000000000000..1628d82c33c2a33a48ae49842cf012f4054c509c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/functional/filtering.py @@ -0,0 +1,1670 @@ +import math +import warnings +from typing import Optional + +import torch +from torch import Tensor + +from torchaudio._extension import _IS_TORCHAUDIO_EXT_AVAILABLE + + +def _dB2Linear(x: float) -> float: + return math.exp(x * math.log(10) / 20.0) + + +def _generate_wave_table( + wave_type: str, + data_type: str, + table_size: int, + min: float, + max: float, + phase: float, + device: torch.device, +) -> Tensor: + 
r"""A helper function for phaser. Generates a table with given parameters. + + Args: + wave_type (str): SINE or TRIANGULAR + data_type (str): desired data_type ( `INT` or `FLOAT` ) + table_size (int): desired table size + min (float): desired min value + max (float): desired max value + phase (float): desired phase + device (torch.device): Torch device on which table must be generated + Returns: + Tensor: A 1D tensor with wave table values + """ + + phase_offset = int(phase / math.pi / 2 * table_size + 0.5) + + t = torch.arange(table_size, device=device, dtype=torch.int32) + + point = (t + phase_offset) % table_size + + d = torch.zeros_like(point, device=device, dtype=torch.float64) + + if wave_type == "SINE": + d = (torch.sin(point.to(torch.float64) / table_size * 2 * math.pi) + 1) / 2 + elif wave_type == "TRIANGLE": + d = point.to(torch.float64) * 2 / table_size + value = torch.div(4 * point, table_size, rounding_mode="floor") + d[value == 0] = d[value == 0] + 0.5 + d[value == 1] = 1.5 - d[value == 1] + d[value == 2] = 1.5 - d[value == 2] + d[value == 3] = d[value == 3] - 1.5 + + d = d * (max - min) + min + + if data_type == "INT": + mask = d < 0 + d[mask] = d[mask] - 0.5 + d[~mask] = d[~mask] + 0.5 + d = d.to(torch.int32) + elif data_type == "FLOAT": + d = d.to(torch.float32) + + return d + + +def allpass_biquad(waveform: Tensor, sample_rate: int, central_freq: float, Q: float = 0.707) -> Tensor: + r"""Design two-pole all-pass filter. Similar to SoX implementation. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + waveform(torch.Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, e.g. 
44100 (Hz) + central_freq (float or torch.Tensor): central frequency (in Hz) + Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``) + + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF + """ + dtype = waveform.dtype + device = waveform.device + central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device) + Q = torch.as_tensor(Q, dtype=dtype, device=device) + + w0 = 2 * math.pi * central_freq / sample_rate + + alpha = torch.sin(w0) / 2 / Q + + b0 = 1 - alpha + b1 = -2 * torch.cos(w0) + b2 = 1 + alpha + a0 = 1 + alpha + a1 = -2 * torch.cos(w0) + a2 = 1 - alpha + return biquad(waveform, b0, b1, b2, a0, a1, a2) + + +def band_biquad( + waveform: Tensor, + sample_rate: int, + central_freq: float, + Q: float = 0.707, + noise: bool = False, +) -> Tensor: + r"""Design two-pole band filter. Similar to SoX implementation. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) + central_freq (float or torch.Tensor): central frequency (in Hz) + Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``). + noise (bool, optional) : If ``True``, uses the alternate mode for un-pitched audio (e.g. percussion). + If ``False``, uses mode oriented to pitched audio, i.e. voice, singing, + or instrumental music (Default: ``False``). 
+ + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF + """ + dtype = waveform.dtype + device = waveform.device + central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device) + Q = torch.as_tensor(Q, dtype=dtype, device=device) + + w0 = 2 * math.pi * central_freq / sample_rate + bw_Hz = central_freq / Q + + a0 = 1.0 + a2 = torch.exp(-2 * math.pi * bw_Hz / sample_rate) + a1 = -4 * a2 / (1 + a2) * torch.cos(w0) + + b0 = torch.sqrt(1 - a1 * a1 / (4 * a2)) * (1 - a2) + + if noise: + mult = torch.sqrt(((1 + a2) * (1 + a2) - a1 * a1) * (1 - a2) / (1 + a2)) / b0 + b0 = mult * b0 + + b1 = 0.0 + b2 = 0.0 + + return biquad(waveform, b0, b1, b2, a0, a1, a2) + + +def bandpass_biquad( + waveform: Tensor, + sample_rate: int, + central_freq: float, + Q: float = 0.707, + const_skirt_gain: bool = False, +) -> Tensor: + r"""Design two-pole band-pass filter. Similar to SoX implementation. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) + central_freq (float or torch.Tensor): central frequency (in Hz) + Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``) + const_skirt_gain (bool, optional) : If ``True``, uses a constant skirt gain (peak gain = Q). + If ``False``, uses a constant 0dB peak gain. 
(Default: ``False``) + + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF + """ + dtype = waveform.dtype + device = waveform.device + central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device) + Q = torch.as_tensor(Q, dtype=dtype, device=device) + + w0 = 2 * math.pi * central_freq / sample_rate + alpha = torch.sin(w0) / 2 / Q + + temp = torch.sin(w0) / 2 if const_skirt_gain else alpha + b0 = temp + b1 = 0.0 + b2 = -temp + a0 = 1 + alpha + a1 = -2 * torch.cos(w0) + a2 = 1 - alpha + return biquad(waveform, b0, b1, b2, a0, a1, a2) + + +def bandreject_biquad(waveform: Tensor, sample_rate: int, central_freq: float, Q: float = 0.707) -> Tensor: + r"""Design two-pole band-reject filter. Similar to SoX implementation. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, e.g. 
44100 (Hz) + central_freq (float or torch.Tensor): central frequency (in Hz) + Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``) + + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF + """ + dtype = waveform.dtype + device = waveform.device + central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device) + Q = torch.as_tensor(Q, dtype=dtype, device=device) + + w0 = 2 * math.pi * central_freq / sample_rate + alpha = torch.sin(w0) / 2 / Q + + b0 = 1.0 + b1 = -2 * torch.cos(w0) + b2 = 1.0 + a0 = 1 + alpha + a1 = -2 * torch.cos(w0) + a2 = 1 - alpha + return biquad(waveform, b0, b1, b2, a0, a1, a2) + + +def bass_biquad( + waveform: Tensor, + sample_rate: int, + gain: float, + central_freq: float = 100, + Q: float = 0.707, +) -> Tensor: + r"""Design a bass tone-control effect. Similar to SoX implementation. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) + gain (float or torch.Tensor): desired gain at the boost (or attenuation) in dB. + central_freq (float or torch.Tensor, optional): central frequency (in Hz). (Default: ``100``) + Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``). 
+ + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF + """ + dtype = waveform.dtype + device = waveform.device + central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device) + Q = torch.as_tensor(Q, dtype=dtype, device=device) + gain = torch.as_tensor(gain, dtype=dtype, device=device) + + w0 = 2 * math.pi * central_freq / sample_rate + alpha = torch.sin(w0) / 2 / Q + A = torch.exp(gain / 40 * math.log(10)) + + temp1 = 2 * torch.sqrt(A) * alpha + temp2 = (A - 1) * torch.cos(w0) + temp3 = (A + 1) * torch.cos(w0) + + b0 = A * ((A + 1) - temp2 + temp1) + b1 = 2 * A * ((A - 1) - temp3) + b2 = A * ((A + 1) - temp2 - temp1) + a0 = (A + 1) + temp2 + temp1 + a1 = -2 * ((A - 1) + temp3) + a2 = (A + 1) + temp2 - temp1 + + return biquad(waveform, b0 / a0, b1 / a0, b2 / a0, a0 / a0, a1 / a0, a2 / a0) + + +def biquad(waveform: Tensor, b0: float, b1: float, b2: float, a0: float, a1: float, a2: float) -> Tensor: + r"""Perform a biquad filter of input tensor. Initial conditions set to 0. + + .. devices:: CPU CUDA + + .. 
properties:: Autograd TorchScript + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + b0 (float or torch.Tensor): numerator coefficient of current input, x[n] + b1 (float or torch.Tensor): numerator coefficient of input one time step ago x[n-1] + b2 (float or torch.Tensor): numerator coefficient of input two time steps ago x[n-2] + a0 (float or torch.Tensor): denominator coefficient of current output y[n], typically 1 + a1 (float or torch.Tensor): denominator coefficient of current output y[n-1] + a2 (float or torch.Tensor): denominator coefficient of current output y[n-2] + + Returns: + Tensor: Waveform with dimension of `(..., time)` + + Reference: + - https://en.wikipedia.org/wiki/Digital_biquad_filter + """ + + device = waveform.device + dtype = waveform.dtype + + b0 = torch.as_tensor(b0, dtype=dtype, device=device).view(1) + b1 = torch.as_tensor(b1, dtype=dtype, device=device).view(1) + b2 = torch.as_tensor(b2, dtype=dtype, device=device).view(1) + a0 = torch.as_tensor(a0, dtype=dtype, device=device).view(1) + a1 = torch.as_tensor(a1, dtype=dtype, device=device).view(1) + a2 = torch.as_tensor(a2, dtype=dtype, device=device).view(1) + + output_waveform = lfilter( + waveform, + torch.cat([a0, a1, a2]), + torch.cat([b0, b1, b2]), + ) + return output_waveform + + +def contrast(waveform: Tensor, enhancement_amount: float = 75.0) -> Tensor: + r"""Apply contrast effect. Similar to SoX implementation. + + .. devices:: CPU CUDA + + .. 
properties:: Autograd TorchScript + + Comparable with compression, this effect modifies an audio signal to make it sound louder + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + enhancement_amount (float, optional): controls the amount of the enhancement + Allowed range of values for enhancement_amount : 0-100 + Note that enhancement_amount = 0 still gives a significant contrast enhancement + + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + """ + + if not 0 <= enhancement_amount <= 100: + raise ValueError("Allowed range of values for enhancement_amount : 0-100") + + contrast = enhancement_amount / 750.0 + + temp1 = waveform * (math.pi / 2) + temp2 = contrast * torch.sin(temp1 * 4) + output_waveform = torch.sin(temp1 + temp2) + + return output_waveform + + +def dcshift(waveform: Tensor, shift: float, limiter_gain: Optional[float] = None) -> Tensor: + r"""Apply a DC shift to the audio. Similar to SoX implementation. + + .. devices:: CPU CUDA + + .. properties:: TorchScript + + This can be useful to remove a DC offset + (caused perhaps by a hardware problem in the recording chain) from the audio + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + shift (float): indicates the amount to shift the audio + Allowed range of values for shift : -2.0 to +2.0 + limiter_gain (float of None, optional): It is used only on peaks to prevent clipping + It should have a value much less than 1 (e.g. 
0.05 or 0.02) + + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + """ + output_waveform = waveform + limiter_threshold = 0.0 + + if limiter_gain is not None: + limiter_threshold = 1.0 - (abs(shift) - limiter_gain) + + # Note: + # the following index-based update breaks auto-grad support + if limiter_gain is not None and shift > 0: + mask = waveform > limiter_threshold + temp = (waveform[mask] - limiter_threshold) * limiter_gain / (1 - limiter_threshold) + output_waveform[mask] = (temp + limiter_threshold + shift).clamp(max=limiter_threshold) + output_waveform[~mask] = (waveform[~mask] + shift).clamp(min=-1, max=1) + elif limiter_gain is not None and shift < 0: + mask = waveform < -limiter_threshold + temp = (waveform[mask] + limiter_threshold) * limiter_gain / (1 - limiter_threshold) + output_waveform[mask] = (temp - limiter_threshold + shift).clamp(min=-limiter_threshold) + output_waveform[~mask] = (waveform[~mask] + shift).clamp(min=-1, max=1) + else: + output_waveform = (waveform + shift).clamp(min=-1, max=1) + + return output_waveform + + +def deemph_biquad(waveform: Tensor, sample_rate: int) -> Tensor: + r"""Apply ISO 908 CD de-emphasis (shelving) IIR filter. Similar to SoX implementation. + + .. devices:: CPU CUDA + + .. 
properties:: Autograd TorchScript + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, Allowed sample rate ``44100`` or ``48000`` + + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF + """ + + if sample_rate == 44100: + central_freq = 5283 + width_slope = 0.4845 + gain = -9.477 + elif sample_rate == 48000: + central_freq = 5356 + width_slope = 0.479 + gain = -9.62 + else: + raise ValueError("Sample rate must be 44100 (audio-CD) or 48000 (DAT)") + + w0 = 2 * math.pi * central_freq / sample_rate + A = math.exp(gain / 40.0 * math.log(10)) + alpha = math.sin(w0) / 2 * math.sqrt((A + 1 / A) * (1 / width_slope - 1) + 2) + + temp1 = 2 * math.sqrt(A) * alpha + temp2 = (A - 1) * math.cos(w0) + temp3 = (A + 1) * math.cos(w0) + + b0 = A * ((A + 1) + temp2 + temp1) + b1 = -2 * A * ((A - 1) + temp3) + b2 = A * ((A + 1) + temp2 - temp1) + a0 = (A + 1) - temp2 + temp1 + a1 = 2 * ((A - 1) - temp3) + a2 = (A + 1) - temp2 - temp1 + + return biquad(waveform, b0, b1, b2, a0, a1, a2) + + +def _add_noise_shaping(dithered_waveform: Tensor, waveform: Tensor) -> Tensor: + r"""Noise shaping is calculated by error: + error[n] = dithered[n] - original[n] + noise_shaped_waveform[n] = dithered[n] + error[n-1] + """ + wf_shape = waveform.size() + waveform = waveform.reshape(-1, wf_shape[-1]) + + dithered_shape = dithered_waveform.size() + dithered_waveform = dithered_waveform.reshape(-1, dithered_shape[-1]) + + error = dithered_waveform - waveform + + # add error[n-1] to dithered_waveform[n], so offset the error by 1 index + zeros = torch.zeros(1, dtype=error.dtype, device=error.device) + for index in range(error.size()[0]): + err = error[index] + error_offset = torch.cat((zeros, err)) + error[index] = error_offset[: waveform.size()[1]] + + noise_shaped = dithered_waveform + error + return 
noise_shaped.reshape(dithered_shape[:-1] + noise_shaped.shape[-1:]) + + +def _apply_probability_distribution(waveform: Tensor, density_function: str = "TPDF") -> Tensor: + r"""Apply a probability distribution function on a waveform. + + Triangular probability density function (TPDF) dither noise has a + triangular distribution; values in the center of the range have a higher + probability of occurring. + + Rectangular probability density function (RPDF) dither noise has a + uniform distribution; any value in the specified range has the same + probability of occurring. + + Gaussian probability density function (GPDF) has a normal distribution. + The relationship of probabilities of results follows a bell-shaped, + or Gaussian curve, typical of dither generated by analog sources. + Args: + waveform (Tensor): Tensor of audio of dimension (..., time) + density_function (str, optional): The density function of a + continuous random variable (Default: ``"TPDF"``) + Options: Triangular Probability Density Function - `TPDF` + Rectangular Probability Density Function - `RPDF` + Gaussian Probability Density Function - `GPDF` + Returns: + Tensor: waveform dithered with TPDF + """ + + # pack batch + shape = waveform.size() + waveform = waveform.reshape(-1, shape[-1]) + + channel_size = waveform.size()[0] - 1 + time_size = waveform.size()[-1] - 1 + + random_channel = ( + int( + torch.randint( + channel_size, + [ + 1, + ], + ).item() + ) + if channel_size > 0 + else 0 + ) + random_time = ( + int( + torch.randint( + time_size, + [ + 1, + ], + ).item() + ) + if time_size > 0 + else 0 + ) + + number_of_bits = 16 + up_scaling = 2 ** (number_of_bits - 1) - 2 + signal_scaled = waveform * up_scaling + down_scaling = 2 ** (number_of_bits - 1) + + signal_scaled_dis = waveform + if density_function == "RPDF": + RPDF = waveform[random_channel][random_time] - 0.5 + + signal_scaled_dis = signal_scaled + RPDF + elif density_function == "GPDF": + # TODO Replace by distribution code once + # 
https://github.com/pytorch/pytorch/issues/29843 is resolved + # gaussian = torch.distributions.normal.Normal(torch.mean(waveform, -1), 1).sample() + + num_rand_variables = 6 + + gaussian = waveform[random_channel][random_time] + for ws in num_rand_variables * [time_size]: + rand_chan = int( + torch.randint( + channel_size, + [ + 1, + ], + ).item() + ) + gaussian += waveform[rand_chan][ + int( + torch.randint( + ws, + [ + 1, + ], + ).item() + ) + ] + + signal_scaled_dis = signal_scaled + gaussian + else: + # dtype needed for https://github.com/pytorch/pytorch/issues/32358 + TPDF = torch.bartlett_window(time_size + 1, dtype=signal_scaled.dtype, device=signal_scaled.device) + TPDF = TPDF.repeat((channel_size + 1), 1) + signal_scaled_dis = signal_scaled + TPDF + + quantised_signal_scaled = torch.round(signal_scaled_dis) + quantised_signal = quantised_signal_scaled / down_scaling + + # unpack batch + return quantised_signal.reshape(shape[:-1] + quantised_signal.shape[-1:]) + + +def dither(waveform: Tensor, density_function: str = "TPDF", noise_shaping: bool = False) -> Tensor: + r"""Apply dither + + .. devices:: CPU CUDA + + .. properties:: TorchScript + + Dither increases the perceived dynamic range of audio stored at a + particular bit-depth by eliminating nonlinear truncation distortion + (i.e. adding minimally perceived noise to mask distortion caused by quantization). + + Args: + waveform (Tensor): Tensor of audio of dimension (..., time) + density_function (str, optional): + The density function of a continuous random variable. One of + ``"TPDF"`` (Triangular Probability Density Function), + ``"RPDF"`` (Rectangular Probability Density Function) or + ``"GPDF"`` (Gaussian Probability Density Function) (Default: ``"TPDF"``). 
+ noise_shaping (bool, optional): a filtering process that shapes the spectral + energy of quantisation error (Default: ``False``) + + Returns: + Tensor: waveform dithered + """ + dithered = _apply_probability_distribution(waveform, density_function=density_function) + + if noise_shaping: + return _add_noise_shaping(dithered, waveform) + else: + return dithered + + +def equalizer_biquad( + waveform: Tensor, + sample_rate: int, + center_freq: float, + gain: float, + Q: float = 0.707, +) -> Tensor: + r"""Design biquad peaking equalizer filter and perform filtering. Similar to SoX implementation. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) + center_freq (float): filter's central frequency + gain (float or torch.Tensor): desired gain at the boost (or attenuation) in dB + Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``) + + Returns: + Tensor: Waveform of dimension of `(..., time)` + """ + dtype = waveform.dtype + device = waveform.device + center_freq = torch.as_tensor(center_freq, dtype=dtype, device=device) + Q = torch.as_tensor(Q, dtype=dtype, device=device) + gain = torch.as_tensor(gain, dtype=dtype, device=device) + + w0 = 2 * math.pi * center_freq / sample_rate + A = torch.exp(gain / 40.0 * math.log(10)) + alpha = torch.sin(w0) / 2 / Q + + b0 = 1 + alpha * A + b1 = -2 * torch.cos(w0) + b2 = 1 - alpha * A + a0 = 1 + alpha / A + a1 = -2 * torch.cos(w0) + a2 = 1 - alpha / A + return biquad(waveform, b0, b1, b2, a0, a1, a2) + + +def filtfilt( + waveform: Tensor, + a_coeffs: Tensor, + b_coeffs: Tensor, + clamp: bool = True, +) -> Tensor: + r"""Apply an IIR filter forward and backward to a waveform. + + .. devices:: CPU CUDA + + .. 
properties:: Autograd TorchScript + + Inspired by https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)`. Must be normalized to -1 to 1. + a_coeffs (Tensor): denominator coefficients of difference equation of dimension of either + 1D with shape `(num_order + 1)` or 2D with shape `(num_filters, num_order + 1)`. + Lower delay coefficients are first, e.g. ``[a0, a1, a2, ...]``. + Must be same size as b_coeffs (pad with 0's as necessary). + b_coeffs (Tensor): numerator coefficients of difference equation of dimension of either + 1D with shape `(num_order + 1)` or 2D with shape `(num_filters, num_order + 1)`. + Lower delay coefficients are first, e.g. ``[b0, b1, b2, ...]``. + Must be same size as a_coeffs (pad with 0's as necessary). + clamp (bool, optional): If ``True``, clamp the output signal to be in the range [-1, 1] (Default: ``True``) + + Returns: + Tensor: Waveform with dimension of either `(..., num_filters, time)` if ``a_coeffs`` and ``b_coeffs`` + are 2D Tensors, or `(..., time)` otherwise. + """ + forward_filtered = lfilter(waveform, a_coeffs, b_coeffs, clamp=False, batching=True) + backward_filtered = lfilter( + forward_filtered.flip(-1), + a_coeffs, + b_coeffs, + clamp=clamp, + batching=True, + ).flip(-1) + return backward_filtered + + +def flanger( + waveform: Tensor, + sample_rate: int, + delay: float = 0.0, + depth: float = 2.0, + regen: float = 0.0, + width: float = 71.0, + speed: float = 0.5, + phase: float = 25.0, + modulation: str = "sinusoidal", + interpolation: str = "linear", +) -> Tensor: + r"""Apply a flanger effect to the audio. Similar to SoX implementation. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + waveform (Tensor): audio waveform of dimension of `(..., channel, time)` . + Max 4 channels allowed + sample_rate (int): sampling rate of the waveform, e.g. 
44100 (Hz) + delay (float, optional): desired delay in milliseconds(ms) + Allowed range of values are 0 to 30 + depth (float, optional): desired delay depth in milliseconds(ms) + Allowed range of values are 0 to 10 + regen (float, optional): desired regen(feedback gain) in dB + Allowed range of values are -95 to 95 + width (float, optional): desired width(delay gain) in dB + Allowed range of values are 0 to 100 + speed (float, optional): modulation speed in Hz + Allowed range of values are 0.1 to 10 + phase (float, optional): percentage phase-shift for multi-channel + Allowed range of values are 0 to 100 + modulation (str, optional): Use either "sinusoidal" or "triangular" modulation. (Default: ``sinusoidal``) + interpolation (str, optional): Use either "linear" or "quadratic" for delay-line interpolation. + (Default: ``linear``) + + Returns: + Tensor: Waveform of dimension of `(..., channel, time)` + + Reference: + - http://sox.sourceforge.net/sox.html + + - Scott Lehman, `Effects Explained`_, + + .. 
_Effects Explained: + https://web.archive.org/web/20051125072557/http://www.harmony-central.com/Effects/effects-explained.html + """ + + if modulation not in ("sinusoidal", "triangular"): + raise ValueError('Only "sinusoidal" or "triangular" modulation allowed') + + if interpolation not in ("linear", "quadratic"): + raise ValueError('Only "linear" or "quadratic" interpolation allowed') + + actual_shape = waveform.shape + device, dtype = waveform.device, waveform.dtype + + if actual_shape[-2] > 4: + raise ValueError("Max 4 channels allowed") + + # convert to 3D (batch, channels, time) + waveform = waveform.view(-1, actual_shape[-2], actual_shape[-1]) + + # Scaling + feedback_gain = regen / 100 + delay_gain = width / 100 + channel_phase = phase / 100 + delay_min = delay / 1000 + delay_depth = depth / 1000 + + n_channels = waveform.shape[-2] + + if modulation == "sinusoidal": + wave_type = "SINE" + else: + wave_type = "TRIANGLE" + + # Balance output: + in_gain = 1.0 / (1 + delay_gain) + delay_gain = delay_gain / (1 + delay_gain) + + # Balance feedback loop: + delay_gain = delay_gain * (1 - abs(feedback_gain)) + + delay_buf_length = int((delay_min + delay_depth) * sample_rate + 0.5) + delay_buf_length = delay_buf_length + 2 + + delay_bufs = torch.zeros(waveform.shape[0], n_channels, delay_buf_length, dtype=dtype, device=device) + delay_last = torch.zeros(waveform.shape[0], n_channels, dtype=dtype, device=device) + + lfo_length = int(sample_rate / speed) + + table_min = math.floor(delay_min * sample_rate + 0.5) + table_max = delay_buf_length - 2.0 + + lfo = _generate_wave_table( + wave_type=wave_type, + data_type="FLOAT", + table_size=lfo_length, + min=float(table_min), + max=float(table_max), + phase=3 * math.pi / 2, + device=device, + ) + + output_waveform = torch.zeros_like(waveform, dtype=dtype, device=device) + + delay_buf_pos = 0 + lfo_pos = 0 + channel_idxs = torch.arange(0, n_channels, device=device) + + for i in range(waveform.shape[-1]): + + delay_buf_pos = 
(delay_buf_pos + delay_buf_length - 1) % delay_buf_length + + cur_channel_phase = (channel_idxs * lfo_length * channel_phase + 0.5).to(torch.int64) + delay_tensor = lfo[(lfo_pos + cur_channel_phase) % lfo_length] + frac_delay = torch.frac(delay_tensor) + delay_tensor = torch.floor(delay_tensor) + + int_delay = delay_tensor.to(torch.int64) + + temp = waveform[:, :, i] + + delay_bufs[:, :, delay_buf_pos] = temp + delay_last * feedback_gain + + delayed_0 = delay_bufs[:, channel_idxs, (delay_buf_pos + int_delay) % delay_buf_length] + + int_delay = int_delay + 1 + + delayed_1 = delay_bufs[:, channel_idxs, (delay_buf_pos + int_delay) % delay_buf_length] + + int_delay = int_delay + 1 + + if interpolation == "linear": + delayed = delayed_0 + (delayed_1 - delayed_0) * frac_delay + else: + delayed_2 = delay_bufs[:, channel_idxs, (delay_buf_pos + int_delay) % delay_buf_length] + + int_delay = int_delay + 1 + + delayed_2 = delayed_2 - delayed_0 + delayed_1 = delayed_1 - delayed_0 + a = delayed_2 * 0.5 - delayed_1 + b = delayed_1 * 2 - delayed_2 * 0.5 + + delayed = delayed_0 + (a * frac_delay + b) * frac_delay + + delay_last = delayed + output_waveform[:, :, i] = waveform[:, :, i] * in_gain + delayed * delay_gain + + lfo_pos = (lfo_pos + 1) % lfo_length + + return output_waveform.clamp(min=-1, max=1).view(actual_shape) + + +def gain(waveform: Tensor, gain_db: float = 1.0) -> Tensor: + r"""Apply amplification or attenuation to the whole waveform. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + waveform (Tensor): Tensor of audio of dimension (..., time). + gain_db (float, optional) Gain adjustment in decibels (dB) (Default: ``1.0``). + + Returns: + Tensor: the whole waveform amplified by gain_db. 
+ """ + if gain_db == 0: + return waveform + + ratio = 10 ** (gain_db / 20) + + return waveform * ratio + + +def highpass_biquad(waveform: Tensor, sample_rate: int, cutoff_freq: float, Q: float = 0.707) -> Tensor: + r"""Design biquad highpass filter and perform filtering. Similar to SoX implementation. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) + cutoff_freq (float or torch.Tensor): filter cutoff frequency + Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``) + + Returns: + Tensor: Waveform dimension of `(..., time)` + """ + dtype = waveform.dtype + device = waveform.device + cutoff_freq = torch.as_tensor(cutoff_freq, dtype=dtype, device=device) + Q = torch.as_tensor(Q, dtype=dtype, device=device) + + w0 = 2 * math.pi * cutoff_freq / sample_rate + alpha = torch.sin(w0) / 2.0 / Q + + b0 = (1 + torch.cos(w0)) / 2 + b1 = -1 - torch.cos(w0) + b2 = b0 + a0 = 1 + alpha + a1 = -2 * torch.cos(w0) + a2 = 1 - alpha + return biquad(waveform, b0, b1, b2, a0, a1, a2) + + +def _lfilter_core_generic_loop(input_signal_windows: Tensor, a_coeffs_flipped: Tensor, padded_output_waveform: Tensor): + n_order = a_coeffs_flipped.size(1) + a_coeffs_flipped = a_coeffs_flipped.unsqueeze(2) + for i_sample, o0 in enumerate(input_signal_windows.permute(2, 0, 1)): + windowed_output_signal = padded_output_waveform[:, :, i_sample : i_sample + n_order] + o0 -= (windowed_output_signal.transpose(0, 1) @ a_coeffs_flipped)[..., 0].t() + padded_output_waveform[:, :, i_sample + n_order - 1] = o0 + + +if _IS_TORCHAUDIO_EXT_AVAILABLE: + _lfilter_core_cpu_loop = torch.ops.torchaudio._lfilter_core_loop +else: + _lfilter_core_cpu_loop = _lfilter_core_generic_loop + + +def _lfilter_core( + waveform: Tensor, + a_coeffs: Tensor, + b_coeffs: Tensor, +) -> Tensor: + + if a_coeffs.size() != 
b_coeffs.size(): + raise ValueError( + "Expected coeffs to be the same size." + f"Found a_coeffs size: {a_coeffs.size()}, b_coeffs size: {b_coeffs.size()}" + ) + if waveform.ndim != 3: + raise ValueError(f"Expected waveform to be 3 dimensional. Found: {waveform.ndim}") + if not (waveform.device == a_coeffs.device == b_coeffs.device): + raise ValueError( + "Expected waveform and coeffs to be on the same device." + f"Found: waveform device:{waveform.device}, a_coeffs device: {a_coeffs.device}, " + f"b_coeffs device: {b_coeffs.device}" + ) + + n_batch, n_channel, n_sample = waveform.size() + n_order = a_coeffs.size(1) + if n_order <= 0: + raise ValueError(f"Expected n_order to be positive. Found: {n_order}") + + # Pad the input and create output + + padded_waveform = torch.nn.functional.pad(waveform, [n_order - 1, 0]) + padded_output_waveform = torch.zeros_like(padded_waveform) + + # Set up the coefficients matrix + # Flip coefficients' order + a_coeffs_flipped = a_coeffs.flip(1) + b_coeffs_flipped = b_coeffs.flip(1) + + # calculate windowed_input_signal in parallel using convolution + input_signal_windows = torch.nn.functional.conv1d(padded_waveform, b_coeffs_flipped.unsqueeze(1), groups=n_channel) + + input_signal_windows.div_(a_coeffs[:, :1]) + a_coeffs_flipped.div_(a_coeffs[:, :1]) + + if ( + input_signal_windows.device == torch.device("cpu") + and a_coeffs_flipped.device == torch.device("cpu") + and padded_output_waveform.device == torch.device("cpu") + ): + _lfilter_core_cpu_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform) + else: + _lfilter_core_generic_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform) + + output = padded_output_waveform[:, :, n_order - 1 :] + return output + + +if _IS_TORCHAUDIO_EXT_AVAILABLE: + _lfilter = torch.ops.torchaudio._lfilter +else: + _lfilter = _lfilter_core + + +def lfilter(waveform: Tensor, a_coeffs: Tensor, b_coeffs: Tensor, clamp: bool = True, batching: bool = True) -> Tensor: + r"""Perform 
an IIR filter by evaluating difference equation, using differentiable implementation + developed separately by *Yu et al.* :cite:`ismir_YuF23` and *Forgione et al.* :cite:`forgione2021dynonet`. + The gradients of ``a_coeffs`` are computed based on a faster algorithm from :cite:`ycy2024diffapf`. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Note: + To avoid numerical problems, small filter order is preferred. + Using double precision could also minimize numerical precision errors. + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)`. Must be normalized to -1 to 1. + a_coeffs (Tensor): denominator coefficients of difference equation of dimension of either + 1D with shape `(num_order + 1)` or 2D with shape `(num_filters, num_order + 1)`. + Lower delays coefficients are first, e.g. ``[a0, a1, a2, ...]``. + Must be same size as b_coeffs (pad with 0's as necessary). + b_coeffs (Tensor): numerator coefficients of difference equation of dimension of either + 1D with shape `(num_order + 1)` or 2D with shape `(num_filters, num_order + 1)`. + Lower delays coefficients are first, e.g. ``[b0, b1, b2, ...]``. + Must be same size as a_coeffs (pad with 0's as necessary). + clamp (bool, optional): If ``True``, clamp the output signal to be in the range [-1, 1] (Default: ``True``) + batching (bool, optional): Effective only when coefficients are 2D. If ``True``, then waveform should be at + least 2D, and the size of second axis from last should equals to ``num_filters``. + The output can be expressed as ``output[..., i, :] = lfilter(waveform[..., i, :], + a_coeffs[i], b_coeffs[i], clamp=clamp, batching=False)``. (Default: ``True``) + + Returns: + Tensor: Waveform with dimension of either `(..., num_filters, time)` if ``a_coeffs`` and ``b_coeffs`` + are 2D Tensors, or `(..., time)` otherwise. + """ + if a_coeffs.size() != b_coeffs.size(): + raise ValueError( + "Expected coeffs to be the same size." 
+ f"Found: a_coeffs size: {a_coeffs.size()}, b_coeffs size: {b_coeffs.size()}" + ) + if a_coeffs.ndim > 2: + raise ValueError(f"Expected coeffs to have greater than 1 dimension. Found: {a_coeffs.ndim}") + + if a_coeffs.ndim > 1: + if batching: + if waveform.ndim <= 0: + raise ValueError("Expected waveform to have a positive number of dimensions." f"Found: {waveform.ndim}") + if waveform.shape[-2] != a_coeffs.shape[0]: + raise ValueError( + "Expected number of batches in waveform and coeffs to be the same." + f"Found: coeffs batches: {a_coeffs.shape[0]}, waveform batches: {waveform.shape[-2]}" + ) + else: + waveform = torch.stack([waveform] * a_coeffs.shape[0], -2) + else: + a_coeffs = a_coeffs.unsqueeze(0) + b_coeffs = b_coeffs.unsqueeze(0) + + # pack batch + shape = waveform.size() + waveform = waveform.reshape(-1, a_coeffs.shape[0], shape[-1]) + output = _lfilter(waveform, a_coeffs, b_coeffs) + + if clamp: + output = torch.clamp(output, min=-1.0, max=1.0) + + # unpack batch + output = output.reshape(shape[:-1] + output.shape[-1:]) + + return output + + +def lowpass_biquad(waveform: Tensor, sample_rate: int, cutoff_freq: float, Q: float = 0.707) -> Tensor: + r"""Design biquad lowpass filter and perform filtering. Similar to SoX implementation. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + waveform (torch.Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, e.g. 
44100 (Hz) + cutoff_freq (float or torch.Tensor): filter cutoff frequency + Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``) + + Returns: + Tensor: Waveform of dimension of `(..., time)` + """ + dtype = waveform.dtype + device = waveform.device + cutoff_freq = torch.as_tensor(cutoff_freq, dtype=dtype, device=device) + Q = torch.as_tensor(Q, dtype=dtype, device=device) + + w0 = 2 * math.pi * cutoff_freq / sample_rate + alpha = torch.sin(w0) / 2 / Q + + b0 = (1 - torch.cos(w0)) / 2 + b1 = 1 - torch.cos(w0) + b2 = b0 + a0 = 1 + alpha + a1 = -2 * torch.cos(w0) + a2 = 1 - alpha + return biquad(waveform, b0, b1, b2, a0, a1, a2) + + +def _overdrive_core_loop_generic( + waveform: Tensor, temp: Tensor, last_in: Tensor, last_out: Tensor, output_waveform: Tensor +): + for i in range(waveform.shape[-1]): + last_out = temp[:, i] - last_in + 0.995 * last_out + last_in = temp[:, i] + output_waveform[:, i] = waveform[:, i] * 0.5 + last_out * 0.75 + + +if _IS_TORCHAUDIO_EXT_AVAILABLE: + _overdrive_core_loop_cpu = torch.ops.torchaudio._overdrive_core_loop +else: + _overdrive_core_loop_cpu = _overdrive_core_loop_generic + + +def overdrive(waveform: Tensor, gain: float = 20, colour: float = 20) -> Tensor: + r"""Apply a overdrive effect to the audio. Similar to SoX implementation. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + This effect applies a non linear distortion to the audio signal. 
+ + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + gain (float, optional): desired gain at the boost (or attenuation) in dB + Allowed range of values are 0 to 100 + colour (float, optional): controls the amount of even harmonic content in the over-driven output + Allowed range of values are 0 to 100 + + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + """ + actual_shape = waveform.shape + device, dtype = waveform.device, waveform.dtype + + # convert to 2D (..,time) + waveform = waveform.view(-1, actual_shape[-1]) + + gain = _dB2Linear(gain) + colour = colour / 200 + last_in = torch.zeros(waveform.shape[:-1], dtype=dtype, device=device) + last_out = torch.zeros(waveform.shape[:-1], dtype=dtype, device=device) + + temp = waveform * gain + colour + + mask1 = temp < -1 + temp[mask1] = torch.tensor(-2.0 / 3.0, dtype=dtype, device=device) + # Wrapping the constant with Tensor is required for Torchscript + + mask2 = temp > 1 + temp[mask2] = torch.tensor(2.0 / 3.0, dtype=dtype, device=device) + + mask3 = ~mask1 & ~mask2 + temp[mask3] = temp[mask3] - (temp[mask3] ** 3) * (1.0 / 3) + + output_waveform = torch.zeros_like(waveform, dtype=dtype, device=device) + + # Uses CPU optimized loop function if available for CPU device + if device == torch.device("cpu"): + _overdrive_core_loop_cpu(waveform, temp, last_in, last_out, output_waveform) + else: + _overdrive_core_loop_generic(waveform, temp, last_in, last_out, output_waveform) + + return output_waveform.clamp(min=-1, max=1).view(actual_shape) + + +def phaser( + waveform: Tensor, + sample_rate: int, + gain_in: float = 0.4, + gain_out: float = 0.74, + delay_ms: float = 3.0, + decay: float = 0.4, + mod_speed: float = 0.5, + sinusoidal: bool = True, +) -> Tensor: + r"""Apply a phasing effect to the audio. Similar to SoX implementation. + + .. devices:: CPU CUDA + + .. 
properties:: Autograd TorchScript + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) + gain_in (float, optional): desired input gain at the boost (or attenuation) in dB + Allowed range of values are 0 to 1 + gain_out (float, optional): desired output gain at the boost (or attenuation) in dB + Allowed range of values are 0 to 1e9 + delay_ms (float, optional): desired delay in milliseconds + Allowed range of values are 0 to 5.0 + decay (float, optional): desired decay relative to gain-in + Allowed range of values are 0 to 0.99 + mod_speed (float, optional): modulation speed in Hz + Allowed range of values are 0.1 to 2 + sinusoidal (bool, optional): If ``True``, uses sinusoidal modulation (preferable for multiple instruments) + If ``False``, uses triangular modulation (gives single instruments a sharper phasing effect) + (Default: ``True``) + + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + - Scott Lehman, `Effects Explained`_. + + .. 
_Effects Explained: + https://web.archive.org/web/20051125072557/http://www.harmony-central.com/Effects/effects-explained.html + """ + actual_shape = waveform.shape + device, dtype = waveform.device, waveform.dtype + + # convert to 2D (channels,time) + waveform = waveform.view(-1, actual_shape[-1]) + + delay_buf_len = int((delay_ms * 0.001 * sample_rate) + 0.5) + delay_buf = torch.zeros(waveform.shape[0], delay_buf_len, dtype=dtype, device=device) + + mod_buf_len = int(sample_rate / mod_speed + 0.5) + + if sinusoidal: + wave_type = "SINE" + else: + wave_type = "TRIANGLE" + + mod_buf = _generate_wave_table( + wave_type=wave_type, + data_type="INT", + table_size=mod_buf_len, + min=1.0, + max=float(delay_buf_len), + phase=math.pi / 2, + device=device, + ) + + delay_pos = 0 + mod_pos = 0 + + output_waveform_pre_gain_list = [] + waveform = waveform * gain_in + delay_buf = delay_buf * decay + waveform_list = [waveform[:, i] for i in range(waveform.size(1))] + delay_buf_list = [delay_buf[:, i] for i in range(delay_buf.size(1))] + mod_buf_list = [mod_buf[i] for i in range(mod_buf.size(0))] + + for i in range(waveform.shape[-1]): + idx = int((delay_pos + mod_buf_list[mod_pos]) % delay_buf_len) + mod_pos = (mod_pos + 1) % mod_buf_len + delay_pos = (delay_pos + 1) % delay_buf_len + temp = (waveform_list[i]) + (delay_buf_list[idx]) + delay_buf_list[delay_pos] = temp * decay + output_waveform_pre_gain_list.append(temp) + + output_waveform = torch.stack(output_waveform_pre_gain_list, dim=1).to(dtype=dtype, device=device) + output_waveform.mul_(gain_out) + + return output_waveform.clamp(min=-1, max=1).view(actual_shape) + + +def riaa_biquad(waveform: Tensor, sample_rate: int) -> Tensor: + r"""Apply RIAA vinyl playback equalization. Similar to SoX implementation. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz). 
+ Allowed sample rates in Hz : ``44100``,``48000``,``88200``,``96000`` + + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF + """ + + if sample_rate == 44100: + zeros = [-0.2014898, 0.9233820] + poles = [0.7083149, 0.9924091] + + elif sample_rate == 48000: + zeros = [-0.1766069, 0.9321590] + poles = [0.7396325, 0.9931330] + + elif sample_rate == 88200: + zeros = [-0.1168735, 0.9648312] + poles = [0.8590646, 0.9964002] + + elif sample_rate == 96000: + zeros = [-0.1141486, 0.9676817] + poles = [0.8699137, 0.9966946] + + else: + raise ValueError("Sample rate must be 44.1k, 48k, 88.2k, or 96k") + + # polynomial coefficients with roots zeros[0] and zeros[1] + b0 = 1.0 + b1 = -(zeros[0] + zeros[1]) + b2 = zeros[0] * zeros[1] + + # polynomial coefficients with roots poles[0] and poles[1] + a0 = 1.0 + a1 = -(poles[0] + poles[1]) + a2 = poles[0] * poles[1] + + # Normalize to 0dB at 1kHz + y = 2 * math.pi * 1000 / sample_rate + b_re = b0 + b1 * math.cos(-y) + b2 * math.cos(-2 * y) + a_re = a0 + a1 * math.cos(-y) + a2 * math.cos(-2 * y) + b_im = b1 * math.sin(-y) + b2 * math.sin(-2 * y) + a_im = a1 * math.sin(-y) + a2 * math.sin(-2 * y) + g = 1 / math.sqrt((b_re**2 + b_im**2) / (a_re**2 + a_im**2)) + + b0 *= g + b1 *= g + b2 *= g + + return biquad(waveform, b0, b1, b2, a0, a1, a2) + + +def treble_biquad( + waveform: Tensor, + sample_rate: int, + gain: float, + central_freq: float = 3000, + Q: float = 0.707, +) -> Tensor: + r"""Design a treble tone-control effect. Similar to SoX implementation. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + waveform (Tensor): audio waveform of dimension of `(..., time)` + sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz) + gain (float or torch.Tensor): desired gain at the boost (or attenuation) in dB. 
+ central_freq (float or torch.Tensor, optional): central frequency (in Hz). (Default: ``3000``) + Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``). + + Returns: + Tensor: Waveform of dimension of `(..., time)` + + Reference: + - http://sox.sourceforge.net/sox.html + - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF + """ + dtype = waveform.dtype + device = waveform.device + central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device) + Q = torch.as_tensor(Q, dtype=dtype, device=device) + gain = torch.as_tensor(gain, dtype=dtype, device=device) + + w0 = 2 * math.pi * central_freq / sample_rate + alpha = torch.sin(w0) / 2 / Q + A = torch.exp(gain / 40 * math.log(10)) + + temp1 = 2 * torch.sqrt(A) * alpha + temp2 = (A - 1) * torch.cos(w0) + temp3 = (A + 1) * torch.cos(w0) + + b0 = A * ((A + 1) + temp2 + temp1) + b1 = -2 * A * ((A - 1) + temp3) + b2 = A * ((A + 1) + temp2 - temp1) + a0 = (A + 1) - temp2 + temp1 + a1 = 2 * ((A - 1) - temp3) + a2 = (A + 1) - temp2 - temp1 + + return biquad(waveform, b0, b1, b2, a0, a1, a2) + + +def _measure( + measure_len_ws: int, + samples: Tensor, + spectrum: Tensor, + noise_spectrum: Tensor, + spectrum_window: Tensor, + spectrum_start: int, + spectrum_end: int, + cepstrum_window: Tensor, + cepstrum_start: int, + cepstrum_end: int, + noise_reduction_amount: float, + measure_smooth_time_mult: float, + noise_up_time_mult: Tensor, + noise_down_time_mult: Tensor, + boot_count: int, +) -> float: + device = samples.device + + if spectrum.size(-1) != noise_spectrum.size(-1): + raise ValueError( + "Expected spectrum size to match noise spectrum size in final dimension." 
+ f"Found: spectrum size: {spectrum.size()}, noise_spectrum size: {noise_spectrum.size()}" + ) + + dft_len_ws = spectrum.size()[-1] + + dftBuf = torch.zeros(dft_len_ws, device=device) + + dftBuf[:measure_len_ws] = samples * spectrum_window[:measure_len_ws] + + # lsx_safe_rdft((int)p->dft_len_ws, 1, c->dftBuf); + _dftBuf = torch.fft.rfft(dftBuf) + + mult: float = boot_count / (1.0 + boot_count) if boot_count >= 0 else measure_smooth_time_mult + + _d = _dftBuf[spectrum_start:spectrum_end].abs() + spectrum[spectrum_start:spectrum_end].mul_(mult).add_(_d * (1 - mult)) + _d = spectrum[spectrum_start:spectrum_end] ** 2 + + _zeros = torch.zeros(spectrum_end - spectrum_start, device=device) + _mult = ( + _zeros + if boot_count >= 0 + else torch.where( + _d > noise_spectrum[spectrum_start:spectrum_end], + noise_up_time_mult, # if + noise_down_time_mult, # else, + ) + ) + + noise_spectrum[spectrum_start:spectrum_end].mul_(_mult).add_(_d * (1 - _mult)) + _d = torch.sqrt( + torch.max( + _zeros, + _d - noise_reduction_amount * noise_spectrum[spectrum_start:spectrum_end], + ), + ) + + _cepstrum_Buf: Tensor = torch.zeros(dft_len_ws >> 1, device=device) + _cepstrum_Buf[spectrum_start:spectrum_end] = _d * cepstrum_window + _cepstrum_Buf[spectrum_end : dft_len_ws >> 1].zero_() + + # lsx_safe_rdft((int)p->dft_len_ws >> 1, 1, c->dftBuf); + _cepstrum_Buf = torch.fft.rfft(_cepstrum_Buf) + + result: float = float(torch.sum(_cepstrum_Buf[cepstrum_start:cepstrum_end].abs().pow(2))) + result = math.log(result / (cepstrum_end - cepstrum_start)) if result > 0 else -math.inf + return max(0, 21 + result) + + +def vad( + waveform: Tensor, + sample_rate: int, + trigger_level: float = 7.0, + trigger_time: float = 0.25, + search_time: float = 1.0, + allowed_gap: float = 0.25, + pre_trigger_time: float = 0.0, + # Fine-tuning parameters + boot_time: float = 0.35, + noise_up_time: float = 0.1, + noise_down_time: float = 0.01, + noise_reduction_amount: float = 1.35, + measure_freq: float = 20.0, + 
measure_duration: Optional[float] = None, + measure_smooth_time: float = 0.4, + hp_filter_freq: float = 50.0, + lp_filter_freq: float = 6000.0, + hp_lifter_freq: float = 150.0, + lp_lifter_freq: float = 2000.0, +) -> Tensor: + r"""Voice Activity Detector. Similar to SoX implementation. + + .. devices:: CPU CUDA + + .. properties:: TorchScript + + Attempts to trim silence and quiet background sounds from the ends of recordings of speech. + The algorithm currently uses a simple cepstral power measurement to detect voice, + so may be fooled by other things, especially music. + + The effect can trim only from the front of the audio, + so in order to trim from the back, the reverse effect must also be used. + + Args: + waveform (Tensor): Tensor of audio of dimension `(channels, time)` or `(time)` + Tensor of shape `(channels, time)` is treated as a multi-channel recording + of the same event and the resulting output will be trimmed to the earliest + voice activity in any channel. + sample_rate (int): Sample rate of audio signal. + trigger_level (float, optional): The measurement level used to trigger activity detection. + This may need to be cahnged depending on the noise level, signal level, + and other characteristics of the input audio. (Default: 7.0) + trigger_time (float, optional): The time constant (in seconds) + used to help ignore short bursts of sound. (Default: 0.25) + search_time (float, optional): The amount of audio (in seconds) + to search for quieter/shorter bursts of audio to include prior + to the detected trigger point. (Default: 1.0) + allowed_gap (float, optional): The allowed gap (in seconds) between + quieter/shorter bursts of audio to include prior + to the detected trigger point. (Default: 0.25) + pre_trigger_time (float, optional): The amount of audio (in seconds) to preserve + before the trigger point and any found quieter/shorter bursts. 
(Default: 0.0) + boot_time (float, optional) The algorithm (internally) uses adaptive noise + estimation/reduction in order to detect the start of the wanted audio. + This option sets the time for the initial noise estimate. (Default: 0.35) + noise_up_time (float, optional) Time constant used by the adaptive noise estimator + for when the noise level is increasing. (Default: 0.1) + noise_down_time (float, optional) Time constant used by the adaptive noise estimator + for when the noise level is decreasing. (Default: 0.01) + noise_reduction_amount (float, optional) Amount of noise reduction to use in + the detection algorithm (e.g. 0, 0.5, ...). (Default: 1.35) + measure_freq (float, optional) Frequency of the algorithm's + processing/measurements. (Default: 20.0) + measure_duration: (float, optional) Measurement duration. + (Default: Twice the measurement period; i.e. with overlap.) + measure_smooth_time (float, optional) Time constant used to smooth + spectral measurements. (Default: 0.4) + hp_filter_freq (float, optional) "Brick-wall" frequency of high-pass filter applied + at the input to the detector algorithm. (Default: 50.0) + lp_filter_freq (float, optional) "Brick-wall" frequency of low-pass filter applied + at the input to the detector algorithm. (Default: 6000.0) + hp_lifter_freq (float, optional) "Brick-wall" frequency of high-pass lifter used + in the detector algorithm. (Default: 150.0) + lp_lifter_freq (float, optional) "Brick-wall" frequency of low-pass lifter used + in the detector algorithm. (Default: 2000.0) + + Returns: + Tensor: Tensor of audio of dimension `(..., time)`. + + Reference: + - http://sox.sourceforge.net/sox.html + """ + device = waveform.device + + if waveform.ndim > 2: + warnings.warn( + "Expected input tensor dimension of 1 for single channel" + f" or 2 for multi-channel. Got {waveform.ndim} instead. " + "Batch semantics is not supported. 
" + "Please refer to https://github.com/pytorch/audio/issues/1348" + " and https://github.com/pytorch/audio/issues/1468." + ) + + measure_duration: float = 2.0 / measure_freq if measure_duration is None else measure_duration + + measure_len_ws = int(sample_rate * measure_duration + 0.5) + measure_len_ns = measure_len_ws + # for (dft_len_ws = 16; dft_len_ws < measure_len_ws; dft_len_ws <<= 1); + dft_len_ws = 16 + while dft_len_ws < measure_len_ws: + dft_len_ws *= 2 + + measure_period_ns = int(sample_rate / measure_freq + 0.5) + measures_len = math.ceil(search_time * measure_freq) + search_pre_trigger_len_ns = measures_len * measure_period_ns + gap_len = int(allowed_gap * measure_freq + 0.5) + + fixed_pre_trigger_len_ns = int(pre_trigger_time * sample_rate + 0.5) + samplesLen_ns = fixed_pre_trigger_len_ns + search_pre_trigger_len_ns + measure_len_ns + + spectrum_window = torch.zeros(measure_len_ws, device=device) + for i in range(measure_len_ws): + # sox.h:741 define SOX_SAMPLE_MIN (sox_sample_t)SOX_INT_MIN(32) + spectrum_window[i] = 2.0 / math.sqrt(float(measure_len_ws)) + # lsx_apply_hann(spectrum_window, (int)measure_len_ws); + spectrum_window *= torch.hann_window(measure_len_ws, device=device, dtype=torch.float) + + spectrum_start: int = int(hp_filter_freq / sample_rate * dft_len_ws + 0.5) + spectrum_start: int = max(spectrum_start, 1) + spectrum_end: int = int(lp_filter_freq / sample_rate * dft_len_ws + 0.5) + spectrum_end: int = min(spectrum_end, dft_len_ws // 2) + + cepstrum_window = torch.zeros(spectrum_end - spectrum_start, device=device) + for i in range(spectrum_end - spectrum_start): + cepstrum_window[i] = 2.0 / math.sqrt(float(spectrum_end) - spectrum_start) + # lsx_apply_hann(cepstrum_window,(int)(spectrum_end - spectrum_start)); + cepstrum_window *= torch.hann_window(spectrum_end - spectrum_start, device=device, dtype=torch.float) + + cepstrum_start = math.ceil(sample_rate * 0.5 / lp_lifter_freq) + cepstrum_end = math.floor(sample_rate * 0.5 / 
hp_lifter_freq) + cepstrum_end = min(cepstrum_end, dft_len_ws // 4) + + if cepstrum_end <= cepstrum_start: + raise ValueError( + "Expected cepstrum_start to be smaller than cepstrum_end." + f"Found: cepstrum_start: {cepstrum_start}, cepstrum_end: {cepstrum_end}." + ) + + noise_up_time_mult = torch.tensor(math.exp(-1.0 / (noise_up_time * measure_freq)), device=device) + noise_down_time_mult = torch.tensor(math.exp(-1.0 / (noise_down_time * measure_freq)), device=device) + measure_smooth_time_mult = math.exp(-1.0 / (measure_smooth_time * measure_freq)) + trigger_meas_time_mult = math.exp(-1.0 / (trigger_time * measure_freq)) + + boot_count_max = int(boot_time * measure_freq - 0.5) + boot_count = measures_index = flushedLen_ns = 0 + + # pack batch + shape = waveform.size() + waveform = waveform.view(-1, shape[-1]) + + n_channels, ilen = waveform.size() + + mean_meas = torch.zeros(n_channels, device=device) + spectrum = torch.zeros(n_channels, dft_len_ws, device=device) + noise_spectrum = torch.zeros(n_channels, dft_len_ws, device=device) + measures = torch.zeros(n_channels, measures_len, device=device) + + has_triggered: bool = False + num_measures_to_flush: int = 0 + + pos = 0 + for pos in range(measure_len_ns, ilen, measure_period_ns): + for i in range(n_channels): + meas: float = _measure( + measure_len_ws=measure_len_ws, + samples=waveform[i, pos - measure_len_ws : pos], + spectrum=spectrum[i], + noise_spectrum=noise_spectrum[i], + spectrum_window=spectrum_window, + spectrum_start=spectrum_start, + spectrum_end=spectrum_end, + cepstrum_window=cepstrum_window, + cepstrum_start=cepstrum_start, + cepstrum_end=cepstrum_end, + noise_reduction_amount=noise_reduction_amount, + measure_smooth_time_mult=measure_smooth_time_mult, + noise_up_time_mult=noise_up_time_mult, + noise_down_time_mult=noise_down_time_mult, + boot_count=boot_count, + ) + measures[i, measures_index] = meas + mean_meas[i] = mean_meas[i] * trigger_meas_time_mult + meas * (1.0 - trigger_meas_time_mult) + 
+ has_triggered = has_triggered or (mean_meas[i] >= trigger_level) + if has_triggered: + n: int = measures_len + k: int = measures_index + jTrigger: int = n + jZero: int = n + j: int = 0 + + for j in range(n): + if (measures[i, k] >= trigger_level) and (j <= jTrigger + gap_len): + jZero = jTrigger = j + elif (measures[i, k] == 0) and (jTrigger >= jZero): + jZero = j + k = (k + n - 1) % n + j = min(j, jZero) + # num_measures_to_flush = range_limit(j, num_measures_to_flush, n); + num_measures_to_flush = min(max(num_measures_to_flush, j), n) + # end if has_triggered + # end for channel + measures_index += 1 + measures_index = measures_index % measures_len + if boot_count >= 0: + boot_count = -1 if boot_count == boot_count_max else boot_count + 1 + + if has_triggered: + flushedLen_ns = (measures_len - num_measures_to_flush) * measure_period_ns + break + # end for window + if not has_triggered: + return waveform[..., :0].view(shape[:-1] + torch.Size([0])) + + res = waveform[:, max(pos - samplesLen_ns + flushedLen_ns, 0) :] + # unpack batch + return res.view(shape[:-1] + res.shape[-1:]) diff --git a/venv/lib/python3.10/site-packages/torchaudio/functional/functional.py b/venv/lib/python3.10/site-packages/torchaudio/functional/functional.py new file mode 100644 index 0000000000000000000000000000000000000000..af34e707e552d6eae403278743b406988e16e9db --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/functional/functional.py @@ -0,0 +1,2535 @@ +# -*- coding: utf-8 -*- + +import math +import tempfile +import warnings +from collections.abc import Sequence +from typing import List, Optional, Tuple, Union + +import torch +import torchaudio +from torch import Tensor +from torchaudio._internal.module_utils import deprecated + +from .filtering import highpass_biquad, treble_biquad + +__all__ = [ + "spectrogram", + "inverse_spectrogram", + "griffinlim", + "amplitude_to_DB", + "DB_to_amplitude", + "compute_deltas", + "melscale_fbanks", + "linear_fbanks", + "create_dct", 
+ "compute_deltas", + "detect_pitch_frequency", + "DB_to_amplitude", + "mu_law_encoding", + "mu_law_decoding", + "phase_vocoder", + "mask_along_axis", + "mask_along_axis_iid", + "sliding_window_cmn", + "spectral_centroid", + "apply_codec", + "resample", + "edit_distance", + "loudness", + "pitch_shift", + "rnnt_loss", + "psd", + "mvdr_weights_souden", + "mvdr_weights_rtf", + "rtf_evd", + "rtf_power", + "apply_beamforming", + "fftconvolve", + "convolve", + "add_noise", + "speed", + "preemphasis", + "deemphasis", +] + + +def spectrogram( + waveform: Tensor, + pad: int, + window: Tensor, + n_fft: int, + hop_length: int, + win_length: int, + power: Optional[float], + normalized: Union[bool, str], + center: bool = True, + pad_mode: str = "reflect", + onesided: bool = True, + return_complex: Optional[bool] = None, +) -> Tensor: + r"""Create a spectrogram or a batch of spectrograms from a raw audio signal. + The spectrogram can be either magnitude-only or complex. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + waveform (Tensor): Tensor of audio of dimension `(..., time)` + pad (int): Two sided padding of signal + window (Tensor): Window tensor that is applied/multiplied to each frame/window + n_fft (int): Size of FFT + hop_length (int): Length of hop between STFT windows + win_length (int): Window size + power (float or None): Exponent for the magnitude spectrogram, + (must be > 0) e.g., 1 for magnitude, 2 for power, etc. + If None, then the complex spectrum is returned instead. + normalized (bool or str): Whether to normalize by magnitude after stft. If input is str, choices are + ``"window"`` and ``"frame_length"``, if specific normalization type is desirable. ``True`` maps to + ``"window"``. When normalized on ``"window"``, waveform is normalized upon the window's L2 energy. If + normalized on ``"frame_length"``, waveform is normalized by dividing by + :math:`(\text{frame\_length})^{0.5}`. 
+ center (bool, optional): whether to pad :attr:`waveform` on both sides so + that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`. + Default: ``True`` + pad_mode (string, optional): controls the padding method used when + :attr:`center` is ``True``. Default: ``"reflect"`` + onesided (bool, optional): controls whether to return half of results to + avoid redundancy. Default: ``True`` + return_complex (bool, optional): + Deprecated and not used. + + Returns: + Tensor: Dimension `(..., freq, time)`, freq is + ``n_fft // 2 + 1`` and ``n_fft`` is the number of + Fourier bins, and time is the number of window hops (n_frame). + """ + if return_complex is not None: + warnings.warn( + "`return_complex` argument is now deprecated and is not effective." + "`torchaudio.functional.spectrogram(power=None)` always returns a tensor with " + "complex dtype. Please remove the argument in the function call." + ) + + if pad > 0: + # TODO add "with torch.no_grad():" back when JIT supports it + waveform = torch.nn.functional.pad(waveform, (pad, pad), "constant") + + frame_length_norm, window_norm = _get_spec_norms(normalized) + + # pack batch + shape = waveform.size() + waveform = waveform.reshape(-1, shape[-1]) + + # default values are consistent with librosa.core.spectrum._spectrogram + spec_f = torch.stft( + input=waveform, + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + window=window, + center=center, + pad_mode=pad_mode, + normalized=frame_length_norm, + onesided=onesided, + return_complex=True, + ) + + # unpack batch + spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:]) + + if window_norm: + spec_f /= window.pow(2.0).sum().sqrt() + if power is not None: + if power == 1.0: + return spec_f.abs() + return spec_f.abs().pow(power) + return spec_f + + +def inverse_spectrogram( + spectrogram: Tensor, + length: Optional[int], + pad: int, + window: Tensor, + n_fft: int, + hop_length: int, + win_length: int, + normalized: Union[bool, 
str], + center: bool = True, + pad_mode: str = "reflect", + onesided: bool = True, +) -> Tensor: + r"""Create an inverse spectrogram or a batch of inverse spectrograms from the provided + complex-valued spectrogram. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + spectrogram (Tensor): Complex tensor of audio of dimension (..., freq, time). + length (int or None): The output length of the waveform. + pad (int): Two sided padding of signal. It is only effective when ``length`` is provided. + window (Tensor): Window tensor that is applied/multiplied to each frame/window + n_fft (int): Size of FFT + hop_length (int): Length of hop between STFT windows + win_length (int): Window size + normalized (bool or str): Whether the stft output was normalized by magnitude. If input is str, choices are + ``"window"`` and ``"frame_length"``, dependent on normalization mode. ``True`` maps to + ``"window"``. + center (bool, optional): whether the waveform was padded on both sides so + that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`. + Default: ``True`` + pad_mode (string, optional): controls the padding method used when + :attr:`center` is ``True``. This parameter is provided for compatibility with the + spectrogram function and is not used. Default: ``"reflect"`` + onesided (bool, optional): controls whether spectrogram was done in onesided mode. + Default: ``True`` + + Returns: + Tensor: Dimension `(..., time)`. Least squares estimation of the original signal. 
+ """ + + frame_length_norm, window_norm = _get_spec_norms(normalized) + + if not spectrogram.is_complex(): + raise ValueError("Expected `spectrogram` to be complex dtype.") + + if window_norm: + spectrogram = spectrogram * window.pow(2.0).sum().sqrt() + + # pack batch + shape = spectrogram.size() + spectrogram = spectrogram.reshape(-1, shape[-2], shape[-1]) + + # default values are consistent with librosa.core.spectrum._spectrogram + waveform = torch.istft( + input=spectrogram, + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + window=window, + center=center, + normalized=frame_length_norm, + onesided=onesided, + length=length + 2 * pad if length is not None else None, + return_complex=False, + ) + + if length is not None and pad > 0: + # remove padding from front and back + waveform = waveform[:, pad:-pad] + + # unpack batch + waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:]) + + return waveform + + +def _get_spec_norms(normalized: Union[str, bool]): + frame_length_norm, window_norm = False, False + if torch.jit.isinstance(normalized, str): + if normalized not in ["frame_length", "window"]: + raise ValueError("Invalid normalized parameter: {}".format(normalized)) + if normalized == "frame_length": + frame_length_norm = True + elif normalized == "window": + window_norm = True + elif torch.jit.isinstance(normalized, bool): + if normalized: + window_norm = True + else: + raise TypeError("Input type not supported") + return frame_length_norm, window_norm + + +def _get_complex_dtype(real_dtype: torch.dtype): + if real_dtype == torch.double: + return torch.cdouble + if real_dtype == torch.float: + return torch.cfloat + if real_dtype == torch.half: + return torch.complex32 + raise ValueError(f"Unexpected dtype {real_dtype}") + + +def griffinlim( + specgram: Tensor, + window: Tensor, + n_fft: int, + hop_length: int, + win_length: int, + power: float, + n_iter: int, + momentum: float, + length: Optional[int], + rand_init: bool, +) -> Tensor: + 
r"""Compute waveform from a linear scale magnitude spectrogram using the Griffin-Lim transformation. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Implementation ported from + *librosa* :cite:`brian_mcfee-proc-scipy-2015`, *A fast Griffin-Lim algorithm* :cite:`6701851` + and *Signal estimation from modified short-time Fourier transform* :cite:`1172092`. + + Args: + specgram (Tensor): A magnitude-only STFT spectrogram of dimension `(..., freq, frames)` + where freq is ``n_fft // 2 + 1``. + window (Tensor): Window tensor that is applied/multiplied to each frame/window + n_fft (int): Size of FFT, creates ``n_fft // 2 + 1`` bins + hop_length (int): Length of hop between STFT windows. ( + Default: ``win_length // 2``) + win_length (int): Window size. (Default: ``n_fft``) + power (float): Exponent for the magnitude spectrogram, + (must be > 0) e.g., 1 for magnitude, 2 for power, etc. + n_iter (int): Number of iteration for phase recovery process. + momentum (float): The momentum parameter for fast Griffin-Lim. + Setting this to 0 recovers the original Griffin-Lim method. + Values near 1 can lead to faster convergence, but above 1 may not converge. + length (int or None): Array length of the expected output. + rand_init (bool): Initializes phase randomly if True, to zero otherwise. + + Returns: + Tensor: waveform of `(..., time)`, where time equals the ``length`` parameter if given. + """ + if not 0 <= momentum < 1: + raise ValueError("momentum must be in range [0, 1). 
Found: {}".format(momentum)) + + momentum = momentum / (1 + momentum) + + # pack batch + shape = specgram.size() + specgram = specgram.reshape([-1] + list(shape[-2:])) + + specgram = specgram.pow(1 / power) + + # initialize the phase + if rand_init: + angles = torch.rand(specgram.size(), dtype=_get_complex_dtype(specgram.dtype), device=specgram.device) + else: + angles = torch.full(specgram.size(), 1, dtype=_get_complex_dtype(specgram.dtype), device=specgram.device) + + # And initialize the previous iterate to 0 + tprev = torch.tensor(0.0, dtype=specgram.dtype, device=specgram.device) + for _ in range(n_iter): + # Invert with our current estimate of the phases + inverse = torch.istft( + specgram * angles, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, length=length + ) + + # Rebuild the spectrogram + rebuilt = torch.stft( + input=inverse, + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + window=window, + center=True, + pad_mode="reflect", + normalized=False, + onesided=True, + return_complex=True, + ) + + # Update our phase estimates + angles = rebuilt + if momentum: + angles = angles - tprev.mul_(momentum) + angles = angles.div(angles.abs().add(1e-16)) + + # Store the previous iterate + tprev = rebuilt + + # Return the final phase estimates + waveform = torch.istft( + specgram * angles, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, length=length + ) + + # unpack batch + waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:]) + + return waveform + + +def amplitude_to_DB( + x: Tensor, multiplier: float, amin: float, db_multiplier: float, top_db: Optional[float] = None +) -> Tensor: + r"""Turn a spectrogram from the power/amplitude scale to the decibel scale. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + The output of each tensor in a batch depends on the maximum value of that tensor, + and so may return different values for an audio clip split into snippets vs. 
a full clip. + + Args: + + x (Tensor): Input spectrogram(s) before being converted to decibel scale. + The expected shapes are ``(freq, time)``, ``(channel, freq, time)`` or + ``(..., batch, channel, freq, time)``. + + .. note:: + + When ``top_db`` is specified, cut-off values are computed for each audio + in the batch. Therefore if the input shape is 4D (or larger), different + cut-off values are used for audio data in the batch. + If the input shape is 2D or 3D, a single cutoff value is used. + + multiplier (float): Use 10. for power and 20. for amplitude + amin (float): Number to clamp ``x`` + db_multiplier (float): Log10(max(reference value and amin)) + top_db (float or None, optional): Minimum negative cut-off in decibels. A reasonable number + is 80. (Default: ``None``) + + Returns: + Tensor: Output tensor in decibel scale + """ + x_db = multiplier * torch.log10(torch.clamp(x, min=amin)) + x_db -= multiplier * db_multiplier + + if top_db is not None: + # Expand batch + shape = x_db.size() + packed_channels = shape[-3] if x_db.dim() > 2 else 1 + x_db = x_db.reshape(-1, packed_channels, shape[-2], shape[-1]) + + x_db = torch.max(x_db, (x_db.amax(dim=(-3, -2, -1)) - top_db).view(-1, 1, 1, 1)) + + # Repack batch + x_db = x_db.reshape(shape) + + return x_db + + +def DB_to_amplitude(x: Tensor, ref: float, power: float) -> Tensor: + r"""Turn a tensor from the decibel scale to the power/amplitude scale. + + .. devices:: CPU CUDA + + .. properties:: TorchScript + + Args: + x (Tensor): Input tensor before being converted to power/amplitude scale. + ref (float): Reference which the output will be scaled by. + power (float): If power equals 1, will compute DB to power. If 0.5, will compute DB to amplitude. + + Returns: + Tensor: Output tensor in power/amplitude scale. + """ + return ref * torch.pow(torch.pow(10.0, 0.1 * x), power) + + +def _hz_to_mel(freq: float, mel_scale: str = "htk") -> float: + r"""Convert Hz to Mels. 
+ + Args: + freqs (float): Frequencies in Hz + mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``) + + Returns: + mels (float): Frequency in Mels + """ + + if mel_scale not in ["slaney", "htk"]: + raise ValueError('mel_scale should be one of "htk" or "slaney".') + + if mel_scale == "htk": + return 2595.0 * math.log10(1.0 + (freq / 700.0)) + + # Fill in the linear part + f_min = 0.0 + f_sp = 200.0 / 3 + + mels = (freq - f_min) / f_sp + + # Fill in the log-scale part + min_log_hz = 1000.0 + min_log_mel = (min_log_hz - f_min) / f_sp + logstep = math.log(6.4) / 27.0 + + if freq >= min_log_hz: + mels = min_log_mel + math.log(freq / min_log_hz) / logstep + + return mels + + +def _mel_to_hz(mels: Tensor, mel_scale: str = "htk") -> Tensor: + """Convert mel bin numbers to frequencies. + + Args: + mels (Tensor): Mel frequencies + mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``) + + Returns: + freqs (Tensor): Mels converted in Hz + """ + + if mel_scale not in ["slaney", "htk"]: + raise ValueError('mel_scale should be one of "htk" or "slaney".') + + if mel_scale == "htk": + return 700.0 * (10.0 ** (mels / 2595.0) - 1.0) + + # Fill in the linear scale + f_min = 0.0 + f_sp = 200.0 / 3 + freqs = f_min + f_sp * mels + + # And now the nonlinear scale + min_log_hz = 1000.0 + min_log_mel = (min_log_hz - f_min) / f_sp + logstep = math.log(6.4) / 27.0 + + log_t = mels >= min_log_mel + freqs[log_t] = min_log_hz * torch.exp(logstep * (mels[log_t] - min_log_mel)) + + return freqs + + +def _create_triangular_filterbank( + all_freqs: Tensor, + f_pts: Tensor, +) -> Tensor: + """Create a triangular filter bank. + + Args: + all_freqs (Tensor): STFT freq points of size (`n_freqs`). + f_pts (Tensor): Filter mid points of size (`n_filter`). + + Returns: + fb (Tensor): The filter bank of size (`n_freqs`, `n_filter`). 
+ """ + # Adopted from Librosa + # calculate the difference between each filter mid point and each stft freq point in hertz + f_diff = f_pts[1:] - f_pts[:-1] # (n_filter + 1) + slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_filter + 2) + # create overlapping triangles + zero = torch.zeros(1) + down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_filter) + up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_filter) + fb = torch.max(zero, torch.min(down_slopes, up_slopes)) + + return fb + + +def melscale_fbanks( + n_freqs: int, + f_min: float, + f_max: float, + n_mels: int, + sample_rate: int, + norm: Optional[str] = None, + mel_scale: str = "htk", +) -> Tensor: + r"""Create a frequency bin conversion matrix. + + .. devices:: CPU + + .. properties:: TorchScript + + Note: + For the sake of the numerical compatibility with librosa, not all the coefficients + in the resulting filter bank has magnitude of 1. + + .. image:: https://download.pytorch.org/torchaudio/doc-assets/mel_fbanks.png + :alt: Visualization of generated filter bank + + Args: + n_freqs (int): Number of frequencies to highlight/apply + f_min (float): Minimum frequency (Hz) + f_max (float): Maximum frequency (Hz) + n_mels (int): Number of mel filterbanks + sample_rate (int): Sample rate of the audio waveform + norm (str or None, optional): If "slaney", divide the triangular mel weights by the width of the mel band + (area normalization). (Default: ``None``) + mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``) + + Returns: + Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``) + meaning number of frequencies to highlight/apply to x the number of filterbanks. + Each column is a filterbank so that assuming there is a matrix A of + size (..., ``n_freqs``), the applied result would be + ``A @ melscale_fbanks(A.size(-1), ...)``. 
def melscale_fbanks(
    n_freqs: int,
    f_min: float,
    f_max: float,
    n_mels: int,
    sample_rate: int,
    norm: Optional[str] = None,
    mel_scale: str = "htk",
) -> Tensor:
    r"""Create a frequency bin conversion matrix.

    .. devices:: CPU

    .. properties:: TorchScript

    Note:
        For the sake of the numerical compatibility with librosa, not all the coefficients
        in the resulting filter bank has magnitude of 1.

    Args:
        n_freqs (int): Number of frequencies to highlight/apply
        f_min (float): Minimum frequency (Hz)
        f_max (float): Maximum frequency (Hz)
        n_mels (int): Number of mel filterbanks
        sample_rate (int): Sample rate of the audio waveform
        norm (str or None, optional): If "slaney", divide the triangular mel weights by the width
            of the mel band (area normalization). (Default: ``None``)
        mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)

    Returns:
        Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``).
        Each column is a filterbank so that assuming there is a matrix A of
        size (..., ``n_freqs``), the applied result would be
        ``A @ melscale_fbanks(A.size(-1), ...)``.
    """
    if norm is not None and norm != "slaney":
        raise ValueError('norm must be one of None or "slaney"')

    # Center frequency of every STFT bin.
    all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)

    # Equally spaced points on the mel scale, mapped back to Hz.
    m_pts = torch.linspace(
        _hz_to_mel(f_min, mel_scale=mel_scale),
        _hz_to_mel(f_max, mel_scale=mel_scale),
        n_mels + 2,
    )
    f_pts = _mel_to_hz(m_pts, mel_scale=mel_scale)

    fb = _create_triangular_filterbank(all_freqs, f_pts)

    if norm == "slaney":
        # Slaney-style mel is scaled to be approx constant energy per channel
        enorm = 2.0 / (f_pts[2 : n_mels + 2] - f_pts[:n_mels])
        fb *= enorm.unsqueeze(0)

    if (fb.max(dim=0).values == 0.0).any():
        warnings.warn(
            "At least one mel filterbank has all zero values. "
            f"The value for `n_mels` ({n_mels}) may be set too high. "
            f"Or, the value for `n_freqs` ({n_freqs}) may be set too low."
        )

    return fb
def linear_fbanks(
    n_freqs: int,
    f_min: float,
    f_max: float,
    n_filter: int,
    sample_rate: int,
) -> Tensor:
    r"""Creates a linear triangular filterbank.

    .. devices:: CPU

    .. properties:: TorchScript

    Note:
        For the sake of the numerical compatibility with librosa, not all the coefficients
        in the resulting filter bank has magnitude of 1.

    Args:
        n_freqs (int): Number of frequencies to highlight/apply
        f_min (float): Minimum frequency (Hz)
        f_max (float): Maximum frequency (Hz)
        n_filter (int): Number of (linear) triangular filter
        sample_rate (int): Sample rate of the audio waveform

    Returns:
        Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_filter``).
        Each column is a filterbank so that assuming there is a matrix A of
        size (..., ``n_freqs``), the applied result would be
        ``A * linear_fbanks(A.size(-1), ...)``.
    """
    # STFT bin centers and equally spaced filter mid points in Hz.
    all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)
    f_pts = torch.linspace(f_min, f_max, n_filter + 2)
    return _create_triangular_filterbank(all_freqs, f_pts)


def create_dct(n_mfcc: int, n_mels: int, norm: Optional[str]) -> Tensor:
    r"""Create a DCT transformation matrix with shape (``n_mels``, ``n_mfcc``),
    normalized depending on norm.

    .. devices:: CPU

    .. properties:: TorchScript

    Args:
        n_mfcc (int): Number of mfc coefficients to retain
        n_mels (int): Number of mel filterbanks
        norm (str or None): Norm to use (either "ortho" or None)

    Returns:
        Tensor: The transformation matrix, to be right-multiplied to
        row-wise data of size (``n_mels``, ``n_mfcc``).
    """
    if norm is not None and norm != "ortho":
        raise ValueError('norm must be either "ortho" or None')

    # DCT-II basis: cos(pi/N * (n + 1/2) * k), size (n_mfcc, n_mels).
    # http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II
    bins = torch.arange(float(n_mels))
    coeffs = torch.arange(float(n_mfcc)).unsqueeze(1)
    dct = torch.cos(math.pi / float(n_mels) * (bins + 0.5) * coeffs)

    if norm is None:
        dct *= 2.0
    else:
        # Orthonormal scaling ("ortho").
        dct[0] *= 1.0 / math.sqrt(2.0)
        dct *= math.sqrt(2.0 / float(n_mels))
    return dct.t()
def mu_law_encoding(x: Tensor, quantization_channels: int) -> Tensor:
    r"""Encode signal based on mu-law companding.

    .. devices:: CPU CUDA

    .. properties:: TorchScript

    This algorithm expects the signal has been scaled to between -1 and 1 and
    returns a signal encoded with values from 0 to quantization_channels - 1.

    Args:
        x (Tensor): Input tensor
        quantization_channels (int): Number of channels

    Returns:
        Tensor: Input after mu-law encoding
    """
    mu = quantization_channels - 1.0
    if not x.is_floating_point():
        warnings.warn(
            "The input Tensor must be of floating type. \
            This will be an error in the v0.12 release."
        )
        x = x.to(torch.float)
    mu = torch.tensor(mu, dtype=x.dtype)
    # Compress with the mu-law curve, then quantize to integer bins.
    compressed = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)
    return ((compressed + 1) / 2 * mu + 0.5).to(torch.int64)


def mu_law_decoding(x_mu: Tensor, quantization_channels: int) -> Tensor:
    r"""Decode mu-law encoded signal.

    .. devices:: CPU CUDA

    .. properties:: TorchScript

    This expects an input with values between 0 and quantization_channels - 1
    and returns a signal scaled between -1 and 1.

    Args:
        x_mu (Tensor): Input tensor
        quantization_channels (int): Number of channels

    Returns:
        Tensor: Input after mu-law decoding
    """
    mu = quantization_channels - 1.0
    if not x_mu.is_floating_point():
        x_mu = x_mu.to(torch.float)
    mu = torch.tensor(mu, dtype=x_mu.dtype)
    # Map bins back to [-1, 1], then expand through the inverse mu-law curve.
    centered = ((x_mu) / mu) * 2 - 1.0
    return torch.sign(centered) * (torch.exp(torch.abs(centered) * torch.log1p(mu)) - 1.0) / mu
def phase_vocoder(complex_specgrams: Tensor, rate: float, phase_advance: Tensor) -> Tensor:
    r"""Given a STFT tensor, speed up in time without modifying pitch by a factor of ``rate``.

    .. devices:: CPU CUDA

    .. properties:: Autograd TorchScript

    Args:
        complex_specgrams (Tensor):
            A tensor of dimension `(..., freq, num_frame)` with complex dtype.
        rate (float): Speed-up factor
        phase_advance (Tensor): Expected phase advance in each bin. Dimension of `(freq, 1)`

    Returns:
        Tensor:
            Stretched spectrogram. The resulting tensor is of the same dtype as the input
            spectrogram, but the number of frames is changed to ``ceil(num_frame / rate)``.

    Example
        >>> freq, hop_length = 1025, 512
        >>> # (channel, freq, time)
        >>> complex_specgrams = torch.randn(2, freq, 300, dtype=torch.cfloat)
        >>> rate = 1.3 # Speed up by 30%
        >>> phase_advance = torch.linspace(
        >>>    0, math.pi * hop_length, freq)[..., None]
        >>> x = phase_vocoder(complex_specgrams, rate, phase_advance)
        >>> x.shape # with 231 == ceil(300 / 1.3)
        torch.Size([2, 1025, 231])
    """
    # Identity stretch: nothing to do.
    if rate == 1.0:
        return complex_specgrams

    # pack batch: collapse all leading dims into one so the body works on 3D.
    shape = complex_specgrams.size()
    complex_specgrams = complex_specgrams.reshape([-1] + list(shape[-2:]))

    # Figures out the corresponding real dtype, i.e. complex128 -> float64, complex64 -> float32
    # Note torch.real is a view so it does not incur any memory copy.
    real_dtype = torch.real(complex_specgrams).dtype
    # Fractional frame positions to sample at the new rate.
    time_steps = torch.arange(0, complex_specgrams.size(-1), rate, device=complex_specgrams.device, dtype=real_dtype)

    # alphas: fractional part of each sampling position, used for linear
    # interpolation of the magnitudes between neighboring frames.
    alphas = time_steps % 1.0
    # Phase of the very first frame, used to seed the phase accumulation.
    phase_0 = complex_specgrams[..., :1].angle()

    # Time Padding: two extra frames so indexing at time_steps + 1 never overruns.
    complex_specgrams = torch.nn.functional.pad(complex_specgrams, [0, 2])

    # (new_bins, freq, 2): the frame at floor(t) and the one right after it.
    complex_specgrams_0 = complex_specgrams.index_select(-1, time_steps.long())
    complex_specgrams_1 = complex_specgrams.index_select(-1, (time_steps + 1).long())

    angle_0 = complex_specgrams_0.angle()
    angle_1 = complex_specgrams_1.angle()

    norm_0 = complex_specgrams_0.abs()
    norm_1 = complex_specgrams_1.abs()

    # Deviation of the measured phase increment from the expected advance,
    # wrapped into [-pi, pi] by subtracting whole multiples of 2*pi.
    phase = angle_1 - angle_0 - phase_advance
    phase = phase - 2 * math.pi * torch.round(phase / (2 * math.pi))

    # Compute Phase Accum: re-add the expected advance and integrate over frames,
    # starting from the initial frame's phase.
    phase = phase + phase_advance
    phase = torch.cat([phase_0, phase[..., :-1]], dim=-1)
    phase_acc = torch.cumsum(phase, -1)

    # Linearly interpolate the magnitudes between the two neighboring frames.
    mag = alphas * norm_1 + (1 - alphas) * norm_0

    # Rebuild complex values from interpolated magnitude and accumulated phase.
    complex_specgrams_stretch = torch.polar(mag, phase_acc)

    # unpack batch
    complex_specgrams_stretch = complex_specgrams_stretch.reshape(shape[:-2] + complex_specgrams_stretch.shape[1:])
    return complex_specgrams_stretch
def _get_mask_param(mask_param: int, p: float, axis_length: int) -> int:
    # With p == 1.0 the full mask_param is usable; otherwise cap it at p * axis_length.
    if p == 1.0:
        return mask_param
    return min(mask_param, int(axis_length * p))


def mask_along_axis_iid(
    specgrams: Tensor,
    mask_param: int,
    mask_value: float,
    axis: int,
    p: float = 1.0,
) -> Tensor:
    r"""Apply a mask along ``axis``, independently for each batch example.

    .. devices:: CPU CUDA

    .. properties:: Autograd TorchScript

    Mask will be applied from indices ``[v_0, v_0 + v)``,
    where ``v`` is sampled from ``uniform(0, max_v)`` and
    ``v_0`` from ``uniform(0, specgrams.size(axis) - v)``,
    with ``max_v = mask_param`` when ``p = 1.0`` and
    ``max_v = min(mask_param, floor(specgrams.size(axis) * p))`` otherwise.

    Args:
        specgrams (Tensor): Real spectrograms `(..., freq, time)`, with at least 3 dimensions.
        mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]
        mask_value (float): Value to assign to the masked columns
        axis (int): Axis to apply masking on, which should be the one of the last two dimensions.
        p (float, optional): maximum proportion of columns that can be masked. (Default: 1.0)

    Returns:
        Tensor: Masked spectrograms with the same dimensions as input specgrams Tensor`
    """
    dim = specgrams.dim()
    if dim < 3:
        raise ValueError(f"Spectrogram must have at least three dimensions ({dim} given).")
    if axis not in [dim - 2, dim - 1]:
        raise ValueError(
            f"Only Frequency and Time masking are supported (axis {dim-2} and axis {dim-1} supported; {axis} given)."
        )
    if not 0.0 <= p <= 1.0:
        raise ValueError(f"The value of p must be between 0.0 and 1.0 ({p} given).")

    mask_param = _get_mask_param(mask_param, p, specgrams.shape[axis])
    if mask_param < 1:
        return specgrams

    device = specgrams.device
    dtype = specgrams.dtype
    batch_shape = specgrams.shape[: (dim - 2)]

    # Draw a mask width and a start offset independently for every batch element.
    widths = torch.rand(batch_shape, device=device, dtype=dtype) * mask_param
    starts = torch.rand(batch_shape, device=device, dtype=dtype) * (specgrams.size(axis) - widths)

    # Broadcastable [start, end) bounds and the index grid along the masked axis.
    mask_start = starts.long()[..., None, None]
    mask_end = (starts.long() + widths.long())[..., None, None]
    positions = torch.arange(0, specgrams.size(axis), device=device, dtype=dtype)

    # Move the masked axis last, fill, and move it back.
    specgrams = specgrams.transpose(axis, -1)
    specgrams = specgrams.masked_fill((positions >= mask_start) & (positions < mask_end), mask_value)
    return specgrams.transpose(axis, -1)
def mask_along_axis(
    specgram: Tensor,
    mask_param: int,
    mask_value: float,
    axis: int,
    p: float = 1.0,
) -> Tensor:
    r"""Apply a single shared mask along ``axis``.

    .. devices:: CPU CUDA

    .. properties:: Autograd TorchScript

    Mask will be applied from indices ``[v_0, v_0 + v)``,
    where ``v`` is sampled from ``uniform(0, max_v)`` and
    ``v_0`` from ``uniform(0, specgram.size(axis) - v)``, with
    ``max_v = mask_param`` when ``p = 1.0`` and
    ``max_v = min(mask_param, floor(specgram.size(axis) * p))`` otherwise.
    All examples will have the same mask interval.

    Args:
        specgram (Tensor): Real spectrograms `(..., freq, time)`, with at least 2 dimensions.
        mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]
        mask_value (float): Value to assign to the masked columns
        axis (int): Axis to apply masking on, which should be the one of the last two dimensions.
        p (float, optional): maximum proportion of columns that can be masked. (Default: 1.0)

    Returns:
        Tensor: Masked spectrograms with the same dimensions as input specgram Tensor
    """
    dim = specgram.dim()
    if dim < 2:
        raise ValueError(f"Spectrogram must have at least two dimensions (time and frequency) ({dim} given).")
    if axis not in [dim - 2, dim - 1]:
        raise ValueError(
            f"Only Frequency and Time masking are supported (axis {dim-2} and axis {dim-1} supported; {axis} given)."
        )
    if not 0.0 <= p <= 1.0:
        raise ValueError(f"The value of p must be between 0.0 and 1.0 ({p} given).")

    mask_param = _get_mask_param(mask_param, p, specgram.shape[axis])
    if mask_param < 1:
        return specgram

    # pack batch: fold every leading dimension into one batch axis; the masked
    # axis on the packed 3D tensor is then (axis - dim + 3).
    shape = specgram.size()
    specgram = specgram.reshape([-1] + list(shape[-2:]))
    packed_axis = axis - dim + 3

    # One shared mask interval for the whole batch.
    width = torch.rand(1) * mask_param
    start = torch.rand(1) * (specgram.size(packed_axis) - width)

    mask_start = start.long().squeeze()
    mask_end = (start.long() + width.long()).squeeze()
    positions = torch.arange(0, specgram.shape[packed_axis], device=specgram.device, dtype=specgram.dtype)
    mask = (positions >= mask_start) & (positions < mask_end)
    if axis == dim - 2:
        # Frequency masking: broadcast the mask across the time axis.
        mask = mask.unsqueeze(-1)

    if mask_end - mask_start >= mask_param:
        raise ValueError("Number of columns to be masked should be less than mask_param")

    specgram = specgram.masked_fill(mask, mask_value)

    # unpack batch
    return specgram.reshape(shape[:-2] + specgram.shape[-2:])
def compute_deltas(specgram: Tensor, win_length: int = 5, mode: str = "replicate") -> Tensor:
    r"""Compute delta coefficients of a tensor, usually a spectrogram:

    .. devices:: CPU CUDA

    .. properties:: TorchScript

    .. math::
        d_t = \frac{\sum_{n=1}^{\text{N}} n (c_{t+n} - c_{t-n})}{2 \sum_{n=1}^{\text{N}} n^2}

    where :math:`d_t` is the deltas at time :math:`t`,
    :math:`c_t` is the spectrogram coeffcients at time :math:`t`,
    :math:`N` is ``(win_length-1)//2``.

    Args:
        specgram (Tensor): Tensor of audio of dimension `(..., freq, time)`
        win_length (int, optional): The window length used for computing delta (Default: ``5``)
        mode (str, optional): Mode parameter passed to padding (Default: ``"replicate"``)

    Returns:
        Tensor: Tensor of deltas of dimension `(..., freq, time)`

    Example
        >>> specgram = torch.randn(1, 40, 1000)
        >>> delta = compute_deltas(specgram)
        >>> delta2 = compute_deltas(delta)
    """
    device = specgram.device
    dtype = specgram.dtype

    # pack batch: fold everything but time into a single channel dimension.
    shape = specgram.size()
    specgram = specgram.reshape(1, -1, shape[-1])

    if win_length < 3:
        raise ValueError(f"Window length should be greater than or equal to 3. Found win_length {win_length}")

    half = (win_length - 1) // 2

    # Normalizer: 2 * sum_{k=1..half} k^2, written in closed form.
    denom = half * (half + 1) * (2 * half + 1) / 3

    # Pad the time axis so the output length matches the input.
    padded = torch.nn.functional.pad(specgram, (half, half), mode=mode)

    # One linear-ramp kernel per channel; grouped conv applies it channel-wise.
    kernel = torch.arange(-half, half + 1, 1, device=device, dtype=dtype).repeat(padded.shape[1], 1, 1)
    output = torch.nn.functional.conv1d(padded, kernel, groups=padded.shape[1]) / denom

    # unpack batch
    return output.reshape(shape)
def _compute_nccf(waveform: Tensor, sample_rate: int, frame_time: float, freq_low: int) -> Tensor:
    r"""
    Compute Normalized Cross-Correlation Function (NCCF).

    .. math::
        \phi_i(m) = \frac{\sum_{n=b_i}^{b_i + N-1} w(n) w(m+n)}{\sqrt{E(b_i) E(m+b_i)}},

    where
    :math:`\phi_i(m)` is the NCCF at frame :math:`i` with lag :math:`m`,
    :math:`w` is the waveform,
    :math:`N` is the length of a frame,
    :math:`b_i` is the beginning of frame :math:`i`,
    :math:`E(j)` is the energy :math:`\sum_{n=j}^{j+N-1} w^2(n)`.
    """
    # Guard against division by zero for silent frames.
    EPSILON = 10 ** (-9)

    # Number of lags to check: the longest pitch period we need to resolve.
    lags = int(math.ceil(sample_rate / freq_low))

    frame_size = int(math.ceil(sample_rate * frame_time))

    waveform_length = waveform.size()[-1]
    num_of_frames = int(math.ceil(waveform_length / frame_size))

    # Zero-pad so that every frame, shifted by up to `lags`, stays in bounds.
    p = lags + num_of_frames * frame_size - waveform_length
    waveform = torch.nn.functional.pad(waveform, (0, p))

    # Compute lags: correlate each frame with itself shifted by `lag` samples.
    output_lag = []
    for lag in range(1, lags + 1):
        s1 = waveform[..., :-lag].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :]
        s2 = waveform[..., lag:].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :]

        # NOTE(review): the denominator here is E(b_i) * E(m+b_i) (both norms
        # squared), not sqrt(E(b_i) E(m+b_i)) as the docstring formula states —
        # confirm against upstream before changing.
        output_frames = (
            (s1 * s2).sum(-1)
            / (EPSILON + torch.linalg.vector_norm(s1, ord=2, dim=-1)).pow(2)
            / (EPSILON + torch.linalg.vector_norm(s2, ord=2, dim=-1)).pow(2)
        )

        output_lag.append(output_frames.unsqueeze(-1))

    # Stack per-lag scores along a new trailing lag dimension.
    nccf = torch.cat(output_lag, -1)

    return nccf


def _combine_max(a: Tuple[Tensor, Tensor], b: Tuple[Tensor, Tensor], thresh: float = 0.99) -> Tuple[Tensor, Tensor]:
    """
    Take value from first if bigger than a multiplicative factor of the second, elementwise.

    Both inputs are (values, indices) pairs as returned by ``torch.max``.
    """
    mask = a[0] > thresh * b[0]
    values = mask * a[0] + ~mask * b[0]
    indices = mask * a[1] + ~mask * b[1]
    return values, indices
+ """ + + lag_min = int(math.ceil(sample_rate / freq_high)) + + # Find near enough max that is smallest + + best = torch.max(nccf[..., lag_min:], -1) + + half_size = nccf.shape[-1] // 2 + half = torch.max(nccf[..., lag_min:half_size], -1) + + best = _combine_max(half, best) + indices = best[1] + + # Add back minimal lag + indices += lag_min + # Add 1 empirical calibration offset + indices += 1 + + return indices + + +def _median_smoothing(indices: Tensor, win_length: int) -> Tensor: + r""" + Apply median smoothing to the 1D tensor over the given window. + """ + + # Centered windowed + pad_length = (win_length - 1) // 2 + + # "replicate" padding in any dimension + indices = torch.nn.functional.pad(indices, (pad_length, 0), mode="constant", value=0.0) + + indices[..., :pad_length] = torch.cat(pad_length * [indices[..., pad_length].unsqueeze(-1)], dim=-1) + roll = indices.unfold(-1, win_length, 1) + + values, _ = torch.median(roll, -1) + return values + + +def detect_pitch_frequency( + waveform: Tensor, + sample_rate: int, + frame_time: float = 10 ** (-2), + win_length: int = 30, + freq_low: int = 85, + freq_high: int = 3400, +) -> Tensor: + r"""Detect pitch frequency. + + .. devices:: CPU CUDA + + .. properties:: TorchScript + + It is implemented using normalized cross-correlation function and median smoothing. + + Args: + waveform (Tensor): Tensor of audio of dimension `(..., freq, time)` + sample_rate (int): The sample rate of the waveform (Hz) + frame_time (float, optional): Duration of a frame (Default: ``10 ** (-2)``). + win_length (int, optional): The window length for median smoothing (in number of frames) (Default: ``30``). + freq_low (int, optional): Lowest frequency that can be detected (Hz) (Default: ``85``). + freq_high (int, optional): Highest frequency that can be detected (Hz) (Default: ``3400``). 
def detect_pitch_frequency(
    waveform: Tensor,
    sample_rate: int,
    frame_time: float = 10 ** (-2),
    win_length: int = 30,
    freq_low: int = 85,
    freq_high: int = 3400,
) -> Tensor:
    r"""Detect pitch frequency.

    .. devices:: CPU CUDA

    .. properties:: TorchScript

    It is implemented using normalized cross-correlation function and median smoothing.

    Args:
        waveform (Tensor): Tensor of audio of dimension `(..., freq, time)`
        sample_rate (int): The sample rate of the waveform (Hz)
        frame_time (float, optional): Duration of a frame (Default: ``10 ** (-2)``).
        win_length (int, optional): The window length for median smoothing (in number of frames) (Default: ``30``).
        freq_low (int, optional): Lowest frequency that can be detected (Hz) (Default: ``85``).
        freq_high (int, optional): Highest frequency that can be detected (Hz) (Default: ``3400``).

    Returns:
        Tensor: Tensor of freq of dimension `(..., frame)`
    """
    # pack batch: collapse all leading dimensions into one.
    shape = list(waveform.size())
    waveform = waveform.reshape([-1] + shape[-1:])

    nccf = _compute_nccf(waveform, sample_rate, frame_time, freq_low)
    lag_indices = _find_max_per_frame(nccf, sample_rate, freq_high)
    lag_indices = _median_smoothing(lag_indices, win_length)

    # Convert the winning lag of each frame into a frequency estimate.
    EPSILON = 10 ** (-9)
    freq = sample_rate / (EPSILON + lag_indices.to(torch.float))

    # unpack batch
    return freq.reshape(shape[:-1] + list(freq.shape[-1:]))
def sliding_window_cmn(
    specgram: Tensor,
    cmn_window: int = 600,
    min_cmn_window: int = 100,
    center: bool = False,
    norm_vars: bool = False,
) -> Tensor:
    r"""
    Apply sliding-window cepstral mean (and optionally variance) normalization per utterance.

    .. devices:: CPU CUDA

    .. properties:: TorchScript

    Args:
        specgram (Tensor): Tensor of spectrogram of dimension `(..., time, freq)`
        cmn_window (int, optional): Window in frames for running average CMN computation (int, default = 600)
        min_cmn_window (int, optional): Minimum CMN window used at start of decoding (adds latency only at start).
            Only applicable if center == false, ignored if center==true (int, default = 100)
        center (bool, optional): If true, use a window centered on the current frame
            (to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false)
        norm_vars (bool, optional): If true, normalize variance to one. (bool, default = false)

    Returns:
        Tensor: Tensor matching input shape `(..., freq, time)`
    """
    # Fold all leading dimensions into a channel axis: (channels, time, freq).
    input_shape = specgram.shape
    num_frames, num_feats = input_shape[-2:]
    specgram = specgram.view(-1, num_frames, num_feats)
    num_channels = specgram.shape[0]

    dtype = specgram.dtype
    device = specgram.device
    # Running sums over the current window; updated incrementally as the
    # window slides (add the entering frame, remove the leaving one).
    last_window_start = last_window_end = -1
    cur_sum = torch.zeros(num_channels, num_feats, dtype=dtype, device=device)
    cur_sumsq = torch.zeros(num_channels, num_feats, dtype=dtype, device=device)
    cmn_specgram = torch.zeros(num_channels, num_frames, num_feats, dtype=dtype, device=device)
    for t in range(num_frames):
        # Compute the [window_start, window_end) frame range for this output frame.
        window_start = 0
        window_end = 0
        if center:
            window_start = t - cmn_window // 2
            window_end = window_start + cmn_window
        else:
            window_start = t - cmn_window
            window_end = t + 1
        if window_start < 0:
            # Shift the window right so it starts at frame 0.
            window_end -= window_start
            window_start = 0
        if not center:
            if window_end > t:
                # Causal mode: enforce the minimum startup window.
                window_end = max(t + 1, min_cmn_window)
        if window_end > num_frames:
            # Clamp to the utterance end, shifting the start left to compensate.
            window_start -= window_end - num_frames
            window_end = num_frames
            if window_start < 0:
                window_start = 0
        if last_window_start == -1:
            # First iteration: sum the whole initial window in one shot.
            # NOTE(review): the slice end is `window_end - window_start`; this
            # equals `window_end` here because window_start is 0 on the first
            # iteration — confirm against the upstream (Kaldi-derived) code.
            input_part = specgram[:, window_start : window_end - window_start, :]
            cur_sum += torch.sum(input_part, 1)
            if norm_vars:
                cur_sumsq += torch.cumsum(input_part**2, 1)[:, -1, :]
        else:
            # Incremental update: at most one frame enters and one leaves.
            if window_start > last_window_start:
                frame_to_remove = specgram[:, last_window_start, :]
                cur_sum -= frame_to_remove
                if norm_vars:
                    cur_sumsq -= frame_to_remove**2
            if window_end > last_window_end:
                frame_to_add = specgram[:, last_window_end, :]
                cur_sum += frame_to_add
                if norm_vars:
                    cur_sumsq += frame_to_add**2
        window_frames = window_end - window_start
        last_window_start = window_start
        last_window_end = window_end
        # Subtract the window mean from the current frame.
        cmn_specgram[:, t, :] = specgram[:, t, :] - cur_sum / window_frames
        if norm_vars:
            if window_frames == 1:
                # A single-frame window has zero variance; output zeros.
                cmn_specgram[:, t, :] = torch.zeros(num_channels, num_feats, dtype=dtype, device=device)
            else:
                # variance = E[x^2] - E[x]^2; multiply by 1/sqrt(var) to normalize.
                variance = cur_sumsq
                variance = variance / window_frames
                variance -= (cur_sum**2) / (window_frames**2)
                variance = torch.pow(variance, -0.5)
                cmn_specgram[:, t, :] *= variance

    # Restore the original leading dimensions.
    cmn_specgram = cmn_specgram.view(input_shape[:-2] + (num_frames, num_feats))
    if len(input_shape) == 2:
        cmn_specgram = cmn_specgram.squeeze(0)
    return cmn_specgram


def spectral_centroid(
    waveform: Tensor,
    sample_rate: int,
    pad: int,
    window: Tensor,
    n_fft: int,
    hop_length: int,
    win_length: int,
) -> Tensor:
    r"""Compute the spectral centroid for each channel along the time axis.

    .. devices:: CPU CUDA

    .. properties:: Autograd TorchScript

    The spectral centroid is defined as the weighted average of the
    frequency values, weighted by their magnitude.

    Args:
        waveform (Tensor): Tensor of audio of dimension `(..., time)`
        sample_rate (int): Sample rate of the audio waveform
        pad (int): Two sided padding of signal
        window (Tensor): Window tensor that is applied/multiplied to each frame/window
        n_fft (int): Size of FFT
        hop_length (int): Length of hop between STFT windows
        win_length (int): Window size

    Returns:
        Tensor: Dimension `(..., time)`
    """
    # Magnitude spectrogram (power=1.0) of the input.
    specgram = spectrogram(
        waveform,
        pad=pad,
        window=window,
        n_fft=n_fft,
        hop_length=hop_length,
        win_length=win_length,
        power=1.0,
        normalized=False,
    )
    # Center frequency of each STFT bin, shaped (n_freqs, 1) for broadcasting.
    freqs = torch.linspace(0, sample_rate // 2, steps=1 + n_fft // 2, device=specgram.device).reshape((-1, 1))
    freq_dim = -2
    # Magnitude-weighted average frequency per frame.
    return (freqs * specgram).sum(dim=freq_dim) / specgram.sum(dim=freq_dim)
@deprecated("Please migrate to :py:class:`torchaudio.io.AudioEffector`.", remove=False)
def apply_codec(
    waveform: Tensor,
    sample_rate: int,
    format: str,
    channels_first: bool = True,
    compression: Optional[float] = None,
    encoding: Optional[str] = None,
    bits_per_sample: Optional[int] = None,
) -> Tensor:
    r"""
    Apply codecs as a form of augmentation.

    .. devices:: CPU

    Args:
        waveform (Tensor): Audio data. Must be 2 dimensional. See also ```channels_first```.
        sample_rate (int): Sample rate of the audio waveform.
        format (str): File format.
        channels_first (bool, optional):
            When True, both the input and output Tensor have dimension `(channel, time)`.
            Otherwise, they have dimension `(time, channel)`.
        compression (float or None, optional): Used for formats other than WAV.
            For more details see :py:func:`torchaudio.backend.sox_io_backend.save`.
        encoding (str or None, optional): Changes the encoding for the supported formats.
            For more details see :py:func:`torchaudio.backend.sox_io_backend.save`.
        bits_per_sample (int or None, optional): Changes the bit depth for the supported formats.
            For more details see :py:func:`torchaudio.backend.sox_io_backend.save`.

    Returns:
        Tensor: Resulting Tensor.
        If ``channels_first=True``, it has `(channel, time)` else `(time, channel)`.
    """
    from torchaudio.backend import _sox_io_backend

    # Round-trip through the codec via a temporary file: encode with the sox
    # backend, then decode the result back into a tensor.
    with tempfile.NamedTemporaryFile() as f:
        torchaudio.backend._sox_io_backend.save(
            f.name, waveform, sample_rate, channels_first, compression, format, encoding, bits_per_sample
        )
        augmented, sr = _sox_io_backend.load(f.name, channels_first=channels_first, format=format)
    # Some codecs change the sample rate; resample back to the original rate.
    if sr != sample_rate:
        augmented = resample(augmented, sr, sample_rate)
    return augmented


# Default device for kernel construction in _get_sinc_resample_kernel.
_CPU = torch.device("cpu")
def _get_sinc_resample_kernel(
    orig_freq: int,
    new_freq: int,
    gcd: int,
    lowpass_filter_width: int = 6,
    rolloff: float = 0.99,
    resampling_method: str = "sinc_interp_hann",
    beta: Optional[float] = None,
    device: torch.device = _CPU,
    dtype: Optional[torch.dtype] = None,
):
    """Build the windowed-sinc FIR kernel used by :func:`resample`.

    Returns a ``(kernels, width)`` pair: ``kernels`` is the conv1d weight of
    shape ``(new_freq // gcd, 1, kernel_length)`` and ``width`` the one-sided
    padding (in input samples) needed before convolving.
    """
    if not (int(orig_freq) == orig_freq and int(new_freq) == new_freq):
        raise Exception(
            "Frequencies must be of integer type to ensure quality resampling computation. "
            "To work around this, manually convert both frequencies to integer values "
            "that maintain their resampling rate ratio before passing them into the function. "
            "Example: To downsample a 44100 hz waveform by a factor of 8, use "
            "`orig_freq=8` and `new_freq=1` instead of `orig_freq=44100` and `new_freq=5512.5`. "
            "For more information, please refer to https://github.com/pytorch/audio/issues/1487."
        )

    # Legacy method names are accepted but warned about; invalid names raise.
    if resampling_method in ["sinc_interpolation", "kaiser_window"]:
        method_map = {
            "sinc_interpolation": "sinc_interp_hann",
            "kaiser_window": "sinc_interp_kaiser",
        }
        warnings.warn(
            f'"{resampling_method}" resampling method name is being deprecated and replaced by '
            f'"{method_map[resampling_method]}" in the next release. '
            "The default behavior remains unchanged.",
            stacklevel=3,
        )
    elif resampling_method not in ["sinc_interp_hann", "sinc_interp_kaiser"]:
        raise ValueError("Invalid resampling method: {}".format(resampling_method))

    # Reduce both rates by their GCD so the polyphase filter stays small.
    orig_freq = int(orig_freq) // gcd
    new_freq = int(new_freq) // gcd

    if lowpass_filter_width <= 0:
        raise ValueError("Low pass filter width should be positive.")
    base_freq = min(orig_freq, new_freq)
    # This will perform antialiasing filtering by removing the highest frequencies.
    # At first I thought I only needed this when downsampling, but when upsampling
    # you will get edge artifacts without this, as the edge is equivalent to zero padding,
    # which will add high freq artifacts.
    base_freq *= rolloff

    # The key idea of the algorithm is that x(t) can be exactly reconstructed from x[i] (tensor)
    # using the sinc interpolation formula:
    #   x(t) = sum_i x[i] sinc(pi * orig_freq * (i / orig_freq - t))
    # We can then sample the function x(t) with a different sample rate:
    #   y[j] = x(j / new_freq)
    # or,
    #   y[j] = sum_i x[i] sinc(pi * orig_freq * (i / orig_freq - j / new_freq))

    # We see here that y[j] is the convolution of x[i] with a specific filter, for which
    # we take an FIR approximation, stopping when we see at least `lowpass_filter_width` zeros crossing.
    # But y[j+1] is going to have a different set of weights and so on, until y[j + new_freq].
    # Indeed:
    # y[j + new_freq] = sum_i x[i] sinc(pi * orig_freq * ((i / orig_freq - (j + new_freq) / new_freq))
    #                 = sum_i x[i] sinc(pi * orig_freq * ((i - orig_freq) / orig_freq - j / new_freq))
    #                 = sum_i x[i + orig_freq] sinc(pi * orig_freq * (i / orig_freq - j / new_freq))
    # so y[j+new_freq] uses the same filter as y[j], but on a shifted version of x by `orig_freq`.
    # This will explain the F.conv1d after, with a stride of orig_freq.
    width = math.ceil(lowpass_filter_width * orig_freq / base_freq)
    # If orig_freq is still big after GCD reduction, most filters will be very unbalanced, i.e.,
    # they will have a lot of almost zero values to the left or to the right...
    # There is probably a way to evaluate those filters more efficiently, but this is kept for
    # future work.
    idx_dtype = dtype if dtype is not None else torch.float64

    # Sample positions relative to each output phase, in units of input samples.
    idx = torch.arange(-width, width + orig_freq, dtype=idx_dtype, device=device)[None, None] / orig_freq

    # One row per output phase (new_freq phases), offset by -j / new_freq.
    t = torch.arange(0, -new_freq, -1, dtype=dtype, device=device)[:, None, None] / new_freq + idx
    t *= base_freq
    t = t.clamp_(-lowpass_filter_width, lowpass_filter_width)

    # we do not use built in torch windows here as we need to evaluate the window
    # at specific positions, not over a regular grid.
    if resampling_method == "sinc_interp_hann":
        window = torch.cos(t * math.pi / lowpass_filter_width / 2) ** 2
    else:
        # sinc_interp_kaiser
        if beta is None:
            beta = 14.769656459379492
        beta_tensor = torch.tensor(float(beta))
        window = torch.i0(beta_tensor * torch.sqrt(1 - (t / lowpass_filter_width) ** 2)) / torch.i0(beta_tensor)

    t *= math.pi

    # sinc(t) with the removable singularity at t == 0 patched to 1.
    scale = base_freq / orig_freq
    kernels = torch.where(t == 0, torch.tensor(1.0).to(t), t.sin() / t)
    kernels *= window * scale

    if dtype is None:
        # Kernel was built in float64 for accuracy; hand back float32 by default.
        kernels = kernels.to(dtype=torch.float32)

    return kernels, width


def _apply_sinc_resample_kernel(
    waveform: Tensor,
    orig_freq: int,
    new_freq: int,
    gcd: int,
    kernel: Tensor,
    width: int,
):
    """Convolve ``waveform`` with the polyphase kernel from _get_sinc_resample_kernel."""
    if not waveform.is_floating_point():
        raise TypeError(f"Expected floating point type for waveform tensor, but received {waveform.dtype}.")

    orig_freq = int(orig_freq) // gcd
    new_freq = int(new_freq) // gcd

    # pack batch: collapse leading dims so conv1d sees (batch, 1, time).
    shape = waveform.size()
    waveform = waveform.view(-1, shape[-1])

    num_wavs, length = waveform.shape
    waveform = torch.nn.functional.pad(waveform, (width, width + orig_freq))
    # Stride of orig_freq implements the polyphase decimation; each output
    # channel of the conv is one of the new_freq output phases.
    resampled = torch.nn.functional.conv1d(waveform[:, None], kernel, stride=orig_freq)
    # Interleave the phases back into a single time axis.
    resampled = resampled.transpose(1, 2).reshape(num_wavs, -1)
    target_length = torch.ceil(torch.as_tensor(new_freq * length / orig_freq)).long()
    resampled = resampled[..., :target_length]

    # unpack batch
    resampled = resampled.view(shape[:-1] + resampled.shape[-1:])
    return resampled
properties:: Autograd TorchScript + + Note: + ``transforms.Resample`` precomputes and reuses the resampling kernel, so using it will result in + more efficient computation if resampling multiple waveforms with the same resampling parameters. + + Args: + waveform (Tensor): The input signal of dimension `(..., time)` + orig_freq (int): The original frequency of the signal + new_freq (int): The desired frequency + lowpass_filter_width (int, optional): Controls the sharpness of the filter, more == sharper + but less efficient. (Default: ``6``) + rolloff (float, optional): The roll-off frequency of the filter, as a fraction of the Nyquist. + Lower values reduce anti-aliasing, but also reduce some of the highest frequencies. (Default: ``0.99``) + resampling_method (str, optional): The resampling method to use. + Options: [``"sinc_interp_hann"``, ``"sinc_interp_kaiser"``] (Default: ``"sinc_interp_hann"``) + beta (float or None, optional): The shape parameter used for kaiser window. + + Returns: + Tensor: The waveform at the new frequency of dimension `(..., time).` + """ + + if orig_freq <= 0.0 or new_freq <= 0.0: + raise ValueError("Original frequency and desired frequecy should be positive") + + if orig_freq == new_freq: + return waveform + + gcd = math.gcd(int(orig_freq), int(new_freq)) + + kernel, width = _get_sinc_resample_kernel( + orig_freq, + new_freq, + gcd, + lowpass_filter_width, + rolloff, + resampling_method, + beta, + waveform.device, + waveform.dtype, + ) + resampled = _apply_sinc_resample_kernel(waveform, orig_freq, new_freq, gcd, kernel, width) + return resampled + + +@torch.jit.unused +def edit_distance(seq1: Sequence, seq2: Sequence) -> int: + """ + Calculate the word level edit (Levenshtein) distance between two sequences. + + .. devices:: CPU + + The function computes an edit distance allowing deletion, insertion and + substitution. The result is an integer. + + For most applications, the two input sequences should be the same type. 
If + two strings are given, the output is the edit distance between the two + strings (character edit distance). If two lists of strings are given, the + output is the edit distance between sentences (word edit distance). Users + may want to normalize the output by the length of the reference sequence. + + Args: + seq1 (Sequence): the first sequence to compare. + seq2 (Sequence): the second sequence to compare. + Returns: + int: The distance between the first and second sequences. + """ + len_sent2 = len(seq2) + dold = list(range(len_sent2 + 1)) + dnew = [0 for _ in range(len_sent2 + 1)] + + for i in range(1, len(seq1) + 1): + dnew[0] = i + for j in range(1, len_sent2 + 1): + if seq1[i - 1] == seq2[j - 1]: + dnew[j] = dold[j - 1] + else: + substitution = dold[j - 1] + 1 + insertion = dnew[j - 1] + 1 + deletion = dold[j] + 1 + dnew[j] = min(substitution, insertion, deletion) + + dnew, dold = dold, dnew + + return int(dold[-1]) + + +def loudness(waveform: Tensor, sample_rate: int): + r"""Measure audio loudness according to the ITU-R BS.1770-4 recommendation. + + .. devices:: CPU CUDA + + .. 
properties:: TorchScript + + Args: + waveform(torch.Tensor): audio waveform of dimension `(..., channels, time)` + sample_rate (int): sampling rate of the waveform + + Returns: + Tensor: loudness estimates (LKFS) + + Reference: + - https://www.itu.int/rec/R-REC-BS.1770-4-201510-I/en + """ + + if waveform.size(-2) > 5: + raise ValueError("Only up to 5 channels are supported.") + + gate_duration = 0.4 + overlap = 0.75 + gamma_abs = -70.0 + kweight_bias = -0.691 + gate_samples = int(round(gate_duration * sample_rate)) + step = int(round(gate_samples * (1 - overlap))) + + # Apply K-weighting + waveform = treble_biquad(waveform, sample_rate, 4.0, 1500.0, 1 / math.sqrt(2)) + waveform = highpass_biquad(waveform, sample_rate, 38.0, 0.5) + + # Compute the energy for each block + energy = torch.square(waveform).unfold(-1, gate_samples, step) + energy = torch.mean(energy, dim=-1) + + # Compute channel-weighted summation + g = torch.tensor([1.0, 1.0, 1.0, 1.41, 1.41], dtype=waveform.dtype, device=waveform.device) + g = g[: energy.size(-2)] + + energy_weighted = torch.sum(g.unsqueeze(-1) * energy, dim=-2) + loudness = -0.691 + 10 * torch.log10(energy_weighted) + + # Apply absolute gating of the blocks + gated_blocks = loudness > gamma_abs + gated_blocks = gated_blocks.unsqueeze(-2) + + energy_filtered = torch.sum(gated_blocks * energy, dim=-1) / torch.count_nonzero(gated_blocks, dim=-1) + energy_weighted = torch.sum(g * energy_filtered, dim=-1) + gamma_rel = kweight_bias + 10 * torch.log10(energy_weighted) - 10 + + # Apply relative gating of the blocks + gated_blocks = torch.logical_and(gated_blocks.squeeze(-2), loudness > gamma_rel.unsqueeze(-1)) + gated_blocks = gated_blocks.unsqueeze(-2) + + energy_filtered = torch.sum(gated_blocks * energy, dim=-1) / torch.count_nonzero(gated_blocks, dim=-1) + energy_weighted = torch.sum(g * energy_filtered, dim=-1) + LKFS = kweight_bias + 10 * torch.log10(energy_weighted) + return LKFS + + +def pitch_shift( + waveform: Tensor, + 
sample_rate: int, + n_steps: int, + bins_per_octave: int = 12, + n_fft: int = 512, + win_length: Optional[int] = None, + hop_length: Optional[int] = None, + window: Optional[Tensor] = None, +) -> Tensor: + """ + Shift the pitch of a waveform by ``n_steps`` steps. + + .. devices:: CPU CUDA + + .. properties:: TorchScript + + Args: + waveform (Tensor): The input waveform of shape `(..., time)`. + sample_rate (int): Sample rate of `waveform`. + n_steps (int): The (fractional) steps to shift `waveform`. + bins_per_octave (int, optional): The number of steps per octave (Default: ``12``). + n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins (Default: ``512``). + win_length (int or None, optional): Window size. If None, then ``n_fft`` is used. (Default: ``None``). + hop_length (int or None, optional): Length of hop between STFT windows. If None, then + ``win_length // 4`` is used (Default: ``None``). + window (Tensor or None, optional): Window tensor that is applied/multiplied to each frame/window. + If None, then ``torch.hann_window(win_length)`` is used (Default: ``None``). + + + Returns: + Tensor: The pitch-shifted audio waveform of shape `(..., time)`. + """ + waveform_stretch = _stretch_waveform( + waveform, + n_steps, + bins_per_octave, + n_fft, + win_length, + hop_length, + window, + ) + rate = 2.0 ** (-float(n_steps) / bins_per_octave) + waveform_shift = resample(waveform_stretch, int(sample_rate / rate), sample_rate) + + return _fix_waveform_shape(waveform_shift, waveform.size()) + + +def _stretch_waveform( + waveform: Tensor, + n_steps: int, + bins_per_octave: int = 12, + n_fft: int = 512, + win_length: Optional[int] = None, + hop_length: Optional[int] = None, + window: Optional[Tensor] = None, +) -> Tensor: + """ + Pitch shift helper function to preprocess and stretch waveform before resampling step. + + Args: + See pitch_shift arg descriptions. + + Returns: + Tensor: The preprocessed waveform stretched prior to resampling. 
+ """ + if hop_length is None: + hop_length = n_fft // 4 + if win_length is None: + win_length = n_fft + if window is None: + window = torch.hann_window(window_length=win_length, device=waveform.device) + + # pack batch + shape = waveform.size() + waveform = waveform.reshape(-1, shape[-1]) + + ori_len = shape[-1] + rate = 2.0 ** (-float(n_steps) / bins_per_octave) + spec_f = torch.stft( + input=waveform, + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + window=window, + center=True, + pad_mode="reflect", + normalized=False, + onesided=True, + return_complex=True, + ) + phase_advance = torch.linspace(0, math.pi * hop_length, spec_f.shape[-2], device=spec_f.device)[..., None] + spec_stretch = phase_vocoder(spec_f, rate, phase_advance) + len_stretch = int(round(ori_len / rate)) + waveform_stretch = torch.istft( + spec_stretch, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, length=len_stretch + ) + return waveform_stretch + + +def _fix_waveform_shape( + waveform_shift: Tensor, + shape: List[int], +) -> Tensor: + """ + PitchShift helper function to process after resampling step to fix the shape back. + + Args: + waveform_shift(Tensor): The waveform after stretch and resample + shape (List[int]): The shape of initial waveform + + Returns: + Tensor: The pitch-shifted audio waveform of shape `(..., time)`. 
+ """ + ori_len = shape[-1] + shift_len = waveform_shift.size()[-1] + if shift_len > ori_len: + waveform_shift = waveform_shift[..., :ori_len] + else: + waveform_shift = torch.nn.functional.pad(waveform_shift, [0, ori_len - shift_len]) + + # unpack batch + waveform_shift = waveform_shift.view(shape[:-1] + waveform_shift.shape[-1:]) + return waveform_shift + + +def rnnt_loss( + logits: Tensor, + targets: Tensor, + logit_lengths: Tensor, + target_lengths: Tensor, + blank: int = -1, + clamp: float = -1, + reduction: str = "mean", + fused_log_softmax: bool = True, +): + """Compute the RNN Transducer loss from *Sequence Transduction with Recurrent Neural Networks* + :cite:`graves2012sequence`. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + The RNN Transducer loss extends the CTC loss by defining a distribution over output + sequences of all lengths, and by jointly modelling both input-output and output-output + dependencies. + + Args: + logits (Tensor): Tensor of dimension `(batch, max seq length, max target length + 1, class)` + containing output from joiner + targets (Tensor): Tensor of dimension `(batch, max target length)` containing targets with zero padded + logit_lengths (Tensor): Tensor of dimension `(batch)` containing lengths of each sequence from encoder + target_lengths (Tensor): Tensor of dimension `(batch)` containing lengths of targets for each sequence + blank (int, optional): blank label (Default: ``-1``) + clamp (float, optional): clamp for gradients (Default: ``-1``) + reduction (string, optional): Specifies the reduction to apply to the output: + ``"none"`` | ``"mean"`` | ``"sum"``. (Default: ``"mean"``) + fused_log_softmax (bool): set to False if calling log_softmax outside of loss (Default: ``True``) + Returns: + Tensor: Loss with the reduction option applied. If ``reduction`` is ``"none"``, then size `(batch)`, + otherwise scalar. 
+ """ + if reduction not in ["none", "mean", "sum"]: + raise ValueError('reduction should be one of "none", "mean", or "sum"') + + if blank < 0: # reinterpret blank index if blank < 0. + blank = logits.shape[-1] + blank + + costs, _ = torch.ops.torchaudio.rnnt_loss( + logits=logits, + targets=targets, + logit_lengths=logit_lengths, + target_lengths=target_lengths, + blank=blank, + clamp=clamp, + fused_log_softmax=fused_log_softmax, + ) + + if reduction == "mean": + return costs.mean() + elif reduction == "sum": + return costs.sum() + + return costs + + +def psd( + specgram: Tensor, + mask: Optional[Tensor] = None, + normalize: bool = True, + eps: float = 1e-10, +) -> Tensor: + """Compute cross-channel power spectral density (PSD) matrix. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + specgram (torch.Tensor): Multi-channel complex-valued spectrum. + Tensor with dimensions `(..., channel, freq, time)`. + mask (torch.Tensor or None, optional): Time-Frequency mask for normalization. + Tensor with dimensions `(..., freq, time)`. (Default: ``None``) + normalize (bool, optional): If ``True``, normalize the mask along the time dimension. (Default: ``True``) + eps (float, optional): Value to add to the denominator in mask normalization. (Default: ``1e-15``) + + Returns: + torch.Tensor: The complex-valued PSD matrix of the input spectrum. + Tensor with dimensions `(..., freq, channel, channel)` + """ + specgram = specgram.transpose(-3, -2) # shape (freq, channel, time) + # outer product: + # (..., ch_1, time) x (..., ch_2, time) -> (..., time, ch_1, ch_2) + psd = torch.einsum("...ct,...et->...tce", [specgram, specgram.conj()]) + + if mask is not None: + if mask.shape[:-1] != specgram.shape[:-2] or mask.shape[-1] != specgram.shape[-1]: + raise ValueError( + "The dimensions of mask except the channel dimension should be the same as specgram." + f"Found {mask.shape} for mask and {specgram.shape} for specgram." 
+ ) + # Normalized mask along time dimension: + if normalize: + mask = mask / (mask.sum(dim=-1, keepdim=True) + eps) + + psd = psd * mask[..., None, None] + + psd = psd.sum(dim=-3) + return psd + + +def _compute_mat_trace(input: torch.Tensor, dim1: int = -1, dim2: int = -2) -> torch.Tensor: + r"""Compute the trace of a Tensor along ``dim1`` and ``dim2`` dimensions. + + Args: + input (torch.Tensor): Tensor with dimensions `(..., channel, channel)`. + dim1 (int, optional): The first dimension of the diagonal matrix. + (Default: ``-1``) + dim2 (int, optional): The second dimension of the diagonal matrix. + (Default: ``-2``) + + Returns: + Tensor: The trace of the input Tensor. + """ + if input.ndim < 2: + raise ValueError("The dimension of the tensor must be at least 2.") + if input.shape[dim1] != input.shape[dim2]: + raise ValueError("The size of ``dim1`` and ``dim2`` must be the same.") + input = torch.diagonal(input, 0, dim1=dim1, dim2=dim2) + return input.sum(dim=-1) + + +def _tik_reg(mat: torch.Tensor, reg: float = 1e-7, eps: float = 1e-8) -> torch.Tensor: + """Perform Tikhonov regularization (only modifying real part). + + Args: + mat (torch.Tensor): Input matrix with dimensions `(..., channel, channel)`. + reg (float, optional): Regularization factor. (Default: 1e-8) + eps (float, optional): Value to avoid the correlation matrix is all-zero. (Default: ``1e-8``) + + Returns: + Tensor: Regularized matrix with dimensions `(..., channel, channel)`. + """ + # Add eps + C = mat.size(-1) + eye = torch.eye(C, dtype=mat.dtype, device=mat.device) + epsilon = _compute_mat_trace(mat).real[..., None, None] * reg + # in case that correlation_matrix is all-zero + epsilon = epsilon + eps + mat = mat + epsilon * eye[..., :, :] + return mat + + +def _assert_psd_matrices(psd_s: torch.Tensor, psd_n: torch.Tensor) -> None: + """Assertion checks of the PSD matrices of target speech and noise. 
+ + Args: + psd_s (torch.Tensor): The complex-valued power spectral density (PSD) matrix of target speech. + Tensor with dimensions `(..., freq, channel, channel)`. + psd_n (torch.Tensor): The complex-valued power spectral density (PSD) matrix of noise. + Tensor with dimensions `(..., freq, channel, channel)`. + """ + if psd_s.ndim < 3 or psd_n.ndim < 3: + raise ValueError( + "Expected at least 3D Tensor (..., freq, channel, channel) for psd_s and psd_n. " + f"Found {psd_s.shape} for psd_s and {psd_n.shape} for psd_n." + ) + if not (psd_s.is_complex() and psd_n.is_complex()): + raise TypeError( + "The type of psd_s and psd_n must be ``torch.cfloat`` or ``torch.cdouble``. " + f"Found {psd_s.dtype} for psd_s and {psd_n.dtype} for psd_n." + ) + if psd_s.shape != psd_n.shape: + raise ValueError( + f"The dimensions of psd_s and psd_n should be the same. Found {psd_s.shape} and {psd_n.shape}." + ) + if psd_s.shape[-1] != psd_s.shape[-2]: + raise ValueError(f"The last two dimensions of psd_s should be the same. Found {psd_s.shape}.") + + +def mvdr_weights_souden( + psd_s: Tensor, + psd_n: Tensor, + reference_channel: Union[int, Tensor], + diagonal_loading: bool = True, + diag_eps: float = 1e-7, + eps: float = 1e-8, +) -> Tensor: + r"""Compute the Minimum Variance Distortionless Response (*MVDR* :cite:`capon1969high`) beamforming weights + by the method proposed by *Souden et, al.* :cite:`souden2009optimal`. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Given the power spectral density (PSD) matrix of target speech :math:`\bf{\Phi}_{\textbf{SS}}`, + the PSD matrix of noise :math:`\bf{\Phi}_{\textbf{NN}}`, and a one-hot vector that represents the + reference channel :math:`\bf{u}`, the method computes the MVDR beamforming weight martrix + :math:`\textbf{w}_{\text{MVDR}}`. The formula is defined as: + + .. 
math:: + \textbf{w}_{\text{MVDR}}(f) = + \frac{{{\bf{\Phi}_{\textbf{NN}}^{-1}}(f){\bf{\Phi}_{\textbf{SS}}}}(f)} + {\text{Trace}({{{\bf{\Phi}_{\textbf{NN}}^{-1}}(f) \bf{\Phi}_{\textbf{SS}}}(f))}}\bm{u} + + Args: + psd_s (torch.Tensor): The complex-valued power spectral density (PSD) matrix of target speech. + Tensor with dimensions `(..., freq, channel, channel)`. + psd_n (torch.Tensor): The complex-valued power spectral density (PSD) matrix of noise. + Tensor with dimensions `(..., freq, channel, channel)`. + reference_channel (int or torch.Tensor): Specifies the reference channel. + If the dtype is ``int``, it represents the reference channel index. + If the dtype is ``torch.Tensor``, its shape is `(..., channel)`, where the ``channel`` dimension + is one-hot. + diagonal_loading (bool, optional): If ``True``, enables applying diagonal loading to ``psd_n``. + (Default: ``True``) + diag_eps (float, optional): The coefficient multiplied to the identity matrix for diagonal loading. + It is only effective when ``diagonal_loading`` is set to ``True``. (Default: ``1e-7``) + eps (float, optional): Value to add to the denominator in the beamforming weight formula. + (Default: ``1e-8``) + + Returns: + torch.Tensor: The complex-valued MVDR beamforming weight matrix with dimensions `(..., freq, channel)`. 
+ """ + _assert_psd_matrices(psd_s, psd_n) + + if diagonal_loading: + psd_n = _tik_reg(psd_n, reg=diag_eps) + numerator = torch.linalg.solve(psd_n, psd_s) # psd_n.inv() @ psd_s + # ws: (..., C, C) / (...,) -> (..., C, C) + ws = numerator / (_compute_mat_trace(numerator)[..., None, None] + eps) + if torch.jit.isinstance(reference_channel, int): + beamform_weights = ws[..., :, reference_channel] + elif torch.jit.isinstance(reference_channel, Tensor): + reference_channel = reference_channel.to(psd_n.dtype) + # h: (..., F, C_1, C_2) x (..., C_2) -> (..., F, C_1) + beamform_weights = torch.einsum("...c,...c->...", [ws, reference_channel[..., None, None, :]]) + else: + raise TypeError(f'Expected "int" or "Tensor" for reference_channel. Found: {type(reference_channel)}.') + + return beamform_weights + + +def mvdr_weights_rtf( + rtf: Tensor, + psd_n: Tensor, + reference_channel: Optional[Union[int, Tensor]] = None, + diagonal_loading: bool = True, + diag_eps: float = 1e-7, + eps: float = 1e-8, +) -> Tensor: + r"""Compute the Minimum Variance Distortionless Response (*MVDR* :cite:`capon1969high`) beamforming weights + based on the relative transfer function (RTF) and power spectral density (PSD) matrix of noise. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Given the relative transfer function (RTF) matrix or the steering vector of target speech :math:`\bm{v}`, + the PSD matrix of noise :math:`\bf{\Phi}_{\textbf{NN}}`, and a one-hot vector that represents the + reference channel :math:`\bf{u}`, the method computes the MVDR beamforming weight martrix + :math:`\textbf{w}_{\text{MVDR}}`. The formula is defined as: + + .. math:: + \textbf{w}_{\text{MVDR}}(f) = + \frac{{{\bf{\Phi}_{\textbf{NN}}^{-1}}(f){\bm{v}}(f)}} + {{\bm{v}^{\mathsf{H}}}(f){\bf{\Phi}_{\textbf{NN}}^{-1}}(f){\bm{v}}(f)} + + where :math:`(.)^{\mathsf{H}}` denotes the Hermitian Conjugate operation. + + Args: + rtf (torch.Tensor): The complex-valued RTF vector of target speech. 
+ Tensor with dimensions `(..., freq, channel)`. + psd_n (torch.Tensor): The complex-valued power spectral density (PSD) matrix of noise. + Tensor with dimensions `(..., freq, channel, channel)`. + reference_channel (int or torch.Tensor): Specifies the reference channel. + If the dtype is ``int``, it represents the reference channel index. + If the dtype is ``torch.Tensor``, its shape is `(..., channel)`, where the ``channel`` dimension + is one-hot. + diagonal_loading (bool, optional): If ``True``, enables applying diagonal loading to ``psd_n``. + (Default: ``True``) + diag_eps (float, optional): The coefficient multiplied to the identity matrix for diagonal loading. + It is only effective when ``diagonal_loading`` is set to ``True``. (Default: ``1e-7``) + eps (float, optional): Value to add to the denominator in the beamforming weight formula. + (Default: ``1e-8``) + + Returns: + torch.Tensor: The complex-valued MVDR beamforming weight matrix with dimensions `(..., freq, channel)`. + """ + if rtf.ndim < 2: + raise ValueError(f"Expected at least 2D Tensor (..., freq, channel) for rtf. Found {rtf.shape}.") + if psd_n.ndim < 3: + raise ValueError(f"Expected at least 3D Tensor (..., freq, channel, channel) for psd_n. Found {psd_n.shape}.") + if not (rtf.is_complex() and psd_n.is_complex()): + raise TypeError( + "The type of rtf and psd_n must be ``torch.cfloat`` or ``torch.cdouble``. " + f"Found {rtf.dtype} for rtf and {psd_n.dtype} for psd_n." + ) + if rtf.shape != psd_n.shape[:-1]: + raise ValueError( + "The dimensions of rtf and the dimensions withou the last dimension of psd_n should be the same. " + f"Found {rtf.shape} for rtf and {psd_n.shape} for psd_n." + ) + if psd_n.shape[-1] != psd_n.shape[-2]: + raise ValueError(f"The last two dimensions of psd_n should be the same. 
Found {psd_n.shape}.") + + if diagonal_loading: + psd_n = _tik_reg(psd_n, reg=diag_eps) + # numerator = psd_n.inv() @ stv + numerator = torch.linalg.solve(psd_n, rtf.unsqueeze(-1)).squeeze(-1) # (..., freq, channel) + # denominator = stv^H @ psd_n.inv() @ stv + denominator = torch.einsum("...d,...d->...", [rtf.conj(), numerator]) + beamform_weights = numerator / (denominator.real.unsqueeze(-1) + eps) + # normalize the numerator + if reference_channel is not None: + if torch.jit.isinstance(reference_channel, int): + scale = rtf[..., reference_channel].conj() + elif torch.jit.isinstance(reference_channel, Tensor): + reference_channel = reference_channel.to(psd_n.dtype) + scale = torch.einsum("...c,...c->...", [rtf.conj(), reference_channel[..., None, :]]) + else: + raise TypeError(f'Expected "int" or "Tensor" for reference_channel. Found: {type(reference_channel)}.') + + beamform_weights = beamform_weights * scale[..., None] + + return beamform_weights + + +def rtf_evd(psd_s: Tensor) -> Tensor: + r"""Estimate the relative transfer function (RTF) or the steering vector by eigenvalue decomposition. + + .. devices:: CPU CUDA + + .. properties:: TorchScript + + Args: + psd_s (Tensor): The complex-valued power spectral density (PSD) matrix of target speech. + Tensor of dimension `(..., freq, channel, channel)` + + Returns: + Tensor: The estimated complex-valued RTF of target speech. + Tensor of dimension `(..., freq, channel)` + """ + if not psd_s.is_complex(): + raise TypeError(f"The type of psd_s must be ``torch.cfloat`` or ``torch.cdouble``. Found {psd_s.dtype}.") + if psd_s.shape[-1] != psd_s.shape[-2]: + raise ValueError(f"The last two dimensions of psd_s should be the same. 
Found {psd_s.shape}.") + _, v = torch.linalg.eigh(psd_s) # v is sorted along with eigenvalues in ascending order + rtf = v[..., -1] # choose the eigenvector with max eigenvalue + return rtf + + +def rtf_power( + psd_s: Tensor, + psd_n: Tensor, + reference_channel: Union[int, Tensor], + n_iter: int = 3, + diagonal_loading: bool = True, + diag_eps: float = 1e-7, +) -> Tensor: + r"""Estimate the relative transfer function (RTF) or the steering vector by the power method. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + psd_s (torch.Tensor): The complex-valued power spectral density (PSD) matrix of target speech. + Tensor with dimensions `(..., freq, channel, channel)`. + psd_n (torch.Tensor): The complex-valued power spectral density (PSD) matrix of noise. + Tensor with dimensions `(..., freq, channel, channel)`. + reference_channel (int or torch.Tensor): Specifies the reference channel. + If the dtype is ``int``, it represents the reference channel index. + If the dtype is ``torch.Tensor``, its shape is `(..., channel)`, where the ``channel`` dimension + is one-hot. + diagonal_loading (bool, optional): If ``True``, enables applying diagonal loading to ``psd_n``. + (Default: ``True``) + diag_eps (float, optional): The coefficient multiplied to the identity matrix for diagonal loading. + It is only effective when ``diagonal_loading`` is set to ``True``. (Default: ``1e-7``) + + Returns: + torch.Tensor: The estimated complex-valued RTF of target speech. + Tensor of dimension `(..., freq, channel)`. + """ + _assert_psd_matrices(psd_s, psd_n) + if n_iter <= 0: + raise ValueError("The number of iteration must be greater than 0.") + + # Apply diagonal loading to psd_n to improve robustness. 
+ if diagonal_loading: + psd_n = _tik_reg(psd_n, reg=diag_eps) + # phi is regarded as the first iteration + phi = torch.linalg.solve(psd_n, psd_s) # psd_n.inv() @ psd_s + if torch.jit.isinstance(reference_channel, int): + rtf = phi[..., reference_channel] + elif torch.jit.isinstance(reference_channel, Tensor): + reference_channel = reference_channel.to(psd_n.dtype) + rtf = torch.einsum("...c,...c->...", [phi, reference_channel[..., None, None, :]]) + else: + raise TypeError(f'Expected "int" or "Tensor" for reference_channel. Found: {type(reference_channel)}.') + rtf = rtf.unsqueeze(-1) # (..., freq, channel, 1) + if n_iter >= 2: + # The number of iterations in the for loop is `n_iter - 2` + # because the `phi` above and `torch.matmul(psd_s, rtf)` are regarded as + # two iterations. + for _ in range(n_iter - 2): + rtf = torch.matmul(phi, rtf) + rtf = torch.matmul(psd_s, rtf) + else: + # if there is only one iteration, the rtf is the psd_s[..., referenc_channel] + # which is psd_n @ phi @ ref_channel + rtf = torch.matmul(psd_n, rtf) + return rtf.squeeze(-1) + + +def apply_beamforming(beamform_weights: Tensor, specgram: Tensor) -> Tensor: + r"""Apply the beamforming weight to the multi-channel noisy spectrum to obtain the single-channel enhanced spectrum. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + .. math:: + \hat{\textbf{S}}(f) = \textbf{w}_{\text{bf}}(f)^{\mathsf{H}} \textbf{Y}(f) + + where :math:`\textbf{w}_{\text{bf}}(f)` is the beamforming weight for the :math:`f`-th frequency bin, + :math:`\textbf{Y}` is the multi-channel spectrum for the :math:`f`-th frequency bin. + + Args: + beamform_weights (Tensor): The complex-valued beamforming weight matrix. + Tensor of dimension `(..., freq, channel)` + specgram (Tensor): The multi-channel complex-valued noisy spectrum. + Tensor of dimension `(..., channel, freq, time)` + + Returns: + Tensor: The single-channel complex-valued enhanced spectrum. 
+ Tensor of dimension `(..., freq, time)` + """ + if beamform_weights.shape[:-2] != specgram.shape[:-3]: + raise ValueError( + "The dimensions except the last two dimensions of beamform_weights should be the same " + "as the dimensions except the last three dimensions of specgram. " + f"Found {beamform_weights.shape} for beamform_weights and {specgram.shape} for specgram." + ) + + if not (beamform_weights.is_complex() and specgram.is_complex()): + raise TypeError( + "The type of beamform_weights and specgram must be ``torch.cfloat`` or ``torch.cdouble``. " + f"Found {beamform_weights.dtype} for beamform_weights and {specgram.dtype} for specgram." + ) + + # (..., freq, channel) x (..., channel, freq, time) -> (..., freq, time) + specgram_enhanced = torch.einsum("...fc,...cft->...ft", [beamform_weights.conj(), specgram]) + return specgram_enhanced + + +def _check_shape_compatible(x: torch.Tensor, y: torch.Tensor) -> None: + if x.ndim != y.ndim: + raise ValueError(f"The operands must be the same dimension (got {x.ndim} and {y.ndim}).") + + for i in range(x.ndim - 1): + xi = x.size(i) + yi = y.size(i) + if xi == yi or xi == 1 or yi == 1: + continue + raise ValueError(f"Leading dimensions of x and y are not broadcastable (got {x.shape} and {y.shape}).") + + +def _check_convolve_mode(mode: str) -> None: + valid_convolve_modes = ["full", "valid", "same"] + if mode not in valid_convolve_modes: + raise ValueError(f"Unrecognized mode value '{mode}'. 
Please specify one of {valid_convolve_modes}.") + + +def _apply_convolve_mode(conv_result: torch.Tensor, x_length: int, y_length: int, mode: str) -> torch.Tensor: + valid_convolve_modes = ["full", "valid", "same"] + if mode == "full": + return conv_result + elif mode == "valid": + target_length = max(x_length, y_length) - min(x_length, y_length) + 1 + start_idx = (conv_result.size(-1) - target_length) // 2 + return conv_result[..., start_idx : start_idx + target_length] + elif mode == "same": + start_idx = (conv_result.size(-1) - x_length) // 2 + return conv_result[..., start_idx : start_idx + x_length] + else: + raise ValueError(f"Unrecognized mode value '{mode}'. Please specify one of {valid_convolve_modes}.") + + +def fftconvolve(x: torch.Tensor, y: torch.Tensor, mode: str = "full") -> torch.Tensor: + r""" + Convolves inputs along their last dimension using FFT. For inputs with large last dimensions, this function + is generally much faster than :meth:`convolve`. + Note that, in contrast to :meth:`torch.nn.functional.conv1d`, which actually applies the valid cross-correlation + operator, this function applies the true `convolution`_ operator. + Also note that this function can only output float tensors (int tensor inputs will be cast to float). + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + x (torch.Tensor): First convolution operand, with shape `(..., N)`. + y (torch.Tensor): Second convolution operand, with shape `(..., M)` + (leading dimensions must be broadcast-able with those of ``x``). + mode (str, optional): Must be one of ("full", "valid", "same"). + + * "full": Returns the full convolution result, with shape `(..., N + M - 1)`. (Default) + * "valid": Returns the segment of the full convolution result corresponding to where + the two inputs overlap completely, with shape `(..., max(N, M) - min(N, M) + 1)`. + * "same": Returns the center segment of the full convolution result, with shape `(..., N)`. 
+ + Returns: + torch.Tensor: Result of convolving ``x`` and ``y``, with shape `(..., L)`, where + the leading dimensions match those of ``x`` and `L` is dictated by ``mode``. + + .. _convolution: + https://en.wikipedia.org/wiki/Convolution + """ + _check_shape_compatible(x, y) + _check_convolve_mode(mode) + + n = x.size(-1) + y.size(-1) - 1 + fresult = torch.fft.rfft(x, n=n) * torch.fft.rfft(y, n=n) + result = torch.fft.irfft(fresult, n=n) + return _apply_convolve_mode(result, x.size(-1), y.size(-1), mode) + + +def convolve(x: torch.Tensor, y: torch.Tensor, mode: str = "full") -> torch.Tensor: + r""" + Convolves inputs along their last dimension using the direct method. + Note that, in contrast to :meth:`torch.nn.functional.conv1d`, which actually applies the valid cross-correlation + operator, this function applies the true `convolution`_ operator. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + x (torch.Tensor): First convolution operand, with shape `(..., N)`. + y (torch.Tensor): Second convolution operand, with shape `(..., M)` + (leading dimensions must be broadcast-able with those of ``x``). + mode (str, optional): Must be one of ("full", "valid", "same"). + + * "full": Returns the full convolution result, with shape `(..., N + M - 1)`. (Default) + * "valid": Returns the segment of the full convolution result corresponding to where + the two inputs overlap completely, with shape `(..., max(N, M) - min(N, M) + 1)`. + * "same": Returns the center segment of the full convolution result, with shape `(..., N)`. + + Returns: + torch.Tensor: Result of convolving ``x`` and ``y``, with shape `(..., L)`, where + the leading dimensions match those of ``x`` and `L` is dictated by ``mode``. + + .. 
_convolution: + https://en.wikipedia.org/wiki/Convolution + """ + _check_shape_compatible(x, y) + _check_convolve_mode(mode) + + x_size, y_size = x.size(-1), y.size(-1) + + if x.size(-1) < y.size(-1): + x, y = y, x + + if x.shape[:-1] != y.shape[:-1]: + new_shape = [max(i, j) for i, j in zip(x.shape[:-1], y.shape[:-1])] + x = x.broadcast_to(new_shape + [x.shape[-1]]) + y = y.broadcast_to(new_shape + [y.shape[-1]]) + + num_signals = torch.tensor(x.shape[:-1]).prod() + reshaped_x = x.reshape((int(num_signals), x.size(-1))) + reshaped_y = y.reshape((int(num_signals), y.size(-1))) + output = torch.nn.functional.conv1d( + input=reshaped_x, + weight=reshaped_y.flip(-1).unsqueeze(1), + stride=1, + groups=reshaped_x.size(0), + padding=reshaped_y.size(-1) - 1, + ) + output_shape = x.shape[:-1] + (-1,) + result = output.reshape(output_shape) + return _apply_convolve_mode(result, x_size, y_size, mode) + + +def add_noise( + waveform: torch.Tensor, noise: torch.Tensor, snr: torch.Tensor, lengths: Optional[torch.Tensor] = None +) -> torch.Tensor: + r"""Scales and adds noise to waveform per signal-to-noise ratio. + + Specifically, for each pair of waveform vector :math:`x \in \mathbb{R}^L` and noise vector + :math:`n \in \mathbb{R}^L`, the function computes output :math:`y` as + + .. math:: + y = x + a n \, \text{,} + + where + + .. math:: + a = \sqrt{ \frac{ ||x||_{2}^{2} }{ ||n||_{2}^{2} } \cdot 10^{-\frac{\text{SNR}}{10}} } \, \text{,} + + with :math:`\text{SNR}` being the desired signal-to-noise ratio between :math:`x` and :math:`n`, in dB. + + Note that this function broadcasts singleton leading dimensions in its inputs in a manner that is + consistent with the above formulae and PyTorch's broadcasting semantics. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + waveform (torch.Tensor): Input waveform, with shape `(..., L)`. + noise (torch.Tensor): Noise, with shape `(..., L)` (same shape as ``waveform``). 
+ snr (torch.Tensor): Signal-to-noise ratios in dB, with shape `(...,)`. + lengths (torch.Tensor or None, optional): Valid lengths of signals in ``waveform`` and ``noise``, with shape + `(...,)` (leading dimensions must match those of ``waveform``). If ``None``, all elements in ``waveform`` + and ``noise`` are treated as valid. (Default: ``None``) + + Returns: + torch.Tensor: Result of scaling and adding ``noise`` to ``waveform``, with shape `(..., L)` + (same shape as ``waveform``). + """ + + if not (waveform.ndim - 1 == noise.ndim - 1 == snr.ndim and (lengths is None or lengths.ndim == snr.ndim)): + raise ValueError("Input leading dimensions don't match.") + + L = waveform.size(-1) + + if L != noise.size(-1): + raise ValueError(f"Length dimensions of waveform and noise don't match (got {L} and {noise.size(-1)}).") + + # compute scale + if lengths is not None: + mask = torch.arange(0, L, device=lengths.device).expand(waveform.shape) < lengths.unsqueeze( + -1 + ) # (*, L) < (*, 1) = (*, L) + masked_waveform = waveform * mask + masked_noise = noise * mask + else: + masked_waveform = waveform + masked_noise = noise + + energy_signal = torch.linalg.vector_norm(masked_waveform, ord=2, dim=-1) ** 2 # (*,) + energy_noise = torch.linalg.vector_norm(masked_noise, ord=2, dim=-1) ** 2 # (*,) + original_snr_db = 10 * (torch.log10(energy_signal) - torch.log10(energy_noise)) + scale = 10 ** ((original_snr_db - snr) / 20.0) # (*,) + + # scale noise + scaled_noise = scale.unsqueeze(-1) * noise # (*, 1) * (*, L) = (*, L) + + return waveform + scaled_noise # (*, L) + + +def speed( + waveform: torch.Tensor, orig_freq: int, factor: float, lengths: Optional[torch.Tensor] = None +) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + r"""Adjusts waveform speed. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + waveform (torch.Tensor): Input signals, with shape `(..., time)`. + orig_freq (int): Original frequency of the signals in ``waveform``. 
+ factor (float): Factor by which to adjust speed of input. Values greater than 1.0 + compress ``waveform`` in time, whereas values less than 1.0 stretch ``waveform`` in time. + lengths (torch.Tensor or None, optional): Valid lengths of signals in ``waveform``, with shape `(...)`. + If ``None``, all elements in ``waveform`` are treated as valid. (Default: ``None``) + + Returns: + (torch.Tensor, torch.Tensor or None): + torch.Tensor + Speed-adjusted waveform, with shape `(..., new_time).` + torch.Tensor or None + If ``lengths`` is not ``None``, valid lengths of signals in speed-adjusted waveform, + with shape `(...)`; otherwise, ``None``. + """ + + source_sample_rate = int(factor * orig_freq) + target_sample_rate = int(orig_freq) + + gcd = math.gcd(source_sample_rate, target_sample_rate) + source_sample_rate = source_sample_rate // gcd + target_sample_rate = target_sample_rate // gcd + + if lengths is None: + out_lengths = None + else: + out_lengths = torch.ceil(lengths * target_sample_rate / source_sample_rate).to(lengths.dtype) + + return resample(waveform, source_sample_rate, target_sample_rate), out_lengths + + +def preemphasis(waveform, coeff: float = 0.97) -> torch.Tensor: + r"""Pre-emphasizes a waveform along its last dimension, i.e. + for each signal :math:`x` in ``waveform``, computes + output :math:`y` as + + .. math:: + y[i] = x[i] - \text{coeff} \cdot x[i - 1] + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + waveform (torch.Tensor): Waveform, with shape `(..., N)`. + coeff (float, optional): Pre-emphasis coefficient. Typically between 0.0 and 1.0. + (Default: 0.97) + + Returns: + torch.Tensor: Pre-emphasized waveform, with shape `(..., N)`. + """ + waveform = waveform.clone() + waveform[..., 1:] -= coeff * waveform[..., :-1] + return waveform + + +def deemphasis(waveform, coeff: float = 0.97) -> torch.Tensor: + r"""De-emphasizes a waveform along its last dimension. + Inverse of :meth:`preemphasis`. 
Concretely, for each signal + :math:`x` in ``waveform``, computes output :math:`y` as + + .. math:: + y[i] = x[i] + \text{coeff} \cdot y[i - 1] + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + waveform (torch.Tensor): Waveform, with shape `(..., N)`. + coeff (float, optional): De-emphasis coefficient. Typically between 0.0 and 1.0. + (Default: 0.97) + + Returns: + torch.Tensor: De-emphasized waveform, with shape `(..., N)`. + """ + a_coeffs = torch.tensor([1.0, -coeff], dtype=waveform.dtype, device=waveform.device) + b_coeffs = torch.tensor([1.0, 0.0], dtype=waveform.dtype, device=waveform.device) + return torchaudio.functional.lfilter(waveform, a_coeffs=a_coeffs, b_coeffs=b_coeffs) + + +def frechet_distance(mu_x, sigma_x, mu_y, sigma_y): + r"""Computes the Fréchet distance between two multivariate normal distributions :cite:`dowson1982frechet`. + + Concretely, for multivariate Gaussians :math:`X(\mu_X, \Sigma_X)` + and :math:`Y(\mu_Y, \Sigma_Y)`, the function computes and returns :math:`F` as + + .. math:: + F(X, Y) = || \mu_X - \mu_Y ||_2^2 + + \text{Tr}\left( \Sigma_X + \Sigma_Y - 2 \sqrt{\Sigma_X \Sigma_Y} \right) + + Args: + mu_x (torch.Tensor): mean :math:`\mu_X` of multivariate Gaussian :math:`X`, with shape `(N,)`. + sigma_x (torch.Tensor): covariance matrix :math:`\Sigma_X` of :math:`X`, with shape `(N, N)`. + mu_y (torch.Tensor): mean :math:`\mu_Y` of multivariate Gaussian :math:`Y`, with shape `(N,)`. + sigma_y (torch.Tensor): covariance matrix :math:`\Sigma_Y` of :math:`Y`, with shape `(N, N)`. + + Returns: + torch.Tensor: the Fréchet distance between :math:`X` and :math:`Y`. 
+ """ + if len(mu_x.size()) != 1: + raise ValueError(f"Input mu_x must be one-dimensional; got dimension {len(mu_x.size())}.") + if len(sigma_x.size()) != 2: + raise ValueError(f"Input sigma_x must be two-dimensional; got dimension {len(sigma_x.size())}.") + if sigma_x.size(0) != sigma_x.size(1) != mu_x.size(0): + raise ValueError("Each of sigma_x's dimensions must match mu_x's size.") + if mu_x.size() != mu_y.size(): + raise ValueError(f"Inputs mu_x and mu_y must have the same shape; got {mu_x.size()} and {mu_y.size()}.") + if sigma_x.size() != sigma_y.size(): + raise ValueError( + f"Inputs sigma_x and sigma_y must have the same shape; got {sigma_x.size()} and {sigma_y.size()}." + ) + + a = (mu_x - mu_y).square().sum() + b = sigma_x.trace() + sigma_y.trace() + c = torch.linalg.eigvals(sigma_x @ sigma_y).sqrt().real.sum() + return a + b - 2 * c diff --git a/venv/lib/python3.10/site-packages/torchaudio/io/__init__.py b/venv/lib/python3.10/site-packages/torchaudio/io/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5486424dd21a0f1aeb8ea2cead737309fb7f9a7a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/io/__init__.py @@ -0,0 +1,13 @@ +from torio.io import CodecConfig, StreamingMediaDecoder as StreamReader, StreamingMediaEncoder as StreamWriter + +from ._effector import AudioEffector +from ._playback import play_audio + + +__all__ = [ + "AudioEffector", + "StreamReader", + "StreamWriter", + "CodecConfig", + "play_audio", +] diff --git a/venv/lib/python3.10/site-packages/torchaudio/io/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/io/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db89782d249cfd6f7b0d5de9d8b0b629276b6e6b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/io/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torchaudio/io/__pycache__/_effector.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/io/__pycache__/_effector.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df30129f6b8f16dc5f0a2952efce71d1cee0d1c2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/io/__pycache__/_effector.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/io/__pycache__/_playback.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/io/__pycache__/_playback.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de57a98e634e42b4b43ea09869236df92d7369d3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/io/__pycache__/_playback.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/io/_effector.py b/venv/lib/python3.10/site-packages/torchaudio/io/_effector.py new file mode 100644 index 0000000000000000000000000000000000000000..74255684c8fa75789e88fc224bcdac12aa1b29cf --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/io/_effector.py @@ -0,0 +1,347 @@ +import io +from typing import Iterator, List, Optional + +import torch +from torch import Tensor + +from torio.io._streaming_media_decoder import _get_afilter_desc, StreamingMediaDecoder as StreamReader +from torio.io._streaming_media_encoder import CodecConfig, StreamingMediaEncoder as StreamWriter + + +class _StreamingIOBuffer: + """Streaming Bytes IO buffer. Data are dropped when read.""" + + def __init__(self): + self._buffer: List(bytes) = [] + + def write(self, b: bytes): + if b: + self._buffer.append(b) + return len(b) + + def pop(self, n): + """Pop the oldest byte string. 
It does not necessary return the requested amount""" + if not self._buffer: + return b"" + if len(self._buffer[0]) <= n: + return self._buffer.pop(0) + ret = self._buffer[0][:n] + self._buffer[0] = self._buffer[0][n:] + return ret + + +def _get_sample_fmt(dtype: torch.dtype): + types = { + torch.uint8: "u8", + torch.int16: "s16", + torch.int32: "s32", + torch.float32: "flt", + torch.float64: "dbl", + } + if dtype not in types: + raise ValueError(f"Unsupported dtype is provided {dtype}. Supported dtypes are: {types.keys()}") + return types[dtype] + + +class _AudioStreamingEncoder: + """Given a waveform, encode on-demand and return bytes""" + + def __init__( + self, + src: Tensor, + sample_rate: int, + effect: str, + muxer: str, + encoder: Optional[str], + codec_config: Optional[CodecConfig], + frames_per_chunk: int, + ): + self.src = src + self.buffer = _StreamingIOBuffer() + self.writer = StreamWriter(self.buffer, format=muxer) + self.writer.add_audio_stream( + num_channels=src.size(1), + sample_rate=sample_rate, + format=_get_sample_fmt(src.dtype), + encoder=encoder, + filter_desc=effect, + codec_config=codec_config, + ) + self.writer.open() + self.fpc = frames_per_chunk + + # index on the input tensor (along time-axis) + # we use -1 to indicate that we finished iterating the tensor and + # the writer is closed. 
+ self.i_iter = 0 + + def read(self, n): + while not self.buffer._buffer and self.i_iter >= 0: + self.writer.write_audio_chunk(0, self.src[self.i_iter : self.i_iter + self.fpc]) + self.i_iter += self.fpc + if self.i_iter >= self.src.size(0): + self.writer.flush() + self.writer.close() + self.i_iter = -1 + return self.buffer.pop(n) + + +def _encode( + src: Tensor, + sample_rate: int, + effect: str, + muxer: str, + encoder: Optional[str], + codec_config: Optional[CodecConfig], +): + buffer = io.BytesIO() + writer = StreamWriter(buffer, format=muxer) + writer.add_audio_stream( + num_channels=src.size(1), + sample_rate=sample_rate, + format=_get_sample_fmt(src.dtype), + encoder=encoder, + filter_desc=effect, + codec_config=codec_config, + ) + with writer.open(): + writer.write_audio_chunk(0, src) + buffer.seek(0) + return buffer + + +def _get_muxer(dtype: torch.dtype): + # TODO: check if this works in Windows. + types = { + torch.uint8: "u8", + torch.int16: "s16le", + torch.int32: "s32le", + torch.float32: "f32le", + torch.float64: "f64le", + } + if dtype not in types: + raise ValueError(f"Unsupported dtype is provided {dtype}. Supported dtypes are: {types.keys()}") + return types[dtype] + + +class AudioEffector: + """Apply various filters and/or codecs to waveforms. + + .. versionadded:: 2.1 + + Args: + effect (str or None, optional): Filter expressions or ``None`` to apply no filter. + See https://ffmpeg.org/ffmpeg-filters.html#Audio-Filters for the + details of filter syntax. + + format (str or None, optional): When provided, encode the audio into the + corresponding format. Default: ``None``. + + encoder (str or None, optional): When provided, override the encoder used + by the ``format``. Default: ``None``. + + codec_config (CodecConfig or None, optional): When provided, configure the encoding codec. + Should be provided in conjunction with ``format`` option. 
+ + pad_end (bool, optional): When enabled, and if the waveform becomes shorter after applying + effects/codec, then pad the end with silence. + + Example - Basic usage + To use ``AudioEffector``, first instantiate it with a set of + ``effect`` and ``format``. + + >>> # instantiate the effector + >>> effector = AudioEffector(effect=..., format=...) + + Then, use :py:meth:`~AudioEffector.apply` or :py:meth:`~AudioEffector.stream` + method to apply them. + + >>> # Apply the effect to the whole waveform + >>> applied = effector.apply(waveform, sample_rate) + + >>> # Apply the effect chunk-by-chunk + >>> for chunk in effector.stream(waveform, sample_rate): + >>> ... + + Example - Applying effects + Please refer to + https://ffmpeg.org/ffmpeg-filters.html#Filtergraph-description + for the overview of filter description, and + https://ffmpeg.org/ffmpeg-filters.html#toc-Audio-Filters + for the list of available filters. + + Tempo - https://ffmpeg.org/ffmpeg-filters.html#atempo + + >>> AudioEffector(effect="atempo=1.5") + + Echo - https://ffmpeg.org/ffmpeg-filters.html#aecho + + >>> AudioEffector(effect="aecho=0.8:0.88:60:0.4") + + Flanger - https://ffmpeg.org/ffmpeg-filters.html#flanger + + >>> AudioEffector(effect="aflanger") + + Vibrato - https://ffmpeg.org/ffmpeg-filters.html#vibrato + + >>> AudioEffector(effect="vibrato") + + Tremolo - https://ffmpeg.org/ffmpeg-filters.html#tremolo + + >>> AudioEffector(effect="vibrato") + + You can also apply multiple effects at once. + + >>> AudioEffector(effect="") + + Example - Applying codec + One can apply codec using ``format`` argument. ``format`` can be + audio format or container format. If the container format supports + multiple encoders, you can specify it with ``encoder`` argument. 
+ + Wav format + (no compression is applied but samples are converted to + 16-bit signed integer) + + >>> AudioEffector(format="wav") + + Ogg format with default encoder + + >>> AudioEffector(format="ogg") + + Ogg format with vorbis + + >>> AudioEffector(format="ogg", encoder="vorbis") + + Ogg format with opus + + >>> AudioEffector(format="ogg", encoder="opus") + + Webm format with opus + + >>> AudioEffector(format="webm", encoder="opus") + + Example - Applying codec with configuration + Reference: https://trac.ffmpeg.org/wiki/Encode/MP3 + + MP3 with default config + + >>> AudioEffector(format="mp3") + + MP3 with variable bitrate + + >>> AudioEffector(format="mp3", codec_config=CodecConfig(qscale=5)) + + MP3 with constant bitrate + + >>> AudioEffector(format="mp3", codec_config=CodecConfig(bit_rate=32_000)) + """ + + def __init__( + self, + effect: Optional[str] = None, + format: Optional[str] = None, + *, + encoder: Optional[str] = None, + codec_config: Optional[CodecConfig] = None, + pad_end: bool = True, + ): + if format is None: + if encoder is not None or codec_config is not None: + raise ValueError("`encoder` and/or `condec_config` opions are provided without `format` option.") + self.effect = effect + self.format = format + self.encoder = encoder + self.codec_config = codec_config + self.pad_end = pad_end + + def _get_reader(self, waveform, sample_rate, output_sample_rate, frames_per_chunk=None): + num_frames, num_channels = waveform.shape + + if self.format is not None: + muxer = self.format + encoder = self.encoder + option = {} + # Some formats are headerless, so need to provide these infomation. 
+ if self.format == "mulaw": + option = {"sample_rate": f"{sample_rate}", "channels": f"{num_channels}"} + + else: # PCM + muxer = _get_muxer(waveform.dtype) + encoder = None + option = {"sample_rate": f"{sample_rate}", "channels": f"{num_channels}"} + + if frames_per_chunk is None: + src = _encode(waveform, sample_rate, self.effect, muxer, encoder, self.codec_config) + else: + src = _AudioStreamingEncoder( + waveform, sample_rate, self.effect, muxer, encoder, self.codec_config, frames_per_chunk + ) + + output_sr = sample_rate if output_sample_rate is None else output_sample_rate + filter_desc = _get_afilter_desc(output_sr, _get_sample_fmt(waveform.dtype), num_channels) + if self.pad_end: + filter_desc = f"{filter_desc},apad=whole_len={num_frames}" + + reader = StreamReader(src, format=muxer, option=option) + reader.add_audio_stream(frames_per_chunk or -1, -1, filter_desc=filter_desc) + return reader + + def apply(self, waveform: Tensor, sample_rate: int, output_sample_rate: Optional[int] = None) -> Tensor: + """Apply the effect and/or codecs to the whole tensor. + + Args: + waveform (Tensor): The input waveform. Shape: ``(time, channel)`` + sample_rate (int): Sample rate of the input waveform. + output_sample_rate (int or None, optional): Output sample rate. + If provided, override the output sample rate. + Otherwise, the resulting tensor is resampled to have + the same sample rate as the input. + Default: ``None``. + + Returns: + Tensor: + Resulting Tensor. Shape: ``(time, channel)``. The number of frames + could be different from that of the input. + """ + if waveform.ndim != 2: + raise ValueError(f"Expected the input waveform to be 2D. 
Found: {waveform.ndim}") + + if waveform.numel() == 0: + return waveform + + reader = self._get_reader(waveform, sample_rate, output_sample_rate) + reader.process_all_packets() + (applied,) = reader.pop_chunks() + return Tensor(applied) + + def stream( + self, waveform: Tensor, sample_rate: int, frames_per_chunk: int, output_sample_rate: Optional[int] = None + ) -> Iterator[Tensor]: + """Apply the effect and/or codecs to the given tensor chunk by chunk. + + Args: + waveform (Tensor): The input waveform. Shape: ``(time, channel)`` + sample_rate (int): Sample rate of the waveform. + frames_per_chunk (int): The number of frames to return at a time. + output_sample_rate (int or None, optional): Output sample rate. + If provided, override the output sample rate. + Otherwise, the resulting tensor is resampled to have + the same sample rate as the input. + Default: ``None``. + + Returns: + Iterator[Tensor]: + Series of processed chunks. Shape: ``(time, channel)``, where the + the number of frames matches ``frames_per_chunk`` except the + last chunk, which could be shorter. + """ + if waveform.ndim != 2: + raise ValueError(f"Expected the input waveform to be 2D. 
Found: {waveform.ndim}") + + if waveform.numel() == 0: + return waveform + + reader = self._get_reader(waveform, sample_rate, output_sample_rate, frames_per_chunk) + for (applied,) in reader.stream(): + yield Tensor(applied) diff --git a/venv/lib/python3.10/site-packages/torchaudio/io/_playback.py b/venv/lib/python3.10/site-packages/torchaudio/io/_playback.py new file mode 100644 index 0000000000000000000000000000000000000000..7183ee3ba8cad8e842d066f6aaa9067687b9476b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/io/_playback.py @@ -0,0 +1,72 @@ +import warnings +from sys import platform +from typing import Optional + +import torch +import torchaudio + +dict_format = { + torch.uint8: "u8", + torch.int16: "s16", + torch.int32: "s32", + torch.int64: "s64", + torch.float32: "flt", + torch.float64: "dbl", +} + + +def play_audio( + waveform: torch.Tensor, + sample_rate: Optional[float], + device: Optional[str] = None, +) -> None: + """Plays audio through specified or available output device. + + .. warning:: + This function is currently only supported on MacOS, and requires + libavdevice (FFmpeg) with ``audiotoolbox`` output device. + + .. note:: + This function can play up to two audio channels. + + Args: + waveform: Tensor containing the audio to play. + Expected shape: `(time, num_channels)`. + sample_rate: Sample rate of the audio to play. + device: Output device to use. If None, the default device is used. + """ + + if platform == "darwin": + device = device or "audiotoolbox" + path = "-" + else: + raise ValueError(f"This function only supports MacOS, but current OS is {platform}") + + available_devices = list(torchaudio.utils.ffmpeg_utils.get_output_devices().keys()) + if device not in available_devices: + raise ValueError(f"Device {device} is not available. Available devices are: {available_devices}") + + if waveform.dtype not in dict_format: + raise ValueError(f"Unsupported type {waveform.dtype}. 
The list of supported types is: {dict_format.keys()}") + format = dict_format[waveform.dtype] + + if waveform.ndim != 2: + raise ValueError(f"Expected 2D tensor with shape `(time, num_channels)`, got {waveform.ndim}D tensor instead") + + time, num_channels = waveform.size() + if num_channels > 2: + warnings.warn( + f"Expected up to 2 channels, got {num_channels} channels instead. " + "Only the first 2 channels will be played.", + stacklevel=2, + ) + + # Write to speaker device + s = torchaudio.io.StreamWriter(dst=path, format=device) + s.add_audio_stream(sample_rate, num_channels, format=format) + + # write audio to the device + block_size = 256 + with s.open(): + for i in range(0, time, block_size): + s.write_audio_chunk(0, waveform[i : i + block_size, :]) diff --git a/venv/lib/python3.10/site-packages/torchaudio/kaldi_io.py b/venv/lib/python3.10/site-packages/torchaudio/kaldi_io.py new file mode 100644 index 0000000000000000000000000000000000000000..4d372429dcfd23fdcbe8cd0f38abef1086d8a5eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/kaldi_io.py @@ -0,0 +1,144 @@ +# To use this file, the dependency (https://github.com/vesis84/kaldi-io-for-python) +# needs to be installed. This is a light wrapper around kaldi_io that returns +# torch.Tensors. +from typing import Any, Callable, Iterable, Tuple + +import torch +from torch import Tensor +from torchaudio._internal import module_utils as _mod_utils + +if _mod_utils.is_module_available("numpy"): + import numpy as np + + +__all__ = [ + "read_vec_int_ark", + "read_vec_flt_scp", + "read_vec_flt_ark", + "read_mat_scp", + "read_mat_ark", +] + + +def _convert_method_output_to_tensor( + file_or_fd: Any, fn: Callable, convert_contiguous: bool = False +) -> Iterable[Tuple[str, Tensor]]: + r"""Takes a method invokes it. The output is converted to a tensor. 
+ + Args: + file_or_fd (str/FileDescriptor): File name or file descriptor + fn (Callable): Function that has the signature (file name/descriptor) and converts it to + Iterable[Tuple[str, Tensor]]. + convert_contiguous (bool, optional): Determines whether the array should be converted into a + contiguous layout. (Default: ``False``) + + Returns: + Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is vec/mat + """ + for key, np_arr in fn(file_or_fd): + if convert_contiguous: + np_arr = np.ascontiguousarray(np_arr) + yield key, torch.from_numpy(np_arr) + + +@_mod_utils.requires_module("kaldi_io", "numpy") +def read_vec_int_ark(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]: + r"""Create generator of (key,vector) tuples, which reads from the ark file/stream. + + Args: + file_or_fd (str/FileDescriptor): ark, gzipped ark, pipe or opened file descriptor + + Returns: + Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the vector read from file + + Example + >>> # read ark to a 'dictionary' + >>> d = { u:d for u,d in torchaudio.kaldi_io.read_vec_int_ark(file) } + """ + + import kaldi_io + + # Requires convert_contiguous to be True because elements from int32 vector are + # sorted in tuples: (sizeof(int32), value) so strides are (5,) instead of (4,) which will throw an error + # in from_numpy as it expects strides to be a multiple of 4 (int32). + return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_vec_int_ark, convert_contiguous=True) + + +@_mod_utils.requires_module("kaldi_io", "numpy") +def read_vec_flt_scp(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]: + r"""Create generator of (key,vector) tuples, read according to Kaldi scp. 
+ + Args: + file_or_fd (str/FileDescriptor): scp, gzipped scp, pipe or opened file descriptor + + Returns: + Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the vector read from file + + Example + >>> # read scp to a 'dictionary' + >>> # d = { u:d for u,d in torchaudio.kaldi_io.read_vec_flt_scp(file) } + """ + + import kaldi_io + + return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_vec_flt_scp) + + +@_mod_utils.requires_module("kaldi_io", "numpy") +def read_vec_flt_ark(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]: + r"""Create generator of (key,vector) tuples, which reads from the ark file/stream. + + Args: + file_or_fd (str/FileDescriptor): ark, gzipped ark, pipe or opened file descriptor + + Returns: + Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the vector read from file + + Example + >>> # read ark to a 'dictionary' + >>> d = { u:d for u,d in torchaudio.kaldi_io.read_vec_flt_ark(file) } + """ + + import kaldi_io + + return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_vec_flt_ark) + + +@_mod_utils.requires_module("kaldi_io", "numpy") +def read_mat_scp(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]: + r"""Create generator of (key,matrix) tuples, read according to Kaldi scp. + + Args: + file_or_fd (str/FileDescriptor): scp, gzipped scp, pipe or opened file descriptor + + Returns: + Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the matrix read from file + + Example + >>> # read scp to a 'dictionary' + >>> d = { u:d for u,d in torchaudio.kaldi_io.read_mat_scp(file) } + """ + + import kaldi_io + + return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_mat_scp) + + +@_mod_utils.requires_module("kaldi_io", "numpy") +def read_mat_ark(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]: + r"""Create generator of (key,matrix) tuples, which reads from the ark file/stream. 
+ + Args: + file_or_fd (str/FileDescriptor): ark, gzipped ark, pipe or opened file descriptor + + Returns: + Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the matrix read from file + + Example + >>> # read ark to a 'dictionary' + >>> d = { u:d for u,d in torchaudio.kaldi_io.read_mat_ark(file) } + """ + + import kaldi_io + + return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_mat_ark) diff --git a/venv/lib/python3.10/site-packages/torchaudio/lib/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/lib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ba0a8988fcc992f62c2136964e521e01be66921 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/lib/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/__init__.py b/venv/lib/python3.10/site-packages/torchaudio/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5d344400d3b8771d2c1b93ba48def361615a132f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/models/__init__.py @@ -0,0 +1,85 @@ +from ._hdemucs import HDemucs, hdemucs_high, hdemucs_low, hdemucs_medium +from .conformer import Conformer +from .conv_tasnet import conv_tasnet_base, ConvTasNet +from .deepspeech import DeepSpeech +from .emformer import Emformer +from .rnnt import emformer_rnnt_base, emformer_rnnt_model, RNNT +from .rnnt_decoder import Hypothesis, RNNTBeamSearch +from .squim import ( + squim_objective_base, + squim_objective_model, + squim_subjective_base, + squim_subjective_model, + SquimObjective, + SquimSubjective, +) +from .tacotron2 import Tacotron2 +from .wav2letter import Wav2Letter +from .wav2vec2 import ( + hubert_base, + hubert_large, + hubert_pretrain_base, + hubert_pretrain_large, + hubert_pretrain_model, + hubert_pretrain_xlarge, + hubert_xlarge, + HuBERTPretrainModel, + wav2vec2_base, + 
wav2vec2_large, + wav2vec2_large_lv60k, + wav2vec2_model, + wav2vec2_xlsr_1b, + wav2vec2_xlsr_2b, + wav2vec2_xlsr_300m, + Wav2Vec2Model, + wavlm_base, + wavlm_large, + wavlm_model, +) +from .wavernn import WaveRNN + + +__all__ = [ + "Wav2Letter", + "WaveRNN", + "ConvTasNet", + "conv_tasnet_base", + "DeepSpeech", + "Wav2Vec2Model", + "HuBERTPretrainModel", + "wavlm_model", + "wavlm_base", + "wavlm_large", + "wav2vec2_model", + "wav2vec2_base", + "wav2vec2_large", + "wav2vec2_large_lv60k", + "hubert_base", + "hubert_large", + "hubert_xlarge", + "hubert_pretrain_model", + "hubert_pretrain_base", + "hubert_pretrain_large", + "hubert_pretrain_xlarge", + "wav2vec2_xlsr_300m", + "wav2vec2_xlsr_1b", + "wav2vec2_xlsr_2b", + "Tacotron2", + "Conformer", + "Emformer", + "Hypothesis", + "RNNT", + "RNNTBeamSearch", + "emformer_rnnt_base", + "emformer_rnnt_model", + "HDemucs", + "hdemucs_low", + "hdemucs_medium", + "hdemucs_high", + "squim_objective_base", + "squim_objective_model", + "squim_subjective_base", + "squim_subjective_model", + "SquimObjective", + "SquimSubjective", +] diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/__pycache__/conformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/models/__pycache__/conformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..705fa44017265d254a38a2de1f14e3320d82ea46 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/models/__pycache__/conformer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/_hdemucs.py b/venv/lib/python3.10/site-packages/torchaudio/models/_hdemucs.py new file mode 100644 index 0000000000000000000000000000000000000000..74a3ebd1d609e67edd09f4356a8cefa305c1fc49 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/models/_hdemucs.py @@ -0,0 +1,1008 @@ +# ***************************************************************************** +# MIT License +# +# Copyright (c) Facebook, Inc. 
and its affiliates. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+# ***************************************************************************** + + +import math +import typing as tp +from typing import Any, Dict, List, Optional + +import torch +from torch import nn +from torch.nn import functional as F + + +class _ScaledEmbedding(torch.nn.Module): + r"""Make continuous embeddings and boost learning rate + + Args: + num_embeddings (int): number of embeddings + embedding_dim (int): embedding dimensions + scale (float, optional): amount to scale learning rate (Default: 10.0) + smooth (bool, optional): choose to apply smoothing (Default: ``False``) + """ + + def __init__(self, num_embeddings: int, embedding_dim: int, scale: float = 10.0, smooth: bool = False): + super().__init__() + self.embedding = nn.Embedding(num_embeddings, embedding_dim) + if smooth: + weight = torch.cumsum(self.embedding.weight.data, dim=0) + # when summing gaussian, scale raises as sqrt(n), so we normalize by that. + weight = weight / torch.arange(1, num_embeddings + 1).sqrt()[:, None] + self.embedding.weight.data[:] = weight + self.embedding.weight.data /= scale + self.scale = scale + + @property + def weight(self) -> torch.Tensor: + return self.embedding.weight * self.scale + + def forward(self, x: torch.Tensor) -> torch.Tensor: + r"""Forward pass for embedding with scale. + Args: + x (torch.Tensor): input tensor of shape `(num_embeddings)` + + Returns: + (Tensor): + Embedding output of shape `(num_embeddings, embedding_dim)` + """ + out = self.embedding(x) * self.scale + return out + + +class _HEncLayer(torch.nn.Module): + + r"""Encoder layer. This used both by the time and the frequency branch. + Args: + chin (int): number of input channels. + chout (int): number of output channels. + kernel_size (int, optional): Kernel size for encoder (Default: 8) + stride (int, optional): Stride for encoder layer (Default: 4) + norm_groups (int, optional): number of groups for group norm. 
(Default: 4) + empty (bool, optional): used to make a layer with just the first conv. this is used + before merging the time and freq. branches. (Default: ``False``) + freq (bool, optional): boolean for whether conv layer is for frequency domain (Default: ``True``) + norm_type (string, optional): Norm type, either ``group_norm `` or ``none`` (Default: ``group_norm``) + context (int, optional): context size for the 1x1 conv. (Default: 0) + dconv_kw (Dict[str, Any] or None, optional): dictionary of kwargs for the DConv class. (Default: ``None``) + pad (bool, optional): true to pad the input. Padding is done so that the output size is + always the input size / stride. (Default: ``True``) + """ + + def __init__( + self, + chin: int, + chout: int, + kernel_size: int = 8, + stride: int = 4, + norm_groups: int = 4, + empty: bool = False, + freq: bool = True, + norm_type: str = "group_norm", + context: int = 0, + dconv_kw: Optional[Dict[str, Any]] = None, + pad: bool = True, + ): + super().__init__() + if dconv_kw is None: + dconv_kw = {} + norm_fn = lambda d: nn.Identity() # noqa + if norm_type == "group_norm": + norm_fn = lambda d: nn.GroupNorm(norm_groups, d) # noqa + pad_val = kernel_size // 4 if pad else 0 + klass = nn.Conv1d + self.freq = freq + self.kernel_size = kernel_size + self.stride = stride + self.empty = empty + self.pad = pad_val + if freq: + kernel_size = [kernel_size, 1] + stride = [stride, 1] + pad_val = [pad_val, 0] + klass = nn.Conv2d + self.conv = klass(chin, chout, kernel_size, stride, pad_val) + self.norm1 = norm_fn(chout) + + if self.empty: + self.rewrite = nn.Identity() + self.norm2 = nn.Identity() + self.dconv = nn.Identity() + else: + self.rewrite = klass(chout, 2 * chout, 1 + 2 * context, 1, context) + self.norm2 = norm_fn(2 * chout) + self.dconv = _DConv(chout, **dconv_kw) + + def forward(self, x: torch.Tensor, inject: Optional[torch.Tensor] = None) -> torch.Tensor: + r"""Forward pass for encoding layer. 
+ + Size depends on whether frequency or time + + Args: + x (torch.Tensor): tensor input of shape `(B, C, F, T)` for frequency and shape + `(B, C, T)` for time + inject (torch.Tensor, optional): on last layer, combine frequency and time branches through inject param, + same shape as x (default: ``None``) + + Returns: + Tensor + output tensor after encoder layer of shape `(B, C, F / stride, T)` for frequency + and shape `(B, C, ceil(T / stride))` for time + """ + + if not self.freq and x.dim() == 4: + B, C, Fr, T = x.shape + x = x.view(B, -1, T) + + if not self.freq: + le = x.shape[-1] + if not le % self.stride == 0: + x = F.pad(x, (0, self.stride - (le % self.stride))) + y = self.conv(x) + if self.empty: + return y + if inject is not None: + if inject.shape[-1] != y.shape[-1]: + raise ValueError("Injection shapes do not align") + if inject.dim() == 3 and y.dim() == 4: + inject = inject[:, :, None] + y = y + inject + y = F.gelu(self.norm1(y)) + if self.freq: + B, C, Fr, T = y.shape + y = y.permute(0, 2, 1, 3).reshape(-1, C, T) + y = self.dconv(y) + y = y.view(B, Fr, C, T).permute(0, 2, 1, 3) + else: + y = self.dconv(y) + z = self.norm2(self.rewrite(y)) + z = F.glu(z, dim=1) + return z + + +class _HDecLayer(torch.nn.Module): + r"""Decoder layer. This used both by the time and the frequency branches. + Args: + chin (int): number of input channels. + chout (int): number of output channels. + last (bool, optional): whether current layer is final layer (Default: ``False``) + kernel_size (int, optional): Kernel size for encoder (Default: 8) + stride (int): Stride for encoder layer (Default: 4) + norm_groups (int, optional): number of groups for group norm. (Default: 1) + empty (bool, optional): used to make a layer with just the first conv. this is used + before merging the time and freq. branches. 
(Default: ``False``) + freq (bool, optional): boolean for whether conv layer is for frequency (Default: ``True``) + norm_type (str, optional): Norm type, either ``group_norm `` or ``none`` (Default: ``group_norm``) + context (int, optional): context size for the 1x1 conv. (Default: 1) + dconv_kw (Dict[str, Any] or None, optional): dictionary of kwargs for the DConv class. (Default: ``None``) + pad (bool, optional): true to pad the input. Padding is done so that the output size is + always the input size / stride. (Default: ``True``) + """ + + def __init__( + self, + chin: int, + chout: int, + last: bool = False, + kernel_size: int = 8, + stride: int = 4, + norm_groups: int = 1, + empty: bool = False, + freq: bool = True, + norm_type: str = "group_norm", + context: int = 1, + dconv_kw: Optional[Dict[str, Any]] = None, + pad: bool = True, + ): + super().__init__() + if dconv_kw is None: + dconv_kw = {} + norm_fn = lambda d: nn.Identity() # noqa + if norm_type == "group_norm": + norm_fn = lambda d: nn.GroupNorm(norm_groups, d) # noqa + if pad: + if (kernel_size - stride) % 2 != 0: + raise ValueError("Kernel size and stride do not align") + pad = (kernel_size - stride) // 2 + else: + pad = 0 + self.pad = pad + self.last = last + self.freq = freq + self.chin = chin + self.empty = empty + self.stride = stride + self.kernel_size = kernel_size + klass = nn.Conv1d + klass_tr = nn.ConvTranspose1d + if freq: + kernel_size = [kernel_size, 1] + stride = [stride, 1] + klass = nn.Conv2d + klass_tr = nn.ConvTranspose2d + self.conv_tr = klass_tr(chin, chout, kernel_size, stride) + self.norm2 = norm_fn(chout) + if self.empty: + self.rewrite = nn.Identity() + self.norm1 = nn.Identity() + else: + self.rewrite = klass(chin, 2 * chin, 1 + 2 * context, 1, context) + self.norm1 = norm_fn(2 * chin) + + def forward(self, x: torch.Tensor, skip: Optional[torch.Tensor], length): + r"""Forward pass for decoding layer. 
+ + Size depends on whether frequency or time + + Args: + x (torch.Tensor): tensor input of shape `(B, C, F, T)` for frequency and shape + `(B, C, T)` for time + skip (torch.Tensor, optional): on first layer, separate frequency and time branches using param + (default: ``None``) + length (int): Size of tensor for output + + Returns: + (Tensor, Tensor): + Tensor + output tensor after decoder layer of shape `(B, C, F * stride, T)` for frequency domain except last + frequency layer shape is `(B, C, kernel_size, T)`. Shape is `(B, C, stride * T)` + for time domain. + Tensor + contains the output just before final transposed convolution, which is used when the + freq. and time branch separate. Otherwise, does not matter. Shape is + `(B, C, F, T)` for frequency and `(B, C, T)` for time. + """ + if self.freq and x.dim() == 3: + B, C, T = x.shape + x = x.view(B, self.chin, -1, T) + + if not self.empty: + x = x + skip + y = F.glu(self.norm1(self.rewrite(x)), dim=1) + else: + y = x + if skip is not None: + raise ValueError("Skip must be none when empty is true.") + + z = self.norm2(self.conv_tr(y)) + if self.freq: + if self.pad: + z = z[..., self.pad : -self.pad, :] + else: + z = z[..., self.pad : self.pad + length] + if z.shape[-1] != length: + raise ValueError("Last index of z must be equal to length") + if not self.last: + z = F.gelu(z) + + return z, y + + +class HDemucs(torch.nn.Module): + r"""Hybrid Demucs model from + *Hybrid Spectrogram and Waveform Source Separation* :cite:`defossez2021hybrid`. + + See Also: + * :class:`torchaudio.pipelines.SourceSeparationBundle`: Source separation pipeline with pre-trained models. + + Args: + sources (List[str]): list of source names. List can contain the following source + options: [``"bass"``, ``"drums"``, ``"other"``, ``"mixture"``, ``"vocals"``]. + audio_channels (int, optional): input/output audio channels. (Default: 2) + channels (int, optional): initial number of hidden channels. 
(Default: 48) + growth (int, optional): increase the number of hidden channels by this factor at each layer. (Default: 2) + nfft (int, optional): number of fft bins. Note that changing this requires careful computation of + various shape parameters and will not work out of the box for hybrid models. (Default: 4096) + depth (int, optional): number of layers in encoder and decoder (Default: 6) + freq_emb (float, optional): add frequency embedding after the first frequency layer if > 0, + the actual value controls the weight of the embedding. (Default: 0.2) + emb_scale (int, optional): equivalent to scaling the embedding learning rate (Default: 10) + emb_smooth (bool, optional): initialize the embedding with a smooth one (with respect to frequencies). + (Default: ``True``) + kernel_size (int, optional): kernel_size for encoder and decoder layers. (Default: 8) + time_stride (int, optional): stride for the final time layer, after the merge. (Default: 2) + stride (int, optional): stride for encoder and decoder layers. (Default: 4) + context (int, optional): context for 1x1 conv in the decoder. (Default: 4) + context_enc (int, optional): context for 1x1 conv in the encoder. (Default: 0) + norm_starts (int, optional): layer at which group norm starts being used. + decoder layers are numbered in reverse order. (Default: 4) + norm_groups (int, optional): number of groups for group norm. (Default: 4) + dconv_depth (int, optional): depth of residual DConv branch. (Default: 2) + dconv_comp (int, optional): compression of DConv branch. (Default: 4) + dconv_attn (int, optional): adds attention layers in DConv branch starting at this layer. (Default: 4) + dconv_lstm (int, optional): adds a LSTM layer in DConv branch starting at this layer. (Default: 4) + dconv_init (float, optional): initial scale for the DConv branch LayerScale. 
(Default: 1e-4) + """ + + def __init__( + self, + sources: List[str], + audio_channels: int = 2, + channels: int = 48, + growth: int = 2, + nfft: int = 4096, + depth: int = 6, + freq_emb: float = 0.2, + emb_scale: int = 10, + emb_smooth: bool = True, + kernel_size: int = 8, + time_stride: int = 2, + stride: int = 4, + context: int = 1, + context_enc: int = 0, + norm_starts: int = 4, + norm_groups: int = 4, + dconv_depth: int = 2, + dconv_comp: int = 4, + dconv_attn: int = 4, + dconv_lstm: int = 4, + dconv_init: float = 1e-4, + ): + super().__init__() + self.depth = depth + self.nfft = nfft + self.audio_channels = audio_channels + self.sources = sources + self.kernel_size = kernel_size + self.context = context + self.stride = stride + self.channels = channels + + self.hop_length = self.nfft // 4 + self.freq_emb = None + + self.freq_encoder = nn.ModuleList() + self.freq_decoder = nn.ModuleList() + + self.time_encoder = nn.ModuleList() + self.time_decoder = nn.ModuleList() + + chin = audio_channels + chin_z = chin * 2 # number of channels for the freq branch + chout = channels + chout_z = channels + freqs = self.nfft // 2 + + for index in range(self.depth): + lstm = index >= dconv_lstm + attn = index >= dconv_attn + norm_type = "group_norm" if index >= norm_starts else "none" + freq = freqs > 1 + stri = stride + ker = kernel_size + if not freq: + if freqs != 1: + raise ValueError("When freq is false, freqs must be 1.") + ker = time_stride * 2 + stri = time_stride + + pad = True + last_freq = False + if freq and freqs <= kernel_size: + ker = freqs + pad = False + last_freq = True + + kw = { + "kernel_size": ker, + "stride": stri, + "freq": freq, + "pad": pad, + "norm_type": norm_type, + "norm_groups": norm_groups, + "dconv_kw": { + "lstm": lstm, + "attn": attn, + "depth": dconv_depth, + "compress": dconv_comp, + "init": dconv_init, + }, + } + kwt = dict(kw) + kwt["freq"] = 0 + kwt["kernel_size"] = kernel_size + kwt["stride"] = stride + kwt["pad"] = True + kw_dec = 
dict(kw) + + if last_freq: + chout_z = max(chout, chout_z) + chout = chout_z + + enc = _HEncLayer(chin_z, chout_z, context=context_enc, **kw) + if freq: + if last_freq is True and nfft == 2048: + kwt["stride"] = 2 + kwt["kernel_size"] = 4 + tenc = _HEncLayer(chin, chout, context=context_enc, empty=last_freq, **kwt) + self.time_encoder.append(tenc) + + self.freq_encoder.append(enc) + if index == 0: + chin = self.audio_channels * len(self.sources) + chin_z = chin * 2 + dec = _HDecLayer(chout_z, chin_z, last=index == 0, context=context, **kw_dec) + if freq: + tdec = _HDecLayer(chout, chin, empty=last_freq, last=index == 0, context=context, **kwt) + self.time_decoder.insert(0, tdec) + self.freq_decoder.insert(0, dec) + + chin = chout + chin_z = chout_z + chout = int(growth * chout) + chout_z = int(growth * chout_z) + if freq: + if freqs <= kernel_size: + freqs = 1 + else: + freqs //= stride + if index == 0 and freq_emb: + self.freq_emb = _ScaledEmbedding(freqs, chin_z, smooth=emb_smooth, scale=emb_scale) + self.freq_emb_scale = freq_emb + + _rescale_module(self) + + def _spec(self, x): + hl = self.hop_length + nfft = self.nfft + x0 = x # noqa + + # We re-pad the signal in order to keep the property + # that the size of the output is exactly the size of the input + # divided by the stride (here hop_length), when divisible. + # This is achieved by padding by 1/4th of the kernel size (here nfft). + # which is not supported by torch.stft. + # Having all convolution operations follow this convention allow to easily + # align the time and frequency branches later on. 
+ if hl != nfft // 4: + raise ValueError("Hop length must be nfft // 4") + le = int(math.ceil(x.shape[-1] / hl)) + pad = hl // 2 * 3 + x = self._pad1d(x, pad, pad + le * hl - x.shape[-1], mode="reflect") + + z = _spectro(x, nfft, hl)[..., :-1, :] + if z.shape[-1] != le + 4: + raise ValueError("Spectrogram's last dimension must be 4 + input size divided by stride") + z = z[..., 2 : 2 + le] + return z + + def _ispec(self, z, length=None): + hl = self.hop_length + z = F.pad(z, [0, 0, 0, 1]) + z = F.pad(z, [2, 2]) + pad = hl // 2 * 3 + le = hl * int(math.ceil(length / hl)) + 2 * pad + x = _ispectro(z, hl, length=le) + x = x[..., pad : pad + length] + return x + + def _pad1d(self, x: torch.Tensor, padding_left: int, padding_right: int, mode: str = "zero", value: float = 0.0): + """Wrapper around F.pad, in order for reflect padding when num_frames is shorter than max_pad. + Add extra zero padding around in order for padding to not break.""" + length = x.shape[-1] + if mode == "reflect": + max_pad = max(padding_left, padding_right) + if length <= max_pad: + x = F.pad(x, (0, max_pad - length + 1)) + return F.pad(x, (padding_left, padding_right), mode, value) + + def _magnitude(self, z): + # move the complex dimension to the channel one. + B, C, Fr, T = z.shape + m = torch.view_as_real(z).permute(0, 1, 4, 2, 3) + m = m.reshape(B, C * 2, Fr, T) + return m + + def _mask(self, m): + # `m` is a full spectrogram and `z` is ignored. + B, S, C, Fr, T = m.shape + out = m.view(B, S, -1, 2, Fr, T).permute(0, 1, 2, 4, 5, 3) + out = torch.view_as_complex(out.contiguous()) + return out + + def forward(self, input: torch.Tensor): + + r"""HDemucs forward call + + Args: + input (torch.Tensor): input mixed tensor of shape `(batch_size, channel, num_frames)` + + Returns: + Tensor + output tensor split into sources of shape `(batch_size, num_sources, channel, num_frames)` + """ + + if input.ndim != 3: + raise ValueError(f"Expected 3D tensor with dimensions (batch, channel, frames). 
Found: {input.shape}") + + if input.shape[1] != self.audio_channels: + raise ValueError( + f"The channel dimension of input Tensor must match `audio_channels` of HDemucs model. " + f"Found:{input.shape[1]}." + ) + + x = input + length = x.shape[-1] + + z = self._spec(input) + mag = self._magnitude(z) + x = mag + + B, C, Fq, T = x.shape + + # unlike previous Demucs, we always normalize because it is easier. + mean = x.mean(dim=(1, 2, 3), keepdim=True) + std = x.std(dim=(1, 2, 3), keepdim=True) + x = (x - mean) / (1e-5 + std) + # x will be the freq. branch input. + + # Prepare the time branch input. + xt = input + meant = xt.mean(dim=(1, 2), keepdim=True) + stdt = xt.std(dim=(1, 2), keepdim=True) + xt = (xt - meant) / (1e-5 + stdt) + + saved = [] # skip connections, freq. + saved_t = [] # skip connections, time. + lengths: List[int] = [] # saved lengths to properly remove padding, freq branch. + lengths_t: List[int] = [] # saved lengths for time branch. + + for idx, encode in enumerate(self.freq_encoder): + lengths.append(x.shape[-1]) + inject = None + if idx < len(self.time_encoder): + # we have not yet merged branches. + lengths_t.append(xt.shape[-1]) + tenc = self.time_encoder[idx] + xt = tenc(xt) + if not tenc.empty: + # save for skip connection + saved_t.append(xt) + else: + # tenc contains just the first conv., so that now time and freq. + # branches have the same shape and can be merged. + inject = xt + x = encode(x, inject) + if idx == 0 and self.freq_emb is not None: + # add frequency embedding to allow for non equivariant convolutions + # over the frequency axis. + frs = torch.arange(x.shape[-2], device=x.device) + emb = self.freq_emb(frs).t()[None, :, :, None].expand_as(x) + x = x + self.freq_emb_scale * emb + + saved.append(x) + + x = torch.zeros_like(x) + xt = torch.zeros_like(x) + # initialize everything to zero (signal will go through u-net skips). 
+ + for idx, decode in enumerate(self.freq_decoder): + skip = saved.pop(-1) + x, pre = decode(x, skip, lengths.pop(-1)) + # `pre` contains the output just before final transposed convolution, + # which is used when the freq. and time branch separate. + offset = self.depth - len(self.time_decoder) + if idx >= offset: + tdec = self.time_decoder[idx - offset] + length_t = lengths_t.pop(-1) + if tdec.empty: + if pre.shape[2] != 1: + raise ValueError(f"If tdec empty is True, pre shape does not match {pre.shape}") + pre = pre[:, :, 0] + xt, _ = tdec(pre, None, length_t) + else: + skip = saved_t.pop(-1) + xt, _ = tdec(xt, skip, length_t) + + if len(saved) != 0: + raise AssertionError("saved is not empty") + if len(lengths_t) != 0: + raise AssertionError("lengths_t is not empty") + if len(saved_t) != 0: + raise AssertionError("saved_t is not empty") + + S = len(self.sources) + x = x.view(B, S, -1, Fq, T) + x = x * std[:, None] + mean[:, None] + + zout = self._mask(x) + x = self._ispec(zout, length) + + xt = xt.view(B, S, -1, length) + xt = xt * stdt[:, None] + meant[:, None] + x = xt + x + return x + + +class _DConv(torch.nn.Module): + r""" + New residual branches in each encoder layer. + This alternates dilated convolutions, potentially with LSTMs and attention. + Also before entering each residual branch, dimension is projected on a smaller subspace, + e.g. of dim `channels // compress`. + + Args: + channels (int): input/output channels for residual branch. + compress (float, optional): amount of channel compression inside the branch. (default: 4) + depth (int, optional): number of layers in the residual branch. Each layer has its own + projection, and potentially LSTM and attention.(default: 2) + init (float, optional): initial scale for LayerNorm. (default: 1e-4) + norm_type (bool, optional): Norm type, either ``group_norm `` or ``none`` (Default: ``group_norm``) + attn (bool, optional): use LocalAttention. 
(Default: ``False``) + heads (int, optional): number of heads for the LocalAttention. (default: 4) + ndecay (int, optional): number of decay controls in the LocalAttention. (default: 4) + lstm (bool, optional): use LSTM. (Default: ``False``) + kernel_size (int, optional): kernel size for the (dilated) convolutions. (default: 3) + """ + + def __init__( + self, + channels: int, + compress: float = 4, + depth: int = 2, + init: float = 1e-4, + norm_type: str = "group_norm", + attn: bool = False, + heads: int = 4, + ndecay: int = 4, + lstm: bool = False, + kernel_size: int = 3, + ): + + super().__init__() + if kernel_size % 2 == 0: + raise ValueError("Kernel size should not be divisible by 2") + self.channels = channels + self.compress = compress + self.depth = abs(depth) + dilate = depth > 0 + + norm_fn: tp.Callable[[int], nn.Module] + norm_fn = lambda d: nn.Identity() # noqa + if norm_type == "group_norm": + norm_fn = lambda d: nn.GroupNorm(1, d) # noqa + + hidden = int(channels / compress) + + act = nn.GELU + + self.layers = nn.ModuleList([]) + for d in range(self.depth): + dilation = pow(2, d) if dilate else 1 + padding = dilation * (kernel_size // 2) + mods = [ + nn.Conv1d(channels, hidden, kernel_size, dilation=dilation, padding=padding), + norm_fn(hidden), + act(), + nn.Conv1d(hidden, 2 * channels, 1), + norm_fn(2 * channels), + nn.GLU(1), + _LayerScale(channels, init), + ] + if attn: + mods.insert(3, _LocalState(hidden, heads=heads, ndecay=ndecay)) + if lstm: + mods.insert(3, _BLSTM(hidden, layers=2, skip=True)) + layer = nn.Sequential(*mods) + self.layers.append(layer) + + def forward(self, x): + r"""DConv forward call + + Args: + x (torch.Tensor): input tensor for convolution + + Returns: + Tensor + Output after being run through layers. + """ + for layer in self.layers: + x = x + layer(x) + return x + + +class _BLSTM(torch.nn.Module): + r""" + BiLSTM with same hidden units as input dim. 
+ If `max_steps` is not None, input will be splitting in overlapping + chunks and the LSTM applied separately on each chunk. + Args: + dim (int): dimensions at LSTM layer. + layers (int, optional): number of LSTM layers. (default: 1) + skip (bool, optional): (default: ``False``) + """ + + def __init__(self, dim, layers: int = 1, skip: bool = False): + super().__init__() + self.max_steps = 200 + self.lstm = nn.LSTM(bidirectional=True, num_layers=layers, hidden_size=dim, input_size=dim) + self.linear = nn.Linear(2 * dim, dim) + self.skip = skip + + def forward(self, x: torch.Tensor) -> torch.Tensor: + r"""BLSTM forward call + + Args: + x (torch.Tensor): input tensor for BLSTM shape is `(batch_size, dim, time_steps)` + + Returns: + Tensor + Output after being run through bidirectional LSTM. Shape is `(batch_size, dim, time_steps)` + """ + B, C, T = x.shape + y = x + framed = False + width = 0 + stride = 0 + nframes = 0 + if self.max_steps is not None and T > self.max_steps: + width = self.max_steps + stride = width // 2 + frames = _unfold(x, width, stride) + nframes = frames.shape[2] + framed = True + x = frames.permute(0, 2, 1, 3).reshape(-1, C, width) + + x = x.permute(2, 0, 1) + + x = self.lstm(x)[0] + x = self.linear(x) + x = x.permute(1, 2, 0) + if framed: + out = [] + frames = x.reshape(B, -1, C, width) + limit = stride // 2 + for k in range(nframes): + if k == 0: + out.append(frames[:, k, :, :-limit]) + elif k == nframes - 1: + out.append(frames[:, k, :, limit:]) + else: + out.append(frames[:, k, :, limit:-limit]) + out = torch.cat(out, -1) + out = out[..., :T] + x = out + if self.skip: + x = x + y + + return x + + +class _LocalState(nn.Module): + """Local state allows to have attention based only on data (no positional embedding), + but while setting a constraint on the time window (e.g. decaying penalty term). + Also a failed experiments with trying to provide some frequency based attention. 
+ """ + + def __init__(self, channels: int, heads: int = 4, ndecay: int = 4): + r""" + Args: + channels (int): Size of Conv1d layers. + heads (int, optional): (default: 4) + ndecay (int, optional): (default: 4) + """ + super(_LocalState, self).__init__() + if channels % heads != 0: + raise ValueError("Channels must be divisible by heads.") + self.heads = heads + self.ndecay = ndecay + self.content = nn.Conv1d(channels, channels, 1) + self.query = nn.Conv1d(channels, channels, 1) + self.key = nn.Conv1d(channels, channels, 1) + + self.query_decay = nn.Conv1d(channels, heads * ndecay, 1) + if ndecay: + # Initialize decay close to zero (there is a sigmoid), for maximum initial window. + self.query_decay.weight.data *= 0.01 + if self.query_decay.bias is None: + raise ValueError("bias must not be None.") + self.query_decay.bias.data[:] = -2 + self.proj = nn.Conv1d(channels + heads * 0, channels, 1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + r"""LocalState forward call + + Args: + x (torch.Tensor): input tensor for LocalState + + Returns: + Tensor + Output after being run through LocalState layer. + """ + B, C, T = x.shape + heads = self.heads + indexes = torch.arange(T, device=x.device, dtype=x.dtype) + # left index are keys, right index are queries + delta = indexes[:, None] - indexes[None, :] + + queries = self.query(x).view(B, heads, -1, T) + keys = self.key(x).view(B, heads, -1, T) + # t are keys, s are queries + dots = torch.einsum("bhct,bhcs->bhts", keys, queries) + dots /= math.sqrt(keys.shape[2]) + if self.ndecay: + decays = torch.arange(1, self.ndecay + 1, device=x.device, dtype=x.dtype) + decay_q = self.query_decay(x).view(B, heads, -1, T) + decay_q = torch.sigmoid(decay_q) / 2 + decay_kernel = -decays.view(-1, 1, 1) * delta.abs() / math.sqrt(self.ndecay) + dots += torch.einsum("fts,bhfs->bhts", decay_kernel, decay_q) + + # Kill self reference. 
+ dots.masked_fill_(torch.eye(T, device=dots.device, dtype=torch.bool), -100) + weights = torch.softmax(dots, dim=2) + + content = self.content(x).view(B, heads, -1, T) + result = torch.einsum("bhts,bhct->bhcs", weights, content) + result = result.reshape(B, -1, T) + return x + self.proj(result) + + +class _LayerScale(nn.Module): + """Layer scale from [Touvron et al 2021] (https://arxiv.org/pdf/2103.17239.pdf). + This rescales diagonally residual outputs close to 0 initially, then learnt. + """ + + def __init__(self, channels: int, init: float = 0): + r""" + Args: + channels (int): Size of rescaling + init (float, optional): Scale to default to (default: 0) + """ + super().__init__() + self.scale = nn.Parameter(torch.zeros(channels, requires_grad=True)) + self.scale.data[:] = init + + def forward(self, x: torch.Tensor) -> torch.Tensor: + r"""LayerScale forward call + + Args: + x (torch.Tensor): input tensor for LayerScale + + Returns: + Tensor + Output after rescaling tensor. + """ + return self.scale[:, None] * x + + +def _unfold(a: torch.Tensor, kernel_size: int, stride: int) -> torch.Tensor: + """Given input of size [*OT, T], output Tensor of size [*OT, F, K] + with K the kernel size, by extracting frames with the given stride. + This will pad the input so that `F = ceil(T / K)`. + see https://github.com/pytorch/pytorch/issues/60466 + """ + shape = list(a.shape[:-1]) + length = int(a.shape[-1]) + n_frames = math.ceil(length / stride) + tgt_length = (n_frames - 1) * stride + kernel_size + a = F.pad(input=a, pad=[0, tgt_length - length]) + strides = [a.stride(dim) for dim in range(a.dim())] + if strides[-1] != 1: + raise ValueError("Data should be contiguous.") + strides = strides[:-1] + [stride, 1] + shape.append(n_frames) + shape.append(kernel_size) + return a.as_strided(shape, strides) + + +def _rescale_module(module): + r""" + Rescales initial weight scale for all models within the module. 
+ """ + for sub in module.modules(): + if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d, nn.Conv2d, nn.ConvTranspose2d)): + std = sub.weight.std().detach() + scale = (std / 0.1) ** 0.5 + sub.weight.data /= scale + if sub.bias is not None: + sub.bias.data /= scale + + +def _spectro(x: torch.Tensor, n_fft: int = 512, hop_length: int = 0, pad: int = 0) -> torch.Tensor: + other = list(x.shape[:-1]) + length = int(x.shape[-1]) + x = x.reshape(-1, length) + z = torch.stft( + x, + n_fft * (1 + pad), + hop_length, + window=torch.hann_window(n_fft).to(x), + win_length=n_fft, + normalized=True, + center=True, + return_complex=True, + pad_mode="reflect", + ) + _, freqs, frame = z.shape + other.extend([freqs, frame]) + return z.view(other) + + +def _ispectro(z: torch.Tensor, hop_length: int = 0, length: int = 0, pad: int = 0) -> torch.Tensor: + other = list(z.shape[:-2]) + freqs = int(z.shape[-2]) + frames = int(z.shape[-1]) + + n_fft = 2 * freqs - 2 + z = z.view(-1, freqs, frames) + win_length = n_fft // (1 + pad) + x = torch.istft( + z, + n_fft, + hop_length, + window=torch.hann_window(win_length).to(z.real), + win_length=win_length, + normalized=True, + length=length, + center=True, + ) + _, length = x.shape + other.append(length) + return x.view(other) + + +def hdemucs_low(sources: List[str]) -> HDemucs: + """Builds low nfft (1024) version of :class:`HDemucs`, suitable for sample rates around 8 kHz. + + Args: + sources (List[str]): See :py:func:`HDemucs`. + + Returns: + HDemucs: + HDemucs model. + """ + + return HDemucs(sources=sources, nfft=1024, depth=5) + + +def hdemucs_medium(sources: List[str]) -> HDemucs: + r"""Builds medium nfft (2048) version of :class:`HDemucs`, suitable for sample rates of 16-32 kHz. + + .. 
note:: + + Medium HDemucs has not been tested against the original Hybrid Demucs as this nfft and depth configuration is + not compatible with the original implementation in https://github.com/facebookresearch/demucs + + Args: + sources (List[str]): See :py:func:`HDemucs`. + + Returns: + HDemucs: + HDemucs model. + """ + + return HDemucs(sources=sources, nfft=2048, depth=6) + + +def hdemucs_high(sources: List[str]) -> HDemucs: + r"""Builds medium nfft (4096) version of :class:`HDemucs`, suitable for sample rates of 44.1-48 kHz. + + Args: + sources (List[str]): See :py:func:`HDemucs`. + + Returns: + HDemucs: + HDemucs model. + """ + + return HDemucs(sources=sources, nfft=4096, depth=6) diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/conformer.py b/venv/lib/python3.10/site-packages/torchaudio/models/conformer.py new file mode 100644 index 0000000000000000000000000000000000000000..3da0d24fac977a65cc97f4b0afae0ab64932d4b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/models/conformer.py @@ -0,0 +1,293 @@ +from typing import Optional, Tuple + +import torch + + +__all__ = ["Conformer"] + + +def _lengths_to_padding_mask(lengths: torch.Tensor) -> torch.Tensor: + batch_size = lengths.shape[0] + max_length = int(torch.max(lengths).item()) + padding_mask = torch.arange(max_length, device=lengths.device, dtype=lengths.dtype).expand( + batch_size, max_length + ) >= lengths.unsqueeze(1) + return padding_mask + + +class _ConvolutionModule(torch.nn.Module): + r"""Conformer convolution module. + + Args: + input_dim (int): input dimension. + num_channels (int): number of depthwise convolution layer input channels. + depthwise_kernel_size (int): kernel size of depthwise convolution layer. + dropout (float, optional): dropout probability. (Default: 0.0) + bias (bool, optional): indicates whether to add bias term to each convolution layer. (Default: ``False``) + use_group_norm (bool, optional): use GroupNorm rather than BatchNorm. 
(Default: ``False``) + """ + + def __init__( + self, + input_dim: int, + num_channels: int, + depthwise_kernel_size: int, + dropout: float = 0.0, + bias: bool = False, + use_group_norm: bool = False, + ) -> None: + super().__init__() + if (depthwise_kernel_size - 1) % 2 != 0: + raise ValueError("depthwise_kernel_size must be odd to achieve 'SAME' padding.") + self.layer_norm = torch.nn.LayerNorm(input_dim) + self.sequential = torch.nn.Sequential( + torch.nn.Conv1d( + input_dim, + 2 * num_channels, + 1, + stride=1, + padding=0, + bias=bias, + ), + torch.nn.GLU(dim=1), + torch.nn.Conv1d( + num_channels, + num_channels, + depthwise_kernel_size, + stride=1, + padding=(depthwise_kernel_size - 1) // 2, + groups=num_channels, + bias=bias, + ), + torch.nn.GroupNorm(num_groups=1, num_channels=num_channels) + if use_group_norm + else torch.nn.BatchNorm1d(num_channels), + torch.nn.SiLU(), + torch.nn.Conv1d( + num_channels, + input_dim, + kernel_size=1, + stride=1, + padding=0, + bias=bias, + ), + torch.nn.Dropout(dropout), + ) + + def forward(self, input: torch.Tensor) -> torch.Tensor: + r""" + Args: + input (torch.Tensor): with shape `(B, T, D)`. + + Returns: + torch.Tensor: output, with shape `(B, T, D)`. + """ + x = self.layer_norm(input) + x = x.transpose(1, 2) + x = self.sequential(x) + return x.transpose(1, 2) + + +class _FeedForwardModule(torch.nn.Module): + r"""Positionwise feed forward layer. + + Args: + input_dim (int): input dimension. + hidden_dim (int): hidden dimension. + dropout (float, optional): dropout probability. 
(Default: 0.0) + """ + + def __init__(self, input_dim: int, hidden_dim: int, dropout: float = 0.0) -> None: + super().__init__() + self.sequential = torch.nn.Sequential( + torch.nn.LayerNorm(input_dim), + torch.nn.Linear(input_dim, hidden_dim, bias=True), + torch.nn.SiLU(), + torch.nn.Dropout(dropout), + torch.nn.Linear(hidden_dim, input_dim, bias=True), + torch.nn.Dropout(dropout), + ) + + def forward(self, input: torch.Tensor) -> torch.Tensor: + r""" + Args: + input (torch.Tensor): with shape `(*, D)`. + + Returns: + torch.Tensor: output, with shape `(*, D)`. + """ + return self.sequential(input) + + +class ConformerLayer(torch.nn.Module): + r"""Conformer layer that constitutes Conformer. + + Args: + input_dim (int): input dimension. + ffn_dim (int): hidden layer dimension of feedforward network. + num_attention_heads (int): number of attention heads. + depthwise_conv_kernel_size (int): kernel size of depthwise convolution layer. + dropout (float, optional): dropout probability. (Default: 0.0) + use_group_norm (bool, optional): use ``GroupNorm`` rather than ``BatchNorm1d`` + in the convolution module. (Default: ``False``) + convolution_first (bool, optional): apply the convolution module ahead of + the attention module. 
(Default: ``False``) + """ + + def __init__( + self, + input_dim: int, + ffn_dim: int, + num_attention_heads: int, + depthwise_conv_kernel_size: int, + dropout: float = 0.0, + use_group_norm: bool = False, + convolution_first: bool = False, + ) -> None: + super().__init__() + + self.ffn1 = _FeedForwardModule(input_dim, ffn_dim, dropout=dropout) + + self.self_attn_layer_norm = torch.nn.LayerNorm(input_dim) + self.self_attn = torch.nn.MultiheadAttention(input_dim, num_attention_heads, dropout=dropout) + self.self_attn_dropout = torch.nn.Dropout(dropout) + + self.conv_module = _ConvolutionModule( + input_dim=input_dim, + num_channels=input_dim, + depthwise_kernel_size=depthwise_conv_kernel_size, + dropout=dropout, + bias=True, + use_group_norm=use_group_norm, + ) + + self.ffn2 = _FeedForwardModule(input_dim, ffn_dim, dropout=dropout) + self.final_layer_norm = torch.nn.LayerNorm(input_dim) + self.convolution_first = convolution_first + + def _apply_convolution(self, input: torch.Tensor) -> torch.Tensor: + residual = input + input = input.transpose(0, 1) + input = self.conv_module(input) + input = input.transpose(0, 1) + input = residual + input + return input + + def forward(self, input: torch.Tensor, key_padding_mask: Optional[torch.Tensor]) -> torch.Tensor: + r""" + Args: + input (torch.Tensor): input, with shape `(T, B, D)`. + key_padding_mask (torch.Tensor or None): key padding mask to use in self attention layer. + + Returns: + torch.Tensor: output, with shape `(T, B, D)`. 
+ """ + residual = input + x = self.ffn1(input) + x = x * 0.5 + residual + + if self.convolution_first: + x = self._apply_convolution(x) + + residual = x + x = self.self_attn_layer_norm(x) + x, _ = self.self_attn( + query=x, + key=x, + value=x, + key_padding_mask=key_padding_mask, + need_weights=False, + ) + x = self.self_attn_dropout(x) + x = x + residual + + if not self.convolution_first: + x = self._apply_convolution(x) + + residual = x + x = self.ffn2(x) + x = x * 0.5 + residual + + x = self.final_layer_norm(x) + return x + + +class Conformer(torch.nn.Module): + r"""Conformer architecture introduced in + *Conformer: Convolution-augmented Transformer for Speech Recognition* + :cite:`gulati2020conformer`. + + Args: + input_dim (int): input dimension. + num_heads (int): number of attention heads in each Conformer layer. + ffn_dim (int): hidden layer dimension of feedforward networks. + num_layers (int): number of Conformer layers to instantiate. + depthwise_conv_kernel_size (int): kernel size of each Conformer layer's depthwise convolution layer. + dropout (float, optional): dropout probability. (Default: 0.0) + use_group_norm (bool, optional): use ``GroupNorm`` rather than ``BatchNorm1d`` + in the convolution module. (Default: ``False``) + convolution_first (bool, optional): apply the convolution module ahead of + the attention module. 
(Default: ``False``) + + Examples: + >>> conformer = Conformer( + >>> input_dim=80, + >>> num_heads=4, + >>> ffn_dim=128, + >>> num_layers=4, + >>> depthwise_conv_kernel_size=31, + >>> ) + >>> lengths = torch.randint(1, 400, (10,)) # (batch,) + >>> input = torch.rand(10, int(lengths.max()), input_dim) # (batch, num_frames, input_dim) + >>> output = conformer(input, lengths) + """ + + def __init__( + self, + input_dim: int, + num_heads: int, + ffn_dim: int, + num_layers: int, + depthwise_conv_kernel_size: int, + dropout: float = 0.0, + use_group_norm: bool = False, + convolution_first: bool = False, + ): + super().__init__() + + self.conformer_layers = torch.nn.ModuleList( + [ + ConformerLayer( + input_dim, + ffn_dim, + num_heads, + depthwise_conv_kernel_size, + dropout=dropout, + use_group_norm=use_group_norm, + convolution_first=convolution_first, + ) + for _ in range(num_layers) + ] + ) + + def forward(self, input: torch.Tensor, lengths: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + r""" + Args: + input (torch.Tensor): with shape `(B, T, input_dim)`. + lengths (torch.Tensor): with shape `(B,)` and i-th element representing + number of valid frames for i-th batch element in ``input``. + + Returns: + (torch.Tensor, torch.Tensor) + torch.Tensor + output frames, with shape `(B, T, input_dim)` + torch.Tensor + output lengths, with shape `(B,)` and i-th element representing + number of valid frames for i-th batch element in output frames. 
+ """ + encoder_padding_mask = _lengths_to_padding_mask(lengths) + + x = input.transpose(0, 1) + for layer in self.conformer_layers: + x = layer(x, encoder_padding_mask) + return x.transpose(0, 1), lengths diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/conv_tasnet.py b/venv/lib/python3.10/site-packages/torchaudio/models/conv_tasnet.py new file mode 100644 index 0000000000000000000000000000000000000000..770746dd46b34c47736e4607d4344672d0335ef2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/models/conv_tasnet.py @@ -0,0 +1,330 @@ +"""Implements Conv-TasNet with building blocks of it. + +Based on https://github.com/naplab/Conv-TasNet/tree/e66d82a8f956a69749ec8a4ae382217faa097c5c +""" + +from typing import Optional, Tuple + +import torch + + +class ConvBlock(torch.nn.Module): + """1D Convolutional block. + + Args: + io_channels (int): The number of input/output channels, + hidden_channels (int): The number of channels in the internal layers, . + kernel_size (int): The convolution kernel size of the middle layer,

    . + padding (int): Padding value of the convolution in the middle layer. + dilation (int, optional): Dilation value of the convolution in the middle layer. + no_redisual (bool, optional): Disable residual block/output. + + Note: + This implementation corresponds to the "non-causal" setting in the paper. + """ + + def __init__( + self, + io_channels: int, + hidden_channels: int, + kernel_size: int, + padding: int, + dilation: int = 1, + no_residual: bool = False, + ): + super().__init__() + + self.conv_layers = torch.nn.Sequential( + torch.nn.Conv1d(in_channels=io_channels, out_channels=hidden_channels, kernel_size=1), + torch.nn.PReLU(), + torch.nn.GroupNorm(num_groups=1, num_channels=hidden_channels, eps=1e-08), + torch.nn.Conv1d( + in_channels=hidden_channels, + out_channels=hidden_channels, + kernel_size=kernel_size, + padding=padding, + dilation=dilation, + groups=hidden_channels, + ), + torch.nn.PReLU(), + torch.nn.GroupNorm(num_groups=1, num_channels=hidden_channels, eps=1e-08), + ) + + self.res_out = ( + None + if no_residual + else torch.nn.Conv1d(in_channels=hidden_channels, out_channels=io_channels, kernel_size=1) + ) + self.skip_out = torch.nn.Conv1d(in_channels=hidden_channels, out_channels=io_channels, kernel_size=1) + + def forward(self, input: torch.Tensor) -> Tuple[Optional[torch.Tensor], torch.Tensor]: + feature = self.conv_layers(input) + if self.res_out is None: + residual = None + else: + residual = self.res_out(feature) + skip_out = self.skip_out(feature) + return residual, skip_out + + +class MaskGenerator(torch.nn.Module): + """TCN (Temporal Convolution Network) Separation Module + + Generates masks for separation. + + Args: + input_dim (int): Input feature dimension, . + num_sources (int): The number of sources to separate. + kernel_size (int): The convolution kernel size of conv blocks,

    . + num_featrs (int): Input/output feature dimenstion of conv blocks, . + num_hidden (int): Intermediate feature dimention of conv blocks, + num_layers (int): The number of conv blocks in one stack, . + num_stacks (int): The number of conv block stacks, . + msk_activate (str): The activation function of the mask output. + + Note: + This implementation corresponds to the "non-causal" setting in the paper. + """ + + def __init__( + self, + input_dim: int, + num_sources: int, + kernel_size: int, + num_feats: int, + num_hidden: int, + num_layers: int, + num_stacks: int, + msk_activate: str, + ): + super().__init__() + + self.input_dim = input_dim + self.num_sources = num_sources + + self.input_norm = torch.nn.GroupNorm(num_groups=1, num_channels=input_dim, eps=1e-8) + self.input_conv = torch.nn.Conv1d(in_channels=input_dim, out_channels=num_feats, kernel_size=1) + + self.receptive_field = 0 + self.conv_layers = torch.nn.ModuleList([]) + for s in range(num_stacks): + for l in range(num_layers): + multi = 2**l + self.conv_layers.append( + ConvBlock( + io_channels=num_feats, + hidden_channels=num_hidden, + kernel_size=kernel_size, + dilation=multi, + padding=multi, + # The last ConvBlock does not need residual + no_residual=(l == (num_layers - 1) and s == (num_stacks - 1)), + ) + ) + self.receptive_field += kernel_size if s == 0 and l == 0 else (kernel_size - 1) * multi + self.output_prelu = torch.nn.PReLU() + self.output_conv = torch.nn.Conv1d( + in_channels=num_feats, + out_channels=input_dim * num_sources, + kernel_size=1, + ) + if msk_activate == "sigmoid": + self.mask_activate = torch.nn.Sigmoid() + elif msk_activate == "relu": + self.mask_activate = torch.nn.ReLU() + else: + raise ValueError(f"Unsupported activation {msk_activate}") + + def forward(self, input: torch.Tensor) -> torch.Tensor: + """Generate separation mask. 
+ + Args: + input (torch.Tensor): 3D Tensor with shape [batch, features, frames] + + Returns: + Tensor: shape [batch, num_sources, features, frames] + """ + batch_size = input.shape[0] + feats = self.input_norm(input) + feats = self.input_conv(feats) + output = 0.0 + for layer in self.conv_layers: + residual, skip = layer(feats) + if residual is not None: # the last conv layer does not produce residual + feats = feats + residual + output = output + skip + output = self.output_prelu(output) + output = self.output_conv(output) + output = self.mask_activate(output) + return output.view(batch_size, self.num_sources, self.input_dim, -1) + + +class ConvTasNet(torch.nn.Module): + """Conv-TasNet architecture introduced in + *Conv-TasNet: Surpassing Ideal Time–Frequency Magnitude Masking for Speech Separation* + :cite:`Luo_2019`. + + Note: + This implementation corresponds to the "non-causal" setting in the paper. + + See Also: + * :class:`torchaudio.pipelines.SourceSeparationBundle`: Source separation pipeline with pre-trained models. + + Args: + num_sources (int, optional): The number of sources to split. + enc_kernel_size (int, optional): The convolution kernel size of the encoder/decoder, . + enc_num_feats (int, optional): The feature dimensions passed to mask generator, . + msk_kernel_size (int, optional): The convolution kernel size of the mask generator,

    . + msk_num_feats (int, optional): The input/output feature dimension of conv block in the mask generator, . + msk_num_hidden_feats (int, optional): The internal feature dimension of conv block of the mask generator, . + msk_num_layers (int, optional): The number of layers in one conv block of the mask generator, . + msk_num_stacks (int, optional): The numbr of conv blocks of the mask generator, . + msk_activate (str, optional): The activation function of the mask output (Default: ``sigmoid``). + """ + + def __init__( + self, + num_sources: int = 2, + # encoder/decoder parameters + enc_kernel_size: int = 16, + enc_num_feats: int = 512, + # mask generator parameters + msk_kernel_size: int = 3, + msk_num_feats: int = 128, + msk_num_hidden_feats: int = 512, + msk_num_layers: int = 8, + msk_num_stacks: int = 3, + msk_activate: str = "sigmoid", + ): + super().__init__() + + self.num_sources = num_sources + self.enc_num_feats = enc_num_feats + self.enc_kernel_size = enc_kernel_size + self.enc_stride = enc_kernel_size // 2 + + self.encoder = torch.nn.Conv1d( + in_channels=1, + out_channels=enc_num_feats, + kernel_size=enc_kernel_size, + stride=self.enc_stride, + padding=self.enc_stride, + bias=False, + ) + self.mask_generator = MaskGenerator( + input_dim=enc_num_feats, + num_sources=num_sources, + kernel_size=msk_kernel_size, + num_feats=msk_num_feats, + num_hidden=msk_num_hidden_feats, + num_layers=msk_num_layers, + num_stacks=msk_num_stacks, + msk_activate=msk_activate, + ) + self.decoder = torch.nn.ConvTranspose1d( + in_channels=enc_num_feats, + out_channels=1, + kernel_size=enc_kernel_size, + stride=self.enc_stride, + padding=self.enc_stride, + bias=False, + ) + + def _align_num_frames_with_strides(self, input: torch.Tensor) -> Tuple[torch.Tensor, int]: + """Pad input Tensor so that the end of the input tensor corresponds with + + 1. (if kernel size is odd) the center of the last convolution kernel + or 2. 
(if kernel size is even) the end of the first half of the last convolution kernel + + Assumption: + The resulting Tensor will be padded with the size of stride (== kernel_width // 2) + on the both ends in Conv1D + + |<--- k_1 --->| + | | |<-- k_n-1 -->| + | | | |<--- k_n --->| + | | | | | + | | | | | + | v v v | + |<---->|<--- input signal --->|<--->|<---->| + stride PAD stride + + Args: + input (torch.Tensor): 3D Tensor with shape (batch_size, channels==1, frames) + + Returns: + Tensor: Padded Tensor + int: Number of paddings performed + """ + batch_size, num_channels, num_frames = input.shape + is_odd = self.enc_kernel_size % 2 + num_strides = (num_frames - is_odd) // self.enc_stride + num_remainings = num_frames - (is_odd + num_strides * self.enc_stride) + if num_remainings == 0: + return input, 0 + + num_paddings = self.enc_stride - num_remainings + pad = torch.zeros( + batch_size, + num_channels, + num_paddings, + dtype=input.dtype, + device=input.device, + ) + return torch.cat([input, pad], 2), num_paddings + + def forward(self, input: torch.Tensor) -> torch.Tensor: + """Perform source separation. Generate audio source waveforms. + + Args: + input (torch.Tensor): 3D Tensor with shape [batch, channel==1, frames] + + Returns: + Tensor: 3D Tensor with shape [batch, channel==num_sources, frames] + """ + if input.ndim != 3 or input.shape[1] != 1: + raise ValueError(f"Expected 3D tensor (batch, channel==1, frames). 
Found: {input.shape}") + + # B: batch size + # L: input frame length + # L': padded input frame length + # F: feature dimension + # M: feature frame length + # S: number of sources + + padded, num_pads = self._align_num_frames_with_strides(input) # B, 1, L' + batch_size, num_padded_frames = padded.shape[0], padded.shape[2] + feats = self.encoder(padded) # B, F, M + masked = self.mask_generator(feats) * feats.unsqueeze(1) # B, S, F, M + masked = masked.view(batch_size * self.num_sources, self.enc_num_feats, -1) # B*S, F, M + decoded = self.decoder(masked) # B*S, 1, L' + output = decoded.view(batch_size, self.num_sources, num_padded_frames) # B, S, L' + if num_pads > 0: + output = output[..., :-num_pads] # B, S, L + return output + + +def conv_tasnet_base(num_sources: int = 2) -> ConvTasNet: + r"""Builds non-causal version of :class:`~torchaudio.models.ConvTasNet`. + + The parameter settings follow the ones with the highest Si-SNR metirc score in the paper, + except the mask activation function is changed from "sigmoid" to "relu" for performance improvement. + + Args: + num_sources (int, optional): Number of sources in the output. + (Default: 2) + Returns: + ConvTasNet: + ConvTasNet model. 
+ """ + return ConvTasNet( + num_sources=num_sources, + enc_kernel_size=16, + enc_num_feats=512, + msk_kernel_size=3, + msk_num_feats=128, + msk_num_hidden_feats=512, + msk_num_layers=8, + msk_num_stacks=3, + msk_activate="relu", + ) diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/decoder/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/models/decoder/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8254d244346727c2155f2da68b842170bea0f42 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/models/decoder/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/decoder/__pycache__/_ctc_decoder.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/models/decoder/__pycache__/_ctc_decoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7124393bef501139787194ca8cfc8bab6b1e2110 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/models/decoder/__pycache__/_ctc_decoder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/decoder/__pycache__/_cuda_ctc_decoder.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/models/decoder/__pycache__/_cuda_ctc_decoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52ab0bd8e3d0def62a76853b7f8ad6ce5cc3bf2b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/models/decoder/__pycache__/_cuda_ctc_decoder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/decoder/_cuda_ctc_decoder.py b/venv/lib/python3.10/site-packages/torchaudio/models/decoder/_cuda_ctc_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..9b1f509644091e04ea3bdc4301a74c546044f31d --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/torchaudio/models/decoder/_cuda_ctc_decoder.py @@ -0,0 +1,187 @@ +from __future__ import annotations + +import math + +from typing import List, NamedTuple, Union + +import torch +import torchaudio + +torchaudio._extension._load_lib("libctc_prefix_decoder") +import torchaudio.lib.pybind11_prefixctc as cuctc + + +__all__ = ["CUCTCHypothesis", "CUCTCDecoder", "cuda_ctc_decoder"] + + +def _get_vocab_list(vocab_file): + vocab = [] + with open(vocab_file, "r", encoding="utf-8") as f: + for line in f: + line = line.strip().split() + vocab.append(line[0]) + return vocab + + +class CUCTCHypothesis(NamedTuple): + r"""Represents hypothesis generated by CUCTC beam search decoder :class:`CUCTCDecoder`.""" + tokens: List[int] + """Predicted sequence of token IDs. Shape `(L, )`, where `L` is the length of the output sequence""" + + words: List[str] + """List of predicted tokens. Algin with modeling unit. + """ + + score: float + """Score corresponding to hypothesis""" + + +_DEFAULT_BLANK_SKIP_THREASHOLD = 0.95 + + +class CUCTCDecoder: + """CUDA CTC beam search decoder. + + .. devices:: CUDA + + Note: + To build the decoder, please use the factory function :func:`cuda_ctc_decoder`. + """ + + def __init__( + self, + vocab_list: List[str], + blank_id: int = 0, + beam_size: int = 10, + nbest: int = 1, + blank_skip_threshold: float = _DEFAULT_BLANK_SKIP_THREASHOLD, + cuda_stream: torch.cuda.streams.Stream = None, + ): + """ + Args: + blank_id (int): token id corresopnding to blank, only support 0 for now. (Default: 0) + vocab_list (List[str]): list of vocabulary tokens + beam_size (int, optional): max number of hypos to hold after each decode step (Default: 10) + nbest (int): number of best decodings to return + blank_skip_threshold (float): + skip frames if log_prob(blank) > log(blank_skip_threshold), to speed up decoding. + (Default: 0.95). 
+ cuda_stream (torch.cuda.streams.Stream): using assigned cuda stream (Default: using default stream) + + """ + if cuda_stream: + if not isinstance(cuda_stream, torch.cuda.streams.Stream): + raise AssertionError("cuda_stream must be torch.cuda.streams.Stream") + cuda_stream_ = cuda_stream.cuda_stream if cuda_stream else torch.cuda.current_stream().cuda_stream + self.internal_data = cuctc.prefixCTC_alloc(cuda_stream_) + self.memory = torch.empty(0, dtype=torch.int8, device=torch.device("cuda")) + if blank_id != 0: + raise AssertionError("blank_id must be 0") + self.blank_id = blank_id + self.vocab_list = vocab_list + self.space_id = 0 + self.nbest = nbest + if not (blank_skip_threshold >= 0 and blank_skip_threshold <= 1): + raise AssertionError("blank_skip_threshold must be between 0 and 1") + self.blank_skip_threshold = math.log(blank_skip_threshold) + self.beam_size = min(beam_size, len(vocab_list)) # beam size must be smaller than vocab size + + def __del__(self): + if cuctc is not None: + cuctc.prefixCTC_free(self.internal_data) + + def __call__(self, log_prob: torch.Tensor, encoder_out_lens: torch.Tensor): + """ + Args: + log_prob (torch.FloatTensor): GPU tensor of shape `(batch, frame, num_tokens)` storing sequences of + probability distribution over labels; log_softmax(output of acoustic model). + lengths (dtype torch.int32): GPU tensor of shape `(batch, )` storing the valid length of + in time axis of the output Tensor in each batch. + + Returns: + List[List[CUCTCHypothesis]]: + List of sorted best hypotheses for each audio sequence in the batch. 
+ """ + if not encoder_out_lens.dtype == torch.int32: + raise AssertionError("encoder_out_lens must be torch.int32") + if not log_prob.dtype == torch.float32: + raise AssertionError("log_prob must be torch.float32") + if not (log_prob.is_cuda and encoder_out_lens.is_cuda): + raise AssertionError("inputs must be cuda tensors") + if not (log_prob.is_contiguous() and encoder_out_lens.is_contiguous()): + raise AssertionError("input tensors must be contiguous") + required_size, score_hyps = cuctc.ctc_beam_search_decoder_batch_gpu_v2( + self.internal_data, + self.memory.data_ptr(), + self.memory.size(0), + log_prob.data_ptr(), + encoder_out_lens.data_ptr(), + log_prob.size(), + log_prob.stride(), + self.beam_size, + self.blank_id, + self.space_id, + self.blank_skip_threshold, + ) + if required_size > 0: + self.memory = torch.empty(required_size, dtype=torch.int8, device=log_prob.device).contiguous() + _, score_hyps = cuctc.ctc_beam_search_decoder_batch_gpu_v2( + self.internal_data, + self.memory.data_ptr(), + self.memory.size(0), + log_prob.data_ptr(), + encoder_out_lens.data_ptr(), + log_prob.size(), + log_prob.stride(), + self.beam_size, + self.blank_id, + self.space_id, + self.blank_skip_threshold, + ) + batch_size = len(score_hyps) + hypos = [] + for i in range(batch_size): + hypos.append( + [ + CUCTCHypothesis( + tokens=score_hyps[i][j][1], + words=[self.vocab_list[word_id] for word_id in score_hyps[i][j][1]], + score=score_hyps[i][j][0], + ) + for j in range(self.nbest) + ] + ) + return hypos + + +def cuda_ctc_decoder( + tokens: Union[str, List[str]], + nbest: int = 1, + beam_size: int = 10, + blank_skip_threshold: float = _DEFAULT_BLANK_SKIP_THREASHOLD, +) -> CUCTCDecoder: + """Builds an instance of :class:`CUCTCDecoder`. + + Args: + tokens (str or List[str]): File or list containing valid tokens. 
+ If using a file, the expected format is for tokens mapping to the same index to be on the same line + beam_size (int, optional): The maximum number of hypos to hold after each decode step (Default: 10) + nbest (int): The number of best decodings to return + blank_id (int): The token ID corresopnding to the blank symbol. + blank_skip_threshold (float): skip frames if log_prob(blank) > log(blank_skip_threshold), to speed up decoding + (Default: 0.95). + + Returns: + CUCTCDecoder: decoder + + Example + >>> decoder = cuda_ctc_decoder( + >>> vocab_file="tokens.txt", + >>> blank_skip_threshold=0.95, + >>> ) + >>> results = decoder(log_probs, encoder_out_lens) # List of shape (B, nbest) of Hypotheses + """ + if type(tokens) == str: + tokens = _get_vocab_list(tokens) + + return CUCTCDecoder(vocab_list=tokens, beam_size=beam_size, nbest=nbest, blank_skip_threshold=blank_skip_threshold) diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/deepspeech.py b/venv/lib/python3.10/site-packages/torchaudio/models/deepspeech.py new file mode 100644 index 0000000000000000000000000000000000000000..ef23d1d351bde615cb2b1b38ffdd7782fbb5b627 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/models/deepspeech.py @@ -0,0 +1,84 @@ +import torch + +__all__ = ["DeepSpeech"] + + +class FullyConnected(torch.nn.Module): + """ + Args: + n_feature: Number of input features + n_hidden: Internal hidden unit size. 
+ """ + + def __init__(self, n_feature: int, n_hidden: int, dropout: float, relu_max_clip: int = 20) -> None: + super(FullyConnected, self).__init__() + self.fc = torch.nn.Linear(n_feature, n_hidden, bias=True) + self.relu_max_clip = relu_max_clip + self.dropout = dropout + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.fc(x) + x = torch.nn.functional.relu(x) + x = torch.nn.functional.hardtanh(x, 0, self.relu_max_clip) + if self.dropout: + x = torch.nn.functional.dropout(x, self.dropout, self.training) + return x + + +class DeepSpeech(torch.nn.Module): + """DeepSpeech architecture introduced in + *Deep Speech: Scaling up end-to-end speech recognition* :cite:`hannun2014deep`. + + Args: + n_feature: Number of input features + n_hidden: Internal hidden unit size. + n_class: Number of output classes + """ + + def __init__( + self, + n_feature: int, + n_hidden: int = 2048, + n_class: int = 40, + dropout: float = 0.0, + ) -> None: + super(DeepSpeech, self).__init__() + self.n_hidden = n_hidden + self.fc1 = FullyConnected(n_feature, n_hidden, dropout) + self.fc2 = FullyConnected(n_hidden, n_hidden, dropout) + self.fc3 = FullyConnected(n_hidden, n_hidden, dropout) + self.bi_rnn = torch.nn.RNN(n_hidden, n_hidden, num_layers=1, nonlinearity="relu", bidirectional=True) + self.fc4 = FullyConnected(n_hidden, n_hidden, dropout) + self.out = torch.nn.Linear(n_hidden, n_class) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Args: + x (torch.Tensor): Tensor of dimension (batch, channel, time, feature). + Returns: + Tensor: Predictor tensor of dimension (batch, time, class). 
+ """ + # N x C x T x F + x = self.fc1(x) + # N x C x T x H + x = self.fc2(x) + # N x C x T x H + x = self.fc3(x) + # N x C x T x H + x = x.squeeze(1) + # N x T x H + x = x.transpose(0, 1) + # T x N x H + x, _ = self.bi_rnn(x) + # The fifth (non-recurrent) layer takes both the forward and backward units as inputs + x = x[:, :, : self.n_hidden] + x[:, :, self.n_hidden :] + # T x N x H + x = self.fc4(x) + # T x N x H + x = self.out(x) + # T x N x n_class + x = x.permute(1, 0, 2) + # N x T x n_class + x = torch.nn.functional.log_softmax(x, dim=2) + # N x T x n_class + return x diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/emformer.py b/venv/lib/python3.10/site-packages/torchaudio/models/emformer.py new file mode 100644 index 0000000000000000000000000000000000000000..9ddd257552ecda94cb55bbc1eed1dae8a5382380 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/models/emformer.py @@ -0,0 +1,884 @@ +import math +from typing import List, Optional, Tuple + +import torch + + +__all__ = ["Emformer"] + + +def _lengths_to_padding_mask(lengths: torch.Tensor) -> torch.Tensor: + batch_size = lengths.shape[0] + max_length = int(torch.max(lengths).item()) + padding_mask = torch.arange(max_length, device=lengths.device, dtype=lengths.dtype).expand( + batch_size, max_length + ) >= lengths.unsqueeze(1) + return padding_mask + + +def _gen_padding_mask( + utterance: torch.Tensor, + right_context: torch.Tensor, + summary: torch.Tensor, + lengths: torch.Tensor, + mems: torch.Tensor, + left_context_key: Optional[torch.Tensor] = None, +) -> Optional[torch.Tensor]: + T = right_context.size(0) + utterance.size(0) + summary.size(0) + B = right_context.size(1) + if B == 1: + padding_mask = None + else: + right_context_blocks_length = T - torch.max(lengths).int() - summary.size(0) + left_context_blocks_length = left_context_key.size(0) if left_context_key is not None else 0 + klengths = lengths + mems.size(0) + right_context_blocks_length + 
left_context_blocks_length + padding_mask = _lengths_to_padding_mask(lengths=klengths) + return padding_mask + + +def _get_activation_module(activation: str) -> torch.nn.Module: + if activation == "relu": + return torch.nn.ReLU() + elif activation == "gelu": + return torch.nn.GELU() + elif activation == "silu": + return torch.nn.SiLU() + else: + raise ValueError(f"Unsupported activation {activation}") + + +def _get_weight_init_gains(weight_init_scale_strategy: Optional[str], num_layers: int) -> List[Optional[float]]: + if weight_init_scale_strategy is None: + return [None for _ in range(num_layers)] + elif weight_init_scale_strategy == "depthwise": + return [1.0 / math.sqrt(layer_idx + 1) for layer_idx in range(num_layers)] + elif weight_init_scale_strategy == "constant": + return [1.0 / math.sqrt(2) for layer_idx in range(num_layers)] + else: + raise ValueError(f"Unsupported weight_init_scale_strategy value {weight_init_scale_strategy}") + + +def _gen_attention_mask_block( + col_widths: List[int], col_mask: List[bool], num_rows: int, device: torch.device +) -> torch.Tensor: + if len(col_widths) != len(col_mask): + raise ValueError("Length of col_widths must match that of col_mask") + + mask_block = [ + torch.ones(num_rows, col_width, device=device) + if is_ones_col + else torch.zeros(num_rows, col_width, device=device) + for col_width, is_ones_col in zip(col_widths, col_mask) + ] + return torch.cat(mask_block, dim=1) + + +class _EmformerAttention(torch.nn.Module): + r"""Emformer layer attention module. + + Args: + input_dim (int): input dimension. + num_heads (int): number of attention heads in each Emformer layer. + dropout (float, optional): dropout probability. (Default: 0.0) + weight_init_gain (float or None, optional): scale factor to apply when initializing + attention module parameters. (Default: ``None``) + tanh_on_mem (bool, optional): if ``True``, applies tanh to memory elements. 
class _EmformerAttention(torch.nn.Module):
    r"""Emformer layer attention module.

    Computes multi-head attention over [right context, utterance, summary]
    queries against [mems, right context, (left context,) utterance] keys.

    Args:
        input_dim (int): input dimension; must be divisible by ``num_heads``.
        num_heads (int): number of attention heads.
        dropout (float, optional): dropout probability. (Default: 0.0)
        weight_init_gain (float or None, optional): Xavier gain for the key/value and
            query projections; skipped when falsy. (Default: ``None``)
        tanh_on_mem (bool, optional): if ``True``, applies tanh to memory outputs;
            otherwise memory outputs are clamped to [-10, 10]. (Default: ``False``)
        negative_inf (float, optional): value standing in for -inf in attention
            weights. (Default: -1e8)
    """

    def __init__(
        self,
        input_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        weight_init_gain: Optional[float] = None,
        tanh_on_mem: bool = False,
        negative_inf: float = -1e8,
    ):
        super().__init__()

        if input_dim % num_heads != 0:
            raise ValueError(f"input_dim ({input_dim}) is not a multiple of num_heads ({num_heads}).")

        self.input_dim = input_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.tanh_on_mem = tanh_on_mem
        self.negative_inf = negative_inf

        # Standard scaled dot-product scaling: 1 / sqrt(head_dim).
        self.scaling = (self.input_dim // self.num_heads) ** -0.5

        # Keys and values share one fused projection; queries get their own.
        self.emb_to_key_value = torch.nn.Linear(input_dim, 2 * input_dim, bias=True)
        self.emb_to_query = torch.nn.Linear(input_dim, input_dim, bias=True)
        self.out_proj = torch.nn.Linear(input_dim, input_dim, bias=True)

        if weight_init_gain:
            torch.nn.init.xavier_uniform_(self.emb_to_key_value.weight, gain=weight_init_gain)
            torch.nn.init.xavier_uniform_(self.emb_to_query.weight, gain=weight_init_gain)

    def _gen_key_value(self, input: torch.Tensor, mems: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        # Project [mems, right context, utterance] (input minus trailing summary
        # elements) into fused key/value and split the two halves.
        T, _, _ = input.shape
        summary_length = mems.size(0) + 1
        right_ctx_utterance_block = input[: T - summary_length]
        mems_right_ctx_utterance_block = torch.cat([mems, right_ctx_utterance_block])
        key, value = self.emb_to_key_value(mems_right_ctx_utterance_block).chunk(chunks=2, dim=2)
        return key, value

    def _gen_attention_probs(
        self,
        attention_weights: torch.Tensor,
        attention_mask: torch.Tensor,
        padding_mask: Optional[torch.Tensor],
    ) -> torch.Tensor:
        # Work in float32 so the softmax stays numerically stable for any input dtype.
        weights_float = attention_weights.float()
        weights_float = weights_float.masked_fill(attention_mask.unsqueeze(0), self.negative_inf)
        T = attention_weights.size(1)
        B = attention_weights.size(0) // self.num_heads
        if padding_mask is not None:
            # Expose the batch dim so the padding mask can broadcast per head and per query.
            weights_float = weights_float.view(B, self.num_heads, T, -1)
            weights_float = weights_float.masked_fill(
                padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), self.negative_inf
            )
            weights_float = weights_float.view(B * self.num_heads, T, -1)
        probs = torch.nn.functional.softmax(weights_float, dim=-1).type_as(attention_weights)
        return torch.nn.functional.dropout(probs, p=float(self.dropout), training=self.training)

    def _forward_impl(
        self,
        utterance: torch.Tensor,
        lengths: torch.Tensor,
        right_context: torch.Tensor,
        summary: torch.Tensor,
        mems: torch.Tensor,
        attention_mask: torch.Tensor,
        left_context_key: Optional[torch.Tensor] = None,
        left_context_val: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        B = utterance.size(1)
        T = right_context.size(0) + utterance.size(0) + summary.size(0)

        # Queries come from [right context, utterance, summary].
        query = self.emb_to_query(torch.cat([right_context, utterance, summary]))

        # Keys/values come from [mems, right context, utterance].
        key, value = self.emb_to_key_value(torch.cat([mems, right_context, utterance])).chunk(chunks=2, dim=2)

        if left_context_key is not None and left_context_val is not None:
            # Streaming inference: splice the cached left-context keys/values in
            # between the right-context blocks and the utterance.
            right_context_blocks_length = T - torch.max(lengths).int() - summary.size(0)
            split = mems.size(0) + right_context_blocks_length
            key = torch.cat([key[:split], left_context_key, key[split:]])
            value = torch.cat([value[:split], left_context_val, value[split:]])

        # Fold heads into the batch dimension for batched matmul.
        head_dim = self.input_dim // self.num_heads
        reshaped_query = query.contiguous().view(-1, B * self.num_heads, head_dim).transpose(0, 1)
        reshaped_key = key.contiguous().view(-1, B * self.num_heads, head_dim).transpose(0, 1)
        reshaped_value = value.contiguous().view(-1, B * self.num_heads, head_dim).transpose(0, 1)
        attention_weights = torch.bmm(reshaped_query * self.scaling, reshaped_key.transpose(1, 2))

        padding_mask = _gen_padding_mask(utterance, right_context, summary, lengths, mems, left_context_key)

        attention_probs = self._gen_attention_probs(attention_weights, attention_mask, padding_mask)

        attention = torch.bmm(attention_probs, reshaped_value)
        if attention.shape != (B * self.num_heads, T, head_dim):
            raise AssertionError("Computed attention has incorrect dimensions")
        attention = attention.transpose(0, 1).contiguous().view(T, B, self.input_dim)

        output_right_context_mems = self.out_proj(attention)

        # Tail of the projected output corresponds to the summary -> new memory.
        summary_length = summary.size(0)
        output_right_context = output_right_context_mems[: T - summary_length]
        output_mems = output_right_context_mems[T - summary_length :]
        if self.tanh_on_mem:
            output_mems = torch.tanh(output_mems)
        else:
            output_mems = torch.clamp(output_mems, min=-10, max=10)

        return output_right_context, output_mems, key, value

    def forward(
        self,
        utterance: torch.Tensor,
        lengths: torch.Tensor,
        right_context: torch.Tensor,
        summary: torch.Tensor,
        mems: torch.Tensor,
        attention_mask: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        r"""Forward pass for training.

        Shapes use B: batch; D: feature dim; T: utterance frames; R: right
        context frames; S: summary elements; M: memory elements.

        Args:
            utterance (torch.Tensor): `(T, B, D)` utterance frames.
            lengths (torch.Tensor): `(B,)` valid frame counts for ``utterance``.
            right_context (torch.Tensor): `(R, B, D)` right context frames.
            summary (torch.Tensor): `(S, B, D)` summary elements.
            mems (torch.Tensor): `(M, B, D)` memory elements.
            attention_mask (torch.Tensor): attention mask for the attention computation.

        Returns:
            (Tensor, Tensor):
                `(T + R, B, D)` output frames for utterance and right context, and
                `(M, B, D)` updated memory elements (last summary output dropped).
        """
        output, output_mems, _, _ = self._forward_impl(utterance, lengths, right_context, summary, mems, attention_mask)
        return output, output_mems[:-1]

    @torch.jit.export
    def infer(
        self,
        utterance: torch.Tensor,
        lengths: torch.Tensor,
        right_context: torch.Tensor,
        summary: torch.Tensor,
        mems: torch.Tensor,
        left_context_key: torch.Tensor,
        left_context_val: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        r"""Forward pass for streaming inference.

        Same shape conventions as :meth:`forward`; additionally takes the left
        context key/value cached from the preceding invocation.

        Returns:
            (Tensor, Tensor, Tensor, Tensor):
                output frames `(T + R, B, D)`, updated memory `(M, B, D)`, and
                the attention key and value slices for left context + utterance
                (to be cached by the caller for the next chunk).
        """
        query_dim = right_context.size(0) + utterance.size(0) + summary.size(0)
        key_dim = right_context.size(0) + utterance.size(0) + mems.size(0) + left_context_key.size(0)
        attention_mask = torch.zeros(query_dim, key_dim).to(dtype=torch.bool, device=utterance.device)
        # The summary query (last row) must not attend to the memory keys.
        attention_mask[-1, : mems.size(0)] = True
        output, output_mems, key, value = self._forward_impl(
            utterance,
            lengths,
            right_context,
            summary,
            mems,
            attention_mask,
            left_context_key=left_context_key,
            left_context_val=left_context_val,
        )
        return (
            output,
            output_mems,
            key[mems.size(0) + right_context.size(0) :],
            value[mems.size(0) + right_context.size(0) :],
        )
class _EmformerLayer(torch.nn.Module):
    r"""Single Emformer layer: pre-norm attention plus position-wise feedforward.

    Args:
        input_dim (int): input dimension.
        num_heads (int): number of attention heads.
        ffn_dim (int): hidden dimension of the feedforward network.
        segment_length (int): length of each input segment.
        dropout (float, optional): dropout probability. (Default: 0.0)
        activation (str, optional): feedforward activation; one of
            ("relu", "gelu", "silu"). (Default: "relu")
        left_context_length (int, optional): length of left context. (Default: 0)
        max_memory_size (int, optional): maximum number of memory elements. (Default: 0)
        weight_init_gain (float or None, optional): Xavier gain for attention
            projections. (Default: ``None``)
        tanh_on_mem (bool, optional): if ``True``, applies tanh to memory elements.
            (Default: ``False``)
        negative_inf (float, optional): -inf stand-in for attention weights. (Default: -1e8)
    """

    def __init__(
        self,
        input_dim: int,
        num_heads: int,
        ffn_dim: int,
        segment_length: int,
        dropout: float = 0.0,
        activation: str = "relu",
        left_context_length: int = 0,
        max_memory_size: int = 0,
        weight_init_gain: Optional[float] = None,
        tanh_on_mem: bool = False,
        negative_inf: float = -1e8,
    ):
        super().__init__()

        self.attention = _EmformerAttention(
            input_dim=input_dim,
            num_heads=num_heads,
            dropout=dropout,
            weight_init_gain=weight_init_gain,
            tanh_on_mem=tanh_on_mem,
            negative_inf=negative_inf,
        )
        self.dropout = torch.nn.Dropout(dropout)
        # Average-pools each segment down to one summary/memory vector.
        self.memory_op = torch.nn.AvgPool1d(kernel_size=segment_length, stride=segment_length, ceil_mode=True)

        activation_module = _get_activation_module(activation)
        self.pos_ff = torch.nn.Sequential(
            torch.nn.LayerNorm(input_dim),
            torch.nn.Linear(input_dim, ffn_dim),
            activation_module,
            torch.nn.Dropout(dropout),
            torch.nn.Linear(ffn_dim, input_dim),
            torch.nn.Dropout(dropout),
        )
        self.layer_norm_input = torch.nn.LayerNorm(input_dim)
        self.layer_norm_output = torch.nn.LayerNorm(input_dim)

        self.left_context_length = left_context_length
        self.segment_length = segment_length
        self.max_memory_size = max_memory_size
        self.input_dim = input_dim

        self.use_mem = max_memory_size > 0

    def _init_state(self, batch_size: int, device: Optional[torch.device]) -> List[torch.Tensor]:
        # State layout: [memory bank, left-context keys, left-context values, frames seen].
        empty_memory = torch.zeros(self.max_memory_size, batch_size, self.input_dim, device=device)
        left_context_key = torch.zeros(self.left_context_length, batch_size, self.input_dim, device=device)
        left_context_val = torch.zeros(self.left_context_length, batch_size, self.input_dim, device=device)
        past_length = torch.zeros(1, batch_size, dtype=torch.int32, device=device)
        return [empty_memory, left_context_key, left_context_val, past_length]

    def _unpack_state(self, state: List[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        # Only the most recent (valid) tail of each buffer is used; early in the
        # stream the buffers are partly the zero padding from _init_state.
        past_length = state[3][0][0].item()
        past_left_context_length = min(self.left_context_length, past_length)
        past_mem_length = min(self.max_memory_size, math.ceil(past_length / self.segment_length))
        pre_mems = state[0][self.max_memory_size - past_mem_length :]
        lc_key = state[1][self.left_context_length - past_left_context_length :]
        lc_val = state[2][self.left_context_length - past_left_context_length :]
        return pre_mems, lc_key, lc_val

    def _pack_state(
        self,
        next_k: torch.Tensor,
        next_v: torch.Tensor,
        update_length: int,
        mems: torch.Tensor,
        state: List[torch.Tensor],
    ) -> List[torch.Tensor]:
        # Roll the fixed-size buffers forward, keeping only the newest entries.
        new_k = torch.cat([state[1], next_k])
        new_v = torch.cat([state[2], next_v])
        state[0] = torch.cat([state[0], mems])[-self.max_memory_size :]
        state[1] = new_k[new_k.shape[0] - self.left_context_length :]
        state[2] = new_v[new_v.shape[0] - self.left_context_length :]
        state[3] = state[3] + update_length
        return state

    def _process_attention_output(
        self,
        rc_output: torch.Tensor,
        utterance: torch.Tensor,
        right_context: torch.Tensor,
    ) -> torch.Tensor:
        # Residual around attention, then residual around the feedforward, then norm.
        result = self.dropout(rc_output) + torch.cat([right_context, utterance])
        result = self.pos_ff(result) + result
        return self.layer_norm_output(result)

    def _apply_pre_attention_layer_norm(
        self, utterance: torch.Tensor, right_context: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        layer_norm_input = self.layer_norm_input(torch.cat([right_context, utterance]))
        return (
            layer_norm_input[right_context.size(0) :],
            layer_norm_input[: right_context.size(0)],
        )

    def _apply_post_attention_ffn(
        self, rc_output: torch.Tensor, utterance: torch.Tensor, right_context: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        rc_output = self._process_attention_output(rc_output, utterance, right_context)
        return rc_output[right_context.size(0) :], rc_output[: right_context.size(0)]

    def _apply_attention_forward(
        self,
        utterance: torch.Tensor,
        lengths: torch.Tensor,
        right_context: torch.Tensor,
        mems: torch.Tensor,
        attention_mask: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        if attention_mask is None:
            raise ValueError("attention_mask must be not None when for_inference is False")

        if self.use_mem:
            # One summary vector per segment, via average pooling over time.
            summary = self.memory_op(utterance.permute(1, 2, 0)).permute(2, 0, 1)
        else:
            summary = torch.empty(0).to(dtype=utterance.dtype, device=utterance.device)
        rc_output, next_m = self.attention(
            utterance=utterance,
            lengths=lengths,
            right_context=right_context,
            summary=summary,
            mems=mems,
            attention_mask=attention_mask,
        )
        return rc_output, next_m

    def _apply_attention_infer(
        self,
        utterance: torch.Tensor,
        lengths: torch.Tensor,
        right_context: torch.Tensor,
        mems: torch.Tensor,
        state: Optional[List[torch.Tensor]],
    ) -> Tuple[torch.Tensor, torch.Tensor, List[torch.Tensor]]:
        if state is None:
            state = self._init_state(utterance.size(1), device=utterance.device)
        pre_mems, lc_key, lc_val = self._unpack_state(state)
        if self.use_mem:
            summary = self.memory_op(utterance.permute(1, 2, 0)).permute(2, 0, 1)
            # Streaming processes one segment at a time -> at most one summary element.
            summary = summary[:1]
        else:
            summary = torch.empty(0).to(dtype=utterance.dtype, device=utterance.device)
        rc_output, next_m, next_k, next_v = self.attention.infer(
            utterance=utterance,
            lengths=lengths,
            right_context=right_context,
            summary=summary,
            mems=pre_mems,
            left_context_key=lc_key,
            left_context_val=lc_val,
        )
        state = self._pack_state(next_k, next_v, utterance.size(0), mems, state)
        return rc_output, next_m, state

    def forward(
        self,
        utterance: torch.Tensor,
        lengths: torch.Tensor,
        right_context: torch.Tensor,
        mems: torch.Tensor,
        attention_mask: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        r"""Forward pass for training.

        Shapes use B: batch; D: feature dim; T: utterance frames; R: right
        context frames; M: memory elements.

        Args:
            utterance (torch.Tensor): `(T, B, D)` utterance frames.
            lengths (torch.Tensor): `(B,)` valid frame counts for ``utterance``.
            right_context (torch.Tensor): `(R, B, D)` right context frames.
            mems (torch.Tensor): `(M, B, D)` memory elements.
            attention_mask (torch.Tensor): attention mask for the attention module.

        Returns:
            (Tensor, Tensor, Tensor):
                encoded utterance `(T, B, D)`, updated right context `(R, B, D)`,
                updated memory `(M, B, D)`.
        """
        (
            layer_norm_utterance,
            layer_norm_right_context,
        ) = self._apply_pre_attention_layer_norm(utterance, right_context)
        rc_output, output_mems = self._apply_attention_forward(
            layer_norm_utterance,
            lengths,
            layer_norm_right_context,
            mems,
            attention_mask,
        )
        output_utterance, output_right_context = self._apply_post_attention_ffn(rc_output, utterance, right_context)
        return output_utterance, output_right_context, output_mems

    @torch.jit.export
    def infer(
        self,
        utterance: torch.Tensor,
        lengths: torch.Tensor,
        right_context: torch.Tensor,
        state: Optional[List[torch.Tensor]],
        mems: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, List[torch.Tensor], torch.Tensor]:
        r"""Forward pass for streaming inference.

        Args:
            utterance (torch.Tensor): `(T, B, D)` utterance frames.
            lengths (torch.Tensor): `(B,)` valid frame counts for ``utterance``.
            right_context (torch.Tensor): `(R, B, D)` right context frames.
            state (List[torch.Tensor] or None): layer state from the preceding
                invocation of ``infer``, or ``None`` on the first call.
            mems (torch.Tensor): `(M, B, D)` memory elements.

        Returns:
            (Tensor, Tensor, List[torch.Tensor], Tensor):
                encoded utterance `(T, B, D)`, updated right context `(R, B, D)`,
                updated layer state, updated memory `(M, B, D)`.
        """
        (
            layer_norm_utterance,
            layer_norm_right_context,
        ) = self._apply_pre_attention_layer_norm(utterance, right_context)
        rc_output, output_mems, output_state = self._apply_attention_infer(
            layer_norm_utterance, lengths, layer_norm_right_context, mems, state
        )
        output_utterance, output_right_context = self._apply_post_attention_ffn(rc_output, utterance, right_context)
        return output_utterance, output_right_context, output_state, output_mems
class _EmformerImpl(torch.nn.Module):
    """Stack of Emformer layers plus the block-wise attention-mask machinery."""

    def __init__(
        self,
        emformer_layers: torch.nn.ModuleList,
        segment_length: int,
        left_context_length: int = 0,
        right_context_length: int = 0,
        max_memory_size: int = 0,
    ):
        super().__init__()

        self.use_mem = max_memory_size > 0
        # Pools each segment down to one memory vector.
        self.memory_op = torch.nn.AvgPool1d(
            kernel_size=segment_length,
            stride=segment_length,
            ceil_mode=True,
        )
        self.emformer_layers = emformer_layers
        self.left_context_length = left_context_length
        self.right_context_length = right_context_length
        self.segment_length = segment_length
        self.max_memory_size = max_memory_size

    def _gen_right_context(self, input: torch.Tensor) -> torch.Tensor:
        # Gather the hard-copied right-context block for every segment;
        # the final segment uses the trailing right_context_length frames.
        T = input.shape[0]
        num_segs = math.ceil((T - self.right_context_length) / self.segment_length)
        right_context_blocks = []
        for seg_idx in range(num_segs - 1):
            start = (seg_idx + 1) * self.segment_length
            end = start + self.right_context_length
            right_context_blocks.append(input[start:end])
        right_context_blocks.append(input[T - self.right_context_length :])
        return torch.cat(right_context_blocks)

    def _gen_attention_mask_col_widths(self, seg_idx: int, utterance_length: int) -> List[int]:
        # Column layout of the full key sequence for one query segment:
        # [(mems,) right-context blocks, utterance]; widths partition it into
        # attended vs. masked stretches around the current segment.
        num_segs = math.ceil(utterance_length / self.segment_length)
        rc = self.right_context_length
        lc = self.left_context_length
        rc_start = seg_idx * rc
        rc_end = rc_start + rc
        seg_start = max(seg_idx * self.segment_length - lc, 0)
        seg_end = min((seg_idx + 1) * self.segment_length, utterance_length)
        rc_length = self.right_context_length * num_segs

        if self.use_mem:
            m_start = max(seg_idx - self.max_memory_size, 0)
            mem_length = num_segs - 1
            col_widths = [
                m_start,  # before memory
                seg_idx - m_start,  # memory
                mem_length - seg_idx,  # after memory
                rc_start,  # before right context
                rc,  # right context
                rc_length - rc_end,  # after right context
                seg_start,  # before query segment
                seg_end - seg_start,  # query segment
                utterance_length - seg_end,  # after query segment
            ]
        else:
            col_widths = [
                rc_start,  # before right context
                rc,  # right context
                rc_length - rc_end,  # after right context
                seg_start,  # before query segment
                seg_end - seg_start,  # query segment
                utterance_length - seg_end,  # after query segment
            ]

        return col_widths

    def _gen_attention_mask(self, input: torch.Tensor) -> torch.Tensor:
        # Build per-segment row blocks for right-context, query, and (optionally)
        # summary queries, then invert: True marks a disallowed position.
        utterance_length = input.size(0)
        num_segs = math.ceil(utterance_length / self.segment_length)

        rc_mask = []
        query_mask = []
        summary_mask = []

        if self.use_mem:
            num_cols = 9
            # memory, right context, query segment
            rc_q_cols_mask = [idx in [1, 4, 7] for idx in range(num_cols)]
            # right context, query segment
            s_cols_mask = [idx in [4, 7] for idx in range(num_cols)]
            masks_to_concat = [rc_mask, query_mask, summary_mask]
        else:
            num_cols = 6
            # right context, query segment
            rc_q_cols_mask = [idx in [1, 4] for idx in range(num_cols)]
            s_cols_mask = None
            masks_to_concat = [rc_mask, query_mask]

        for seg_idx in range(num_segs):
            col_widths = self._gen_attention_mask_col_widths(seg_idx, utterance_length)

            rc_mask_block = _gen_attention_mask_block(
                col_widths, rc_q_cols_mask, self.right_context_length, input.device
            )
            rc_mask.append(rc_mask_block)

            query_mask_block = _gen_attention_mask_block(
                col_widths,
                rc_q_cols_mask,
                min(
                    self.segment_length,
                    utterance_length - seg_idx * self.segment_length,
                ),
                input.device,
            )
            query_mask.append(query_mask_block)

            if s_cols_mask is not None:
                summary_mask_block = _gen_attention_mask_block(col_widths, s_cols_mask, 1, input.device)
                summary_mask.append(summary_mask_block)

        attention_mask = (1 - torch.cat([torch.cat(mask) for mask in masks_to_concat])).to(torch.bool)
        return attention_mask

    def forward(self, input: torch.Tensor, lengths: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        r"""Forward pass for training and non-streaming inference.

        B: batch size; T: max number of input frames in batch; D: feature dim.

        Args:
            input (torch.Tensor): `(B, T + right_context_length, D)` utterance
                frames right-padded with right context frames.
            lengths (torch.Tensor): `(B,)` valid utterance frame counts.

        Returns:
            (Tensor, Tensor): output frames `(B, T, D)` and output lengths `(B,)`.
        """
        # Internally time-major: (T, B, D).
        input = input.permute(1, 0, 2)
        right_context = self._gen_right_context(input)
        utterance = input[: input.size(0) - self.right_context_length]
        attention_mask = self._gen_attention_mask(utterance)
        mems = (
            # Last pooled vector belongs to the final segment and is dropped.
            self.memory_op(utterance.permute(1, 2, 0)).permute(2, 0, 1)[:-1]
            if self.use_mem
            else torch.empty(0).to(dtype=input.dtype, device=input.device)
        )
        output = utterance
        for layer in self.emformer_layers:
            output, right_context, mems = layer(output, lengths, right_context, mems, attention_mask)
        return output.permute(1, 0, 2), lengths

    @torch.jit.export
    def infer(
        self,
        input: torch.Tensor,
        lengths: torch.Tensor,
        states: Optional[List[List[torch.Tensor]]] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, List[List[torch.Tensor]]]:
        r"""Forward pass for streaming inference.

        B: batch size; D: feature dim.

        Args:
            input (torch.Tensor): `(B, segment_length + right_context_length, D)`
                utterance frames right-padded with right context frames.
            lengths (torch.Tensor): `(B,)` valid frame counts.
            states (List[List[torch.Tensor]] or None, optional): per-layer states
                from the preceding invocation of ``infer``. (Default: ``None``)

        Returns:
            (Tensor, Tensor, List[List[Tensor]]):
                output frames `(B, segment_length, D)`, output lengths `(B,)`,
                and updated per-layer states.
        """
        if input.size(1) != self.segment_length + self.right_context_length:
            raise ValueError(
                "Per configured segment_length and right_context_length"
                f", expected size of {self.segment_length + self.right_context_length} for dimension 1 of input"
                f", but got {input.size(1)}."
            )
        input = input.permute(1, 0, 2)
        right_context_start_idx = input.size(0) - self.right_context_length
        right_context = input[right_context_start_idx:]
        utterance = input[:right_context_start_idx]
        output_lengths = torch.clamp(lengths - self.right_context_length, min=0)
        mems = (
            self.memory_op(utterance.permute(1, 2, 0)).permute(2, 0, 1)
            if self.use_mem
            else torch.empty(0).to(dtype=input.dtype, device=input.device)
        )
        output = utterance
        output_states: List[List[torch.Tensor]] = []
        for layer_idx, layer in enumerate(self.emformer_layers):
            output, right_context, output_state, mems = layer.infer(
                output,
                output_lengths,
                right_context,
                None if states is None else states[layer_idx],
                mems,
            )
            output_states.append(output_state)

        return output.permute(1, 0, 2), output_lengths, output_states
class Emformer(_EmformerImpl):
    r"""Emformer architecture introduced in
    *Emformer: Efficient Memory Transformer Based Acoustic Model for Low Latency Streaming Speech Recognition*
    :cite:`shi2021emformer`.

    See Also:
        * :func:`~torchaudio.models.emformer_rnnt_model`,
          :func:`~torchaudio.models.emformer_rnnt_base`: factory functions.
        * :class:`torchaudio.pipelines.RNNTBundle`: ASR pipelines with pretrained model.

    Args:
        input_dim (int): input dimension.
        num_heads (int): number of attention heads in each Emformer layer.
        ffn_dim (int): hidden layer dimension of each layer's feedforward network.
        num_layers (int): number of Emformer layers to instantiate.
        segment_length (int): length of each input segment.
        dropout (float, optional): dropout probability. (Default: 0.0)
        activation (str, optional): feedforward activation; one of
            ("relu", "gelu", "silu"). (Default: "relu")
        left_context_length (int, optional): length of left context. (Default: 0)
        right_context_length (int, optional): length of right context. (Default: 0)
        max_memory_size (int, optional): maximum number of memory elements. (Default: 0)
        weight_init_scale_strategy (str or None, optional): per-layer weight
            initialization scaling strategy; one of ("depthwise", "constant",
            ``None``). (Default: "depthwise")
        tanh_on_mem (bool, optional): if ``True``, applies tanh to memory elements.
            (Default: ``False``)
        negative_inf (float, optional): value standing in for negative infinity in
            attention weights. (Default: -1e8)

    Examples:
        >>> emformer = Emformer(512, 8, 2048, 20, 4, right_context_length=1)
        >>> input = torch.rand(128, 400, 512)  # batch, num_frames, feature_dim
        >>> lengths = torch.randint(1, 200, (128,))  # batch
        >>> output, lengths = emformer(input, lengths)
        >>> input = torch.rand(128, 5, 512)
        >>> lengths = torch.ones(128) * 5
        >>> output, lengths, states = emformer.infer(input, lengths, None)
    """

    def __init__(
        self,
        input_dim: int,
        num_heads: int,
        ffn_dim: int,
        num_layers: int,
        segment_length: int,
        dropout: float = 0.0,
        activation: str = "relu",
        left_context_length: int = 0,
        right_context_length: int = 0,
        max_memory_size: int = 0,
        weight_init_scale_strategy: Optional[str] = "depthwise",
        tanh_on_mem: bool = False,
        negative_inf: float = -1e8,
    ):
        # One gain per layer, per the configured scaling strategy.
        weight_init_gains = _get_weight_init_gains(weight_init_scale_strategy, num_layers)
        emformer_layers = torch.nn.ModuleList(
            [
                _EmformerLayer(
                    input_dim,
                    num_heads,
                    ffn_dim,
                    segment_length,
                    dropout=dropout,
                    activation=activation,
                    left_context_length=left_context_length,
                    max_memory_size=max_memory_size,
                    weight_init_gain=weight_init_gains[layer_idx],
                    tanh_on_mem=tanh_on_mem,
                    negative_inf=negative_inf,
                )
                for layer_idx in range(num_layers)
            ]
        )
        super().__init__(
            emformer_layers,
            segment_length,
            left_context_length=left_context_length,
            right_context_length=right_context_length,
            max_memory_size=max_memory_size,
        )
+ """ + B, T, D = input.shape + num_frames = T - (T % self.stride) + input = input[:, :num_frames, :] + lengths = lengths.div(self.stride, rounding_mode="trunc") + T_max = num_frames // self.stride + + output = input.reshape(B, T_max, D * self.stride) + output = output.contiguous() + return output, lengths + + +class _CustomLSTM(torch.nn.Module): + r"""Custom long-short-term memory (LSTM) block that applies layer normalization + to internal nodes. + + Args: + input_dim (int): input dimension. + hidden_dim (int): hidden dimension. + layer_norm (bool, optional): if ``True``, enables layer normalization. (Default: ``False``) + layer_norm_epsilon (float, optional): value of epsilon to use in + layer normalization layers (Default: 1e-5) + """ + + def __init__( + self, + input_dim: int, + hidden_dim: int, + layer_norm: bool = False, + layer_norm_epsilon: float = 1e-5, + ) -> None: + super().__init__() + self.x2g = torch.nn.Linear(input_dim, 4 * hidden_dim, bias=(not layer_norm)) + self.p2g = torch.nn.Linear(hidden_dim, 4 * hidden_dim, bias=False) + if layer_norm: + self.c_norm = torch.nn.LayerNorm(hidden_dim, eps=layer_norm_epsilon) + self.g_norm = torch.nn.LayerNorm(4 * hidden_dim, eps=layer_norm_epsilon) + else: + self.c_norm = torch.nn.Identity() + self.g_norm = torch.nn.Identity() + + self.hidden_dim = hidden_dim + + def forward( + self, input: torch.Tensor, state: Optional[List[torch.Tensor]] + ) -> Tuple[torch.Tensor, List[torch.Tensor]]: + r"""Forward pass. + + B: batch size; + T: maximum sequence length in batch; + D: feature dimension of each input sequence element. + + Args: + input (torch.Tensor): with shape `(T, B, D)`. + state (List[torch.Tensor] or None): list of tensors + representing internal state generated in preceding invocation + of ``forward``. + + Returns: + (torch.Tensor, List[torch.Tensor]): + torch.Tensor + output, with shape `(T, B, hidden_dim)`. 
+ List[torch.Tensor] + list of tensors representing internal state generated + in current invocation of ``forward``. + """ + if state is None: + B = input.size(1) + h = torch.zeros(B, self.hidden_dim, device=input.device, dtype=input.dtype) + c = torch.zeros(B, self.hidden_dim, device=input.device, dtype=input.dtype) + else: + h, c = state + + gated_input = self.x2g(input) + outputs = [] + for gates in gated_input.unbind(0): + gates = gates + self.p2g(h) + gates = self.g_norm(gates) + input_gate, forget_gate, cell_gate, output_gate = gates.chunk(4, 1) + input_gate = input_gate.sigmoid() + forget_gate = forget_gate.sigmoid() + cell_gate = cell_gate.tanh() + output_gate = output_gate.sigmoid() + c = forget_gate * c + input_gate * cell_gate + c = self.c_norm(c) + h = output_gate * c.tanh() + outputs.append(h) + + output = torch.stack(outputs, dim=0) + state = [h, c] + + return output, state + + +class _Transcriber(ABC): + @abstractmethod + def forward(self, input: torch.Tensor, lengths: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + pass + + @abstractmethod + def infer( + self, + input: torch.Tensor, + lengths: torch.Tensor, + states: Optional[List[List[torch.Tensor]]], + ) -> Tuple[torch.Tensor, torch.Tensor, List[List[torch.Tensor]]]: + pass + + +class _EmformerEncoder(torch.nn.Module, _Transcriber): + r"""Emformer-based recurrent neural network transducer (RNN-T) encoder (transcription network). + + Args: + input_dim (int): feature dimension of each input sequence element. + output_dim (int): feature dimension of each output sequence element. + segment_length (int): length of input segment expressed as number of frames. + right_context_length (int): length of right context expressed as number of frames. + time_reduction_input_dim (int): dimension to scale each element in input sequences to + prior to applying time reduction block. + time_reduction_stride (int): factor by which to reduce length of input sequence. 
+ transformer_num_heads (int): number of attention heads in each Emformer layer. + transformer_ffn_dim (int): hidden layer dimension of each Emformer layer's feedforward network. + transformer_num_layers (int): number of Emformer layers to instantiate. + transformer_left_context_length (int): length of left context. + transformer_dropout (float, optional): transformer dropout probability. (Default: 0.0) + transformer_activation (str, optional): activation function to use in each Emformer layer's + feedforward network. Must be one of ("relu", "gelu", "silu"). (Default: "relu") + transformer_max_memory_size (int, optional): maximum number of memory elements to use. (Default: 0) + transformer_weight_init_scale_strategy (str, optional): per-layer weight initialization scaling + strategy. Must be one of ("depthwise", "constant", ``None``). (Default: "depthwise") + transformer_tanh_on_mem (bool, optional): if ``True``, applies tanh to memory elements. (Default: ``False``) + """ + + def __init__( + self, + *, + input_dim: int, + output_dim: int, + segment_length: int, + right_context_length: int, + time_reduction_input_dim: int, + time_reduction_stride: int, + transformer_num_heads: int, + transformer_ffn_dim: int, + transformer_num_layers: int, + transformer_left_context_length: int, + transformer_dropout: float = 0.0, + transformer_activation: str = "relu", + transformer_max_memory_size: int = 0, + transformer_weight_init_scale_strategy: str = "depthwise", + transformer_tanh_on_mem: bool = False, + ) -> None: + super().__init__() + self.input_linear = torch.nn.Linear( + input_dim, + time_reduction_input_dim, + bias=False, + ) + self.time_reduction = _TimeReduction(time_reduction_stride) + transformer_input_dim = time_reduction_input_dim * time_reduction_stride + self.transformer = Emformer( + transformer_input_dim, + transformer_num_heads, + transformer_ffn_dim, + transformer_num_layers, + segment_length // time_reduction_stride, + dropout=transformer_dropout, + 
activation=transformer_activation, + left_context_length=transformer_left_context_length, + right_context_length=right_context_length // time_reduction_stride, + max_memory_size=transformer_max_memory_size, + weight_init_scale_strategy=transformer_weight_init_scale_strategy, + tanh_on_mem=transformer_tanh_on_mem, + ) + self.output_linear = torch.nn.Linear(transformer_input_dim, output_dim) + self.layer_norm = torch.nn.LayerNorm(output_dim) + + def forward(self, input: torch.Tensor, lengths: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + r"""Forward pass for training. + + B: batch size; + T: maximum input sequence length in batch; + D: feature dimension of each input sequence frame (input_dim). + + Args: + input (torch.Tensor): input frame sequences right-padded with right context, with + shape `(B, T + right context length, D)`. + lengths (torch.Tensor): with shape `(B,)` and i-th element representing + number of valid frames for i-th batch element in ``input``. + + Returns: + (torch.Tensor, torch.Tensor): + torch.Tensor + output frame sequences, with + shape `(B, T // time_reduction_stride, output_dim)`. + torch.Tensor + output input lengths, with shape `(B,)` and i-th element representing + number of valid elements for i-th batch element in output frame sequences. + """ + input_linear_out = self.input_linear(input) + time_reduction_out, time_reduction_lengths = self.time_reduction(input_linear_out, lengths) + transformer_out, transformer_lengths = self.transformer(time_reduction_out, time_reduction_lengths) + output_linear_out = self.output_linear(transformer_out) + layer_norm_out = self.layer_norm(output_linear_out) + return layer_norm_out, transformer_lengths + + @torch.jit.export + def infer( + self, + input: torch.Tensor, + lengths: torch.Tensor, + states: Optional[List[List[torch.Tensor]]], + ) -> Tuple[torch.Tensor, torch.Tensor, List[List[torch.Tensor]]]: + r"""Forward pass for inference. 
+ + B: batch size; + T: maximum input sequence segment length in batch; + D: feature dimension of each input sequence frame (input_dim). + + Args: + input (torch.Tensor): input frame sequence segments right-padded with right context, with + shape `(B, T + right context length, D)`. + lengths (torch.Tensor): with shape `(B,)` and i-th element representing + number of valid frames for i-th batch element in ``input``. + state (List[List[torch.Tensor]] or None): list of lists of tensors + representing internal state generated in preceding invocation + of ``infer``. + + Returns: + (torch.Tensor, torch.Tensor, List[List[torch.Tensor]]): + torch.Tensor + output frame sequences, with + shape `(B, T // time_reduction_stride, output_dim)`. + torch.Tensor + output input lengths, with shape `(B,)` and i-th element representing + number of valid elements for i-th batch element in output. + List[List[torch.Tensor]] + output states; list of lists of tensors + representing internal state generated in current invocation + of ``infer``. + """ + input_linear_out = self.input_linear(input) + time_reduction_out, time_reduction_lengths = self.time_reduction(input_linear_out, lengths) + ( + transformer_out, + transformer_lengths, + transformer_states, + ) = self.transformer.infer(time_reduction_out, time_reduction_lengths, states) + output_linear_out = self.output_linear(transformer_out) + layer_norm_out = self.layer_norm(output_linear_out) + return layer_norm_out, transformer_lengths, transformer_states + + +class _Predictor(torch.nn.Module): + r"""Recurrent neural network transducer (RNN-T) prediction network. + + Args: + num_symbols (int): size of target token lexicon. + output_dim (int): feature dimension of each output sequence element. + symbol_embedding_dim (int): dimension of each target token embedding. + num_lstm_layers (int): number of LSTM layers to instantiate. + lstm_hidden_dim (int): output dimension of each LSTM layer. 
+ lstm_layer_norm (bool, optional): if ``True``, enables layer normalization + for LSTM layers. (Default: ``False``) + lstm_layer_norm_epsilon (float, optional): value of epsilon to use in + LSTM layer normalization layers. (Default: 1e-5) + lstm_dropout (float, optional): LSTM dropout probability. (Default: 0.0) + + """ + + def __init__( + self, + num_symbols: int, + output_dim: int, + symbol_embedding_dim: int, + num_lstm_layers: int, + lstm_hidden_dim: int, + lstm_layer_norm: bool = False, + lstm_layer_norm_epsilon: float = 1e-5, + lstm_dropout: float = 0.0, + ) -> None: + super().__init__() + self.embedding = torch.nn.Embedding(num_symbols, symbol_embedding_dim) + self.input_layer_norm = torch.nn.LayerNorm(symbol_embedding_dim) + self.lstm_layers = torch.nn.ModuleList( + [ + _CustomLSTM( + symbol_embedding_dim if idx == 0 else lstm_hidden_dim, + lstm_hidden_dim, + layer_norm=lstm_layer_norm, + layer_norm_epsilon=lstm_layer_norm_epsilon, + ) + for idx in range(num_lstm_layers) + ] + ) + self.dropout = torch.nn.Dropout(p=lstm_dropout) + self.linear = torch.nn.Linear(lstm_hidden_dim, output_dim) + self.output_layer_norm = torch.nn.LayerNorm(output_dim) + + self.lstm_dropout = lstm_dropout + + def forward( + self, + input: torch.Tensor, + lengths: torch.Tensor, + state: Optional[List[List[torch.Tensor]]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor, List[List[torch.Tensor]]]: + r"""Forward pass. + + B: batch size; + U: maximum sequence length in batch; + D: feature dimension of each input sequence element. + + Args: + input (torch.Tensor): target sequences, with shape `(B, U)` and each element + mapping to a target symbol, i.e. in range `[0, num_symbols)`. + lengths (torch.Tensor): with shape `(B,)` and i-th element representing + number of valid frames for i-th batch element in ``input``. + state (List[List[torch.Tensor]] or None, optional): list of lists of tensors + representing internal state generated in preceding invocation + of ``forward``. 
(Default: ``None``) + + Returns: + (torch.Tensor, torch.Tensor, List[List[torch.Tensor]]): + torch.Tensor + output encoding sequences, with shape `(B, U, output_dim)` + torch.Tensor + output lengths, with shape `(B,)` and i-th element representing + number of valid elements for i-th batch element in output encoding sequences. + List[List[torch.Tensor]] + output states; list of lists of tensors + representing internal state generated in current invocation of ``forward``. + """ + input_tb = input.permute(1, 0) + embedding_out = self.embedding(input_tb) + input_layer_norm_out = self.input_layer_norm(embedding_out) + + lstm_out = input_layer_norm_out + state_out: List[List[torch.Tensor]] = [] + for layer_idx, lstm in enumerate(self.lstm_layers): + lstm_out, lstm_state_out = lstm(lstm_out, None if state is None else state[layer_idx]) + lstm_out = self.dropout(lstm_out) + state_out.append(lstm_state_out) + + linear_out = self.linear(lstm_out) + output_layer_norm_out = self.output_layer_norm(linear_out) + return output_layer_norm_out.permute(1, 0, 2), lengths, state_out + + +class _Joiner(torch.nn.Module): + r"""Recurrent neural network transducer (RNN-T) joint network. + + Args: + input_dim (int): source and target input dimension. + output_dim (int): output dimension. + activation (str, optional): activation function to use in the joiner. + Must be one of ("relu", "tanh"). 
(Default: "relu") + + """ + + def __init__(self, input_dim: int, output_dim: int, activation: str = "relu") -> None: + super().__init__() + self.linear = torch.nn.Linear(input_dim, output_dim, bias=True) + if activation == "relu": + self.activation = torch.nn.ReLU() + elif activation == "tanh": + self.activation = torch.nn.Tanh() + else: + raise ValueError(f"Unsupported activation {activation}") + + def forward( + self, + source_encodings: torch.Tensor, + source_lengths: torch.Tensor, + target_encodings: torch.Tensor, + target_lengths: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + r"""Forward pass for training. + + B: batch size; + T: maximum source sequence length in batch; + U: maximum target sequence length in batch; + D: dimension of each source and target sequence encoding. + + Args: + source_encodings (torch.Tensor): source encoding sequences, with + shape `(B, T, D)`. + source_lengths (torch.Tensor): with shape `(B,)` and i-th element representing + valid sequence length of i-th batch element in ``source_encodings``. + target_encodings (torch.Tensor): target encoding sequences, with shape `(B, U, D)`. + target_lengths (torch.Tensor): with shape `(B,)` and i-th element representing + valid sequence length of i-th batch element in ``target_encodings``. + + Returns: + (torch.Tensor, torch.Tensor, torch.Tensor): + torch.Tensor + joint network output, with shape `(B, T, U, output_dim)`. + torch.Tensor + output source lengths, with shape `(B,)` and i-th element representing + number of valid elements along dim 1 for i-th batch element in joint network output. + torch.Tensor + output target lengths, with shape `(B,)` and i-th element representing + number of valid elements along dim 2 for i-th batch element in joint network output. 
+ """ + joint_encodings = source_encodings.unsqueeze(2).contiguous() + target_encodings.unsqueeze(1).contiguous() + activation_out = self.activation(joint_encodings) + output = self.linear(activation_out) + return output, source_lengths, target_lengths + + +class RNNT(torch.nn.Module): + r"""torchaudio.models.RNNT() + + Recurrent neural network transducer (RNN-T) model. + + Note: + To build the model, please use one of the factory functions. + + See Also: + :class:`torchaudio.pipelines.RNNTBundle`: ASR pipeline with pre-trained models. + + Args: + transcriber (torch.nn.Module): transcription network. + predictor (torch.nn.Module): prediction network. + joiner (torch.nn.Module): joint network. + """ + + def __init__(self, transcriber: _Transcriber, predictor: _Predictor, joiner: _Joiner) -> None: + super().__init__() + self.transcriber = transcriber + self.predictor = predictor + self.joiner = joiner + + def forward( + self, + sources: torch.Tensor, + source_lengths: torch.Tensor, + targets: torch.Tensor, + target_lengths: torch.Tensor, + predictor_state: Optional[List[List[torch.Tensor]]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, List[List[torch.Tensor]]]: + r"""Forward pass for training. + + B: batch size; + T: maximum source sequence length in batch; + U: maximum target sequence length in batch; + D: feature dimension of each source sequence element. + + Args: + sources (torch.Tensor): source frame sequences right-padded with right context, with + shape `(B, T, D)`. + source_lengths (torch.Tensor): with shape `(B,)` and i-th element representing + number of valid frames for i-th batch element in ``sources``. + targets (torch.Tensor): target sequences, with shape `(B, U)` and each element + mapping to a target symbol. + target_lengths (torch.Tensor): with shape `(B,)` and i-th element representing + number of valid frames for i-th batch element in ``targets``. 
+ predictor_state (List[List[torch.Tensor]] or None, optional): list of lists of tensors + representing prediction network internal state generated in preceding invocation + of ``forward``. (Default: ``None``) + + Returns: + (torch.Tensor, torch.Tensor, torch.Tensor, List[List[torch.Tensor]]): + torch.Tensor + joint network output, with shape + `(B, max output source length, max output target length, output_dim (number of target symbols))`. + torch.Tensor + output source lengths, with shape `(B,)` and i-th element representing + number of valid elements along dim 1 for i-th batch element in joint network output. + torch.Tensor + output target lengths, with shape `(B,)` and i-th element representing + number of valid elements along dim 2 for i-th batch element in joint network output. + List[List[torch.Tensor]] + output states; list of lists of tensors + representing prediction network internal state generated in current invocation + of ``forward``. + """ + source_encodings, source_lengths = self.transcriber( + input=sources, + lengths=source_lengths, + ) + target_encodings, target_lengths, predictor_state = self.predictor( + input=targets, + lengths=target_lengths, + state=predictor_state, + ) + output, source_lengths, target_lengths = self.joiner( + source_encodings=source_encodings, + source_lengths=source_lengths, + target_encodings=target_encodings, + target_lengths=target_lengths, + ) + + return ( + output, + source_lengths, + target_lengths, + predictor_state, + ) + + @torch.jit.export + def transcribe_streaming( + self, + sources: torch.Tensor, + source_lengths: torch.Tensor, + state: Optional[List[List[torch.Tensor]]], + ) -> Tuple[torch.Tensor, torch.Tensor, List[List[torch.Tensor]]]: + r"""Applies transcription network to sources in streaming mode. + + B: batch size; + T: maximum source sequence segment length in batch; + D: feature dimension of each source sequence frame. 
+ + Args: + sources (torch.Tensor): source frame sequence segments right-padded with right context, with + shape `(B, T + right context length, D)`. + source_lengths (torch.Tensor): with shape `(B,)` and i-th element representing + number of valid frames for i-th batch element in ``sources``. + state (List[List[torch.Tensor]] or None): list of lists of tensors + representing transcription network internal state generated in preceding invocation + of ``transcribe_streaming``. + + Returns: + (torch.Tensor, torch.Tensor, List[List[torch.Tensor]]): + torch.Tensor + output frame sequences, with + shape `(B, T // time_reduction_stride, output_dim)`. + torch.Tensor + output lengths, with shape `(B,)` and i-th element representing + number of valid elements for i-th batch element in output. + List[List[torch.Tensor]] + output states; list of lists of tensors + representing transcription network internal state generated in current invocation + of ``transcribe_streaming``. + """ + return self.transcriber.infer(sources, source_lengths, state) + + @torch.jit.export + def transcribe( + self, + sources: torch.Tensor, + source_lengths: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor]: + r"""Applies transcription network to sources in non-streaming mode. + + B: batch size; + T: maximum source sequence length in batch; + D: feature dimension of each source sequence frame. + + Args: + sources (torch.Tensor): source frame sequences right-padded with right context, with + shape `(B, T + right context length, D)`. + source_lengths (torch.Tensor): with shape `(B,)` and i-th element representing + number of valid frames for i-th batch element in ``sources``. + + Returns: + (torch.Tensor, torch.Tensor): + torch.Tensor + output frame sequences, with + shape `(B, T // time_reduction_stride, output_dim)`. + torch.Tensor + output lengths, with shape `(B,)` and i-th element representing + number of valid elements for i-th batch element in output frame sequences. 
+ """ + return self.transcriber(sources, source_lengths) + + @torch.jit.export + def predict( + self, + targets: torch.Tensor, + target_lengths: torch.Tensor, + state: Optional[List[List[torch.Tensor]]], + ) -> Tuple[torch.Tensor, torch.Tensor, List[List[torch.Tensor]]]: + r"""Applies prediction network to targets. + + B: batch size; + U: maximum target sequence length in batch; + D: feature dimension of each target sequence frame. + + Args: + targets (torch.Tensor): target sequences, with shape `(B, U)` and each element + mapping to a target symbol, i.e. in range `[0, num_symbols)`. + target_lengths (torch.Tensor): with shape `(B,)` and i-th element representing + number of valid frames for i-th batch element in ``targets``. + state (List[List[torch.Tensor]] or None): list of lists of tensors + representing internal state generated in preceding invocation + of ``predict``. + + Returns: + (torch.Tensor, torch.Tensor, List[List[torch.Tensor]]): + torch.Tensor + output frame sequences, with shape `(B, U, output_dim)`. + torch.Tensor + output lengths, with shape `(B,)` and i-th element representing + number of valid elements for i-th batch element in output. + List[List[torch.Tensor]] + output states; list of lists of tensors + representing internal state generated in current invocation of ``predict``. + """ + return self.predictor(input=targets, lengths=target_lengths, state=state) + + @torch.jit.export + def join( + self, + source_encodings: torch.Tensor, + source_lengths: torch.Tensor, + target_encodings: torch.Tensor, + target_lengths: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + r"""Applies joint network to source and target encodings. + + B: batch size; + T: maximum source sequence length in batch; + U: maximum target sequence length in batch; + D: dimension of each source and target sequence encoding. + + Args: + source_encodings (torch.Tensor): source encoding sequences, with + shape `(B, T, D)`. 
+ source_lengths (torch.Tensor): with shape `(B,)` and i-th element representing + valid sequence length of i-th batch element in ``source_encodings``. + target_encodings (torch.Tensor): target encoding sequences, with shape `(B, U, D)`. + target_lengths (torch.Tensor): with shape `(B,)` and i-th element representing + valid sequence length of i-th batch element in ``target_encodings``. + + Returns: + (torch.Tensor, torch.Tensor, torch.Tensor): + torch.Tensor + joint network output, with shape `(B, T, U, output_dim)`. + torch.Tensor + output source lengths, with shape `(B,)` and i-th element representing + number of valid elements along dim 1 for i-th batch element in joint network output. + torch.Tensor + output target lengths, with shape `(B,)` and i-th element representing + number of valid elements along dim 2 for i-th batch element in joint network output. + """ + output, source_lengths, target_lengths = self.joiner( + source_encodings=source_encodings, + source_lengths=source_lengths, + target_encodings=target_encodings, + target_lengths=target_lengths, + ) + return output, source_lengths, target_lengths + + +def emformer_rnnt_model( + *, + input_dim: int, + encoding_dim: int, + num_symbols: int, + segment_length: int, + right_context_length: int, + time_reduction_input_dim: int, + time_reduction_stride: int, + transformer_num_heads: int, + transformer_ffn_dim: int, + transformer_num_layers: int, + transformer_dropout: float, + transformer_activation: str, + transformer_left_context_length: int, + transformer_max_memory_size: int, + transformer_weight_init_scale_strategy: str, + transformer_tanh_on_mem: bool, + symbol_embedding_dim: int, + num_lstm_layers: int, + lstm_layer_norm: bool, + lstm_layer_norm_epsilon: float, + lstm_dropout: float, +) -> RNNT: + r"""Builds Emformer-based :class:`~torchaudio.models.RNNT`. 
+ + Note: + For non-streaming inference, the expectation is for `transcribe` to be called on input + sequences right-concatenated with `right_context_length` frames. + + For streaming inference, the expectation is for `transcribe_streaming` to be called + on input chunks comprising `segment_length` frames right-concatenated with `right_context_length` + frames. + + Args: + input_dim (int): dimension of input sequence frames passed to transcription network. + encoding_dim (int): dimension of transcription- and prediction-network-generated encodings + passed to joint network. + num_symbols (int): cardinality of set of target tokens. + segment_length (int): length of input segment expressed as number of frames. + right_context_length (int): length of right context expressed as number of frames. + time_reduction_input_dim (int): dimension to scale each element in input sequences to + prior to applying time reduction block. + time_reduction_stride (int): factor by which to reduce length of input sequence. + transformer_num_heads (int): number of attention heads in each Emformer layer. + transformer_ffn_dim (int): hidden layer dimension of each Emformer layer's feedforward network. + transformer_num_layers (int): number of Emformer layers to instantiate. + transformer_left_context_length (int): length of left context considered by Emformer. + transformer_dropout (float): Emformer dropout probability. + transformer_activation (str): activation function to use in each Emformer layer's + feedforward network. Must be one of ("relu", "gelu", "silu"). + transformer_max_memory_size (int): maximum number of memory elements to use. + transformer_weight_init_scale_strategy (str): per-layer weight initialization scaling + strategy. Must be one of ("depthwise", "constant", ``None``). + transformer_tanh_on_mem (bool): if ``True``, applies tanh to memory elements. + symbol_embedding_dim (int): dimension of each target token embedding. 
+ num_lstm_layers (int): number of LSTM layers to instantiate. + lstm_layer_norm (bool): if ``True``, enables layer normalization for LSTM layers. + lstm_layer_norm_epsilon (float): value of epsilon to use in LSTM layer normalization layers. + lstm_dropout (float): LSTM dropout probability. + + Returns: + RNNT: + Emformer RNN-T model. + """ + encoder = _EmformerEncoder( + input_dim=input_dim, + output_dim=encoding_dim, + segment_length=segment_length, + right_context_length=right_context_length, + time_reduction_input_dim=time_reduction_input_dim, + time_reduction_stride=time_reduction_stride, + transformer_num_heads=transformer_num_heads, + transformer_ffn_dim=transformer_ffn_dim, + transformer_num_layers=transformer_num_layers, + transformer_dropout=transformer_dropout, + transformer_activation=transformer_activation, + transformer_left_context_length=transformer_left_context_length, + transformer_max_memory_size=transformer_max_memory_size, + transformer_weight_init_scale_strategy=transformer_weight_init_scale_strategy, + transformer_tanh_on_mem=transformer_tanh_on_mem, + ) + predictor = _Predictor( + num_symbols, + encoding_dim, + symbol_embedding_dim=symbol_embedding_dim, + num_lstm_layers=num_lstm_layers, + lstm_hidden_dim=symbol_embedding_dim, + lstm_layer_norm=lstm_layer_norm, + lstm_layer_norm_epsilon=lstm_layer_norm_epsilon, + lstm_dropout=lstm_dropout, + ) + joiner = _Joiner(encoding_dim, num_symbols) + return RNNT(encoder, predictor, joiner) + + +def emformer_rnnt_base(num_symbols: int) -> RNNT: + r"""Builds basic version of Emformer-based :class:`~torchaudio.models.RNNT`. + + Args: + num_symbols (int): The size of target token lexicon. + + Returns: + RNNT: + Emformer RNN-T model. 
+ """ + return emformer_rnnt_model( + input_dim=80, + encoding_dim=1024, + num_symbols=num_symbols, + segment_length=16, + right_context_length=4, + time_reduction_input_dim=128, + time_reduction_stride=4, + transformer_num_heads=8, + transformer_ffn_dim=2048, + transformer_num_layers=20, + transformer_dropout=0.1, + transformer_activation="gelu", + transformer_left_context_length=30, + transformer_max_memory_size=0, + transformer_weight_init_scale_strategy="depthwise", + transformer_tanh_on_mem=True, + symbol_embedding_dim=512, + num_lstm_layers=3, + lstm_layer_norm=True, + lstm_layer_norm_epsilon=1e-3, + lstm_dropout=0.3, + ) diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/rnnt_decoder.py b/venv/lib/python3.10/site-packages/torchaudio/models/rnnt_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..5a02b2ca907733a8e1ab404d1107bb702e977748 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/models/rnnt_decoder.py @@ -0,0 +1,339 @@ +from typing import Callable, Dict, List, Optional, Tuple + +import torch +from torchaudio.models import RNNT + + +__all__ = ["Hypothesis", "RNNTBeamSearch"] + + +Hypothesis = Tuple[List[int], torch.Tensor, List[List[torch.Tensor]], float] +Hypothesis.__doc__ = """Hypothesis generated by RNN-T beam search decoder, + represented as tuple of (tokens, prediction network output, prediction network state, score). 
+ """ + + +def _get_hypo_tokens(hypo: Hypothesis) -> List[int]: + return hypo[0] + + +def _get_hypo_predictor_out(hypo: Hypothesis) -> torch.Tensor: + return hypo[1] + + +def _get_hypo_state(hypo: Hypothesis) -> List[List[torch.Tensor]]: + return hypo[2] + + +def _get_hypo_score(hypo: Hypothesis) -> float: + return hypo[3] + + +def _get_hypo_key(hypo: Hypothesis) -> str: + return str(hypo[0]) + + +def _batch_state(hypos: List[Hypothesis]) -> List[List[torch.Tensor]]: + states: List[List[torch.Tensor]] = [] + for i in range(len(_get_hypo_state(hypos[0]))): + batched_state_components: List[torch.Tensor] = [] + for j in range(len(_get_hypo_state(hypos[0])[i])): + batched_state_components.append(torch.cat([_get_hypo_state(hypo)[i][j] for hypo in hypos])) + states.append(batched_state_components) + return states + + +def _slice_state(states: List[List[torch.Tensor]], idx: int, device: torch.device) -> List[List[torch.Tensor]]: + idx_tensor = torch.tensor([idx], device=device) + return [[state.index_select(0, idx_tensor) for state in state_tuple] for state_tuple in states] + + +def _default_hypo_sort_key(hypo: Hypothesis) -> float: + return _get_hypo_score(hypo) / (len(_get_hypo_tokens(hypo)) + 1) + + +def _compute_updated_scores( + hypos: List[Hypothesis], + next_token_probs: torch.Tensor, + beam_width: int, +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + hypo_scores = torch.tensor([_get_hypo_score(h) for h in hypos]).unsqueeze(1) + nonblank_scores = hypo_scores + next_token_probs[:, :-1] # [beam_width, num_tokens - 1] + nonblank_nbest_scores, nonblank_nbest_idx = nonblank_scores.reshape(-1).topk(beam_width) + nonblank_nbest_hypo_idx = nonblank_nbest_idx.div(nonblank_scores.shape[1], rounding_mode="trunc") + nonblank_nbest_token = nonblank_nbest_idx % nonblank_scores.shape[1] + return nonblank_nbest_scores, nonblank_nbest_hypo_idx, nonblank_nbest_token + + +def _remove_hypo(hypo: Hypothesis, hypo_list: List[Hypothesis]) -> None: + for i, elem in 
enumerate(hypo_list): + if _get_hypo_key(hypo) == _get_hypo_key(elem): + del hypo_list[i] + break + + +class RNNTBeamSearch(torch.nn.Module): + r"""Beam search decoder for RNN-T model. + + See Also: + * :class:`torchaudio.pipelines.RNNTBundle`: ASR pipeline with pretrained model. + + Args: + model (RNNT): RNN-T model to use. + blank (int): index of blank token in vocabulary. + temperature (float, optional): temperature to apply to joint network output. + Larger values yield more uniform samples. (Default: 1.0) + hypo_sort_key (Callable[[Hypothesis], float] or None, optional): callable that computes a score + for a given hypothesis to rank hypotheses by. If ``None``, defaults to callable that returns + hypothesis score normalized by token sequence length. (Default: None) + step_max_tokens (int, optional): maximum number of tokens to emit per input time step. (Default: 100) + """ + + def __init__( + self, + model: RNNT, + blank: int, + temperature: float = 1.0, + hypo_sort_key: Optional[Callable[[Hypothesis], float]] = None, + step_max_tokens: int = 100, + ) -> None: + super().__init__() + self.model = model + self.blank = blank + self.temperature = temperature + + if hypo_sort_key is None: + self.hypo_sort_key = _default_hypo_sort_key + else: + self.hypo_sort_key = hypo_sort_key + + self.step_max_tokens = step_max_tokens + + def _init_b_hypos(self, device: torch.device) -> List[Hypothesis]: + token = self.blank + state = None + + one_tensor = torch.tensor([1], device=device) + pred_out, _, pred_state = self.model.predict(torch.tensor([[token]], device=device), one_tensor, state) + init_hypo = ( + [token], + pred_out[0].detach(), + pred_state, + 0.0, + ) + return [init_hypo] + + def _gen_next_token_probs( + self, enc_out: torch.Tensor, hypos: List[Hypothesis], device: torch.device + ) -> torch.Tensor: + one_tensor = torch.tensor([1], device=device) + predictor_out = torch.stack([_get_hypo_predictor_out(h) for h in hypos], dim=0) + joined_out, _, _ = self.model.join( 
+ enc_out, + one_tensor, + predictor_out, + torch.tensor([1] * len(hypos), device=device), + ) # [beam_width, 1, 1, num_tokens] + joined_out = torch.nn.functional.log_softmax(joined_out / self.temperature, dim=3) + return joined_out[:, 0, 0] + + def _gen_b_hypos( + self, + b_hypos: List[Hypothesis], + a_hypos: List[Hypothesis], + next_token_probs: torch.Tensor, + key_to_b_hypo: Dict[str, Hypothesis], + ) -> List[Hypothesis]: + for i in range(len(a_hypos)): + h_a = a_hypos[i] + append_blank_score = _get_hypo_score(h_a) + next_token_probs[i, -1] + if _get_hypo_key(h_a) in key_to_b_hypo: + h_b = key_to_b_hypo[_get_hypo_key(h_a)] + _remove_hypo(h_b, b_hypos) + score = float(torch.tensor(_get_hypo_score(h_b)).logaddexp(append_blank_score)) + else: + score = float(append_blank_score) + h_b = ( + _get_hypo_tokens(h_a), + _get_hypo_predictor_out(h_a), + _get_hypo_state(h_a), + score, + ) + b_hypos.append(h_b) + key_to_b_hypo[_get_hypo_key(h_b)] = h_b + _, sorted_idx = torch.tensor([_get_hypo_score(hypo) for hypo in b_hypos]).sort() + return [b_hypos[idx] for idx in sorted_idx] + + def _gen_a_hypos( + self, + a_hypos: List[Hypothesis], + b_hypos: List[Hypothesis], + next_token_probs: torch.Tensor, + t: int, + beam_width: int, + device: torch.device, + ) -> List[Hypothesis]: + ( + nonblank_nbest_scores, + nonblank_nbest_hypo_idx, + nonblank_nbest_token, + ) = _compute_updated_scores(a_hypos, next_token_probs, beam_width) + + if len(b_hypos) < beam_width: + b_nbest_score = -float("inf") + else: + b_nbest_score = _get_hypo_score(b_hypos[-beam_width]) + + base_hypos: List[Hypothesis] = [] + new_tokens: List[int] = [] + new_scores: List[float] = [] + for i in range(beam_width): + score = float(nonblank_nbest_scores[i]) + if score > b_nbest_score: + a_hypo_idx = int(nonblank_nbest_hypo_idx[i]) + base_hypos.append(a_hypos[a_hypo_idx]) + new_tokens.append(int(nonblank_nbest_token[i])) + new_scores.append(score) + + if base_hypos: + new_hypos = self._gen_new_hypos(base_hypos, 
new_tokens, new_scores, t, device) + else: + new_hypos: List[Hypothesis] = [] + + return new_hypos + + def _gen_new_hypos( + self, + base_hypos: List[Hypothesis], + tokens: List[int], + scores: List[float], + t: int, + device: torch.device, + ) -> List[Hypothesis]: + tgt_tokens = torch.tensor([[token] for token in tokens], device=device) + states = _batch_state(base_hypos) + pred_out, _, pred_states = self.model.predict( + tgt_tokens, + torch.tensor([1] * len(base_hypos), device=device), + states, + ) + new_hypos: List[Hypothesis] = [] + for i, h_a in enumerate(base_hypos): + new_tokens = _get_hypo_tokens(h_a) + [tokens[i]] + new_hypos.append((new_tokens, pred_out[i].detach(), _slice_state(pred_states, i, device), scores[i])) + return new_hypos + + def _search( + self, + enc_out: torch.Tensor, + hypo: Optional[List[Hypothesis]], + beam_width: int, + ) -> List[Hypothesis]: + n_time_steps = enc_out.shape[1] + device = enc_out.device + + a_hypos: List[Hypothesis] = [] + b_hypos = self._init_b_hypos(device) if hypo is None else hypo + for t in range(n_time_steps): + a_hypos = b_hypos + b_hypos = torch.jit.annotate(List[Hypothesis], []) + key_to_b_hypo: Dict[str, Hypothesis] = {} + symbols_current_t = 0 + + while a_hypos: + next_token_probs = self._gen_next_token_probs(enc_out[:, t : t + 1], a_hypos, device) + next_token_probs = next_token_probs.cpu() + b_hypos = self._gen_b_hypos(b_hypos, a_hypos, next_token_probs, key_to_b_hypo) + + if symbols_current_t == self.step_max_tokens: + break + + a_hypos = self._gen_a_hypos( + a_hypos, + b_hypos, + next_token_probs, + t, + beam_width, + device, + ) + if a_hypos: + symbols_current_t += 1 + + _, sorted_idx = torch.tensor([self.hypo_sort_key(hyp) for hyp in b_hypos]).topk(beam_width) + b_hypos = [b_hypos[idx] for idx in sorted_idx] + + return b_hypos + + def forward(self, input: torch.Tensor, length: torch.Tensor, beam_width: int) -> List[Hypothesis]: + r"""Performs beam search for the given input sequence. 
+ + T: number of frames; + D: feature dimension of each frame. + + Args: + input (torch.Tensor): sequence of input frames, with shape (T, D) or (1, T, D). + length (torch.Tensor): number of valid frames in input + sequence, with shape () or (1,). + beam_width (int): beam size to use during search. + + Returns: + List[Hypothesis]: top-``beam_width`` hypotheses found by beam search. + """ + if input.dim() != 2 and not (input.dim() == 3 and input.shape[0] == 1): + raise ValueError("input must be of shape (T, D) or (1, T, D)") + if input.dim() == 2: + input = input.unsqueeze(0) + + if length.shape != () and length.shape != (1,): + raise ValueError("length must be of shape () or (1,)") + if length.dim() == 0: + length = length.unsqueeze(0) + + enc_out, _ = self.model.transcribe(input, length) + return self._search(enc_out, None, beam_width) + + @torch.jit.export + def infer( + self, + input: torch.Tensor, + length: torch.Tensor, + beam_width: int, + state: Optional[List[List[torch.Tensor]]] = None, + hypothesis: Optional[List[Hypothesis]] = None, + ) -> Tuple[List[Hypothesis], List[List[torch.Tensor]]]: + r"""Performs beam search for the given input sequence in streaming mode. + + T: number of frames; + D: feature dimension of each frame. + + Args: + input (torch.Tensor): sequence of input frames, with shape (T, D) or (1, T, D). + length (torch.Tensor): number of valid frames in input + sequence, with shape () or (1,). + beam_width (int): beam size to use during search. + state (List[List[torch.Tensor]] or None, optional): list of lists of tensors + representing transcription network internal state generated in preceding + invocation. (Default: ``None``) + hypothesis (List[Hypothesis] or None): hypotheses from preceding invocation to seed + search with. (Default: ``None``) + + Returns: + (List[Hypothesis], List[List[torch.Tensor]]): + List[Hypothesis] + top-``beam_width`` hypotheses found by beam search. 
+ List[List[torch.Tensor]] + list of lists of tensors representing transcription network + internal state generated in current invocation. + """ + if input.dim() != 2 and not (input.dim() == 3 and input.shape[0] == 1): + raise ValueError("input must be of shape (T, D) or (1, T, D)") + if input.dim() == 2: + input = input.unsqueeze(0) + + if length.shape != () and length.shape != (1,): + raise ValueError("length must be of shape () or (1,)") + if length.dim() == 0: + length = length.unsqueeze(0) + + enc_out, _, state = self.model.transcribe_streaming(input, length, state) + return self._search(enc_out, hypothesis, beam_width), state diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/squim/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/models/squim/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d48969593e4857a35cebdab212a4f184d336dc9f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/models/squim/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/squim/__pycache__/objective.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/models/squim/__pycache__/objective.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db0540dfacf0c16ea762e0a88f11f9bd0655bc2a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/models/squim/__pycache__/objective.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/squim/__pycache__/subjective.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/models/squim/__pycache__/subjective.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebaeb0d603c8d84b6ceb97ca84d3d713eb8469ac Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/models/squim/__pycache__/subjective.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torchaudio/models/squim/subjective.py b/venv/lib/python3.10/site-packages/torchaudio/models/squim/subjective.py new file mode 100644 index 0000000000000000000000000000000000000000..4be681c91c5f67a2b888b49ec8269b74762360ab --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/models/squim/subjective.py @@ -0,0 +1,150 @@ +from typing import Tuple + +import torch +import torch.nn as nn +import torchaudio + + +class AttPool(nn.Module): + """Attention-Pooling module that estimates the attention score. + + Args: + input_dim (int): Input feature dimension. + att_dim (int): Attention Tensor dimension. + """ + + def __init__(self, input_dim: int, att_dim: int): + super(AttPool, self).__init__() + + self.linear1 = nn.Linear(input_dim, 1) + self.linear2 = nn.Linear(input_dim, att_dim) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Apply attention and pooling. + + Args: + x (torch.Tensor): Input Tensor with dimensions `(batch, time, feature_dim)`. + + Returns: + (torch.Tensor): Attention score with dimensions `(batch, att_dim)`. + """ + + att = self.linear1(x) # (batch, time, 1) + att = att.transpose(2, 1) # (batch, 1, time) + att = nn.functional.softmax(att, dim=2) + x = torch.matmul(att, x).squeeze(1) # (batch, input_dim) + x = self.linear2(x) # (batch, att_dim) + return x + + +class Predictor(nn.Module): + """Prediction module that apply pooling and attention, then predict subjective metric scores. + + Args: + input_dim (int): Input feature dimension. + att_dim (int): Attention Tensor dimension. + """ + + def __init__(self, input_dim: int, att_dim: int): + super(Predictor, self).__init__() + self.att_pool_layer = AttPool(input_dim, att_dim) + self.att_dim = att_dim + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Predict subjective evaluation metric score. + + Args: + x (torch.Tensor): Input Tensor with dimensions `(batch, time, feature_dim)`. + + Returns: + (torch.Tensor): Subjective metric score. 
Tensor with dimensions `(batch,)`. + """ + x = self.att_pool_layer(x) + x = nn.functional.softmax(x, dim=1) + B = torch.linspace(0, 4, steps=self.att_dim, device=x.device) + x = (x * B).sum(dim=1) + return x + + +class SquimSubjective(nn.Module): + """Speech Quality and Intelligibility Measures (SQUIM) model that predicts **subjective** metric scores + for speech enhancement (e.g., Mean Opinion Score (MOS)). The model is adopted from *NORESQA-MOS* + :cite:`manocha2022speech` which predicts MOS scores given the input speech and a non-matching reference. + + Args: + ssl_model (torch.nn.Module): The self-supervised learning model for feature extraction. + projector (torch.nn.Module): Projection layer that projects SSL feature to a lower dimension. + predictor (torch.nn.Module): Predict the subjective scores. + """ + + def __init__(self, ssl_model: nn.Module, projector: nn.Module, predictor: nn.Module): + super(SquimSubjective, self).__init__() + self.ssl_model = ssl_model + self.projector = projector + self.predictor = predictor + + def _align_shapes(self, waveform: torch.Tensor, reference: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """Cut or pad the reference Tensor to make it aligned with waveform Tensor. + + Args: + waveform (torch.Tensor): Input waveform for evaluation. Tensor with dimensions `(batch, time)`. + reference (torch.Tensor): Non-matching clean reference. Tensor with dimensions `(batch, time_ref)`. + + Returns: + (torch.Tensor, torch.Tensor): The aligned waveform and reference Tensors + with same dimensions `(batch, time)`. + """ + T_waveform = waveform.shape[-1] + T_reference = reference.shape[-1] + if T_reference < T_waveform: + num_padding = T_waveform // T_reference + 1 + reference = torch.cat([reference for _ in range(num_padding)], dim=1) + return waveform, reference[:, :T_waveform] + + def forward(self, waveform: torch.Tensor, reference: torch.Tensor): + """Predict subjective evaluation metric score. 
+ + Args: + waveform (torch.Tensor): Input waveform for evaluation. Tensor with dimensions `(batch, time)`. + reference (torch.Tensor): Non-matching clean reference. Tensor with dimensions `(batch, time_ref)`. + + Returns: + (torch.Tensor): Subjective metric score. Tensor with dimensions `(batch,)`. + """ + waveform, reference = self._align_shapes(waveform, reference) + waveform = self.projector(self.ssl_model.extract_features(waveform)[0][-1]) + reference = self.projector(self.ssl_model.extract_features(reference)[0][-1]) + concat = torch.cat((reference, waveform), dim=2) + score_diff = self.predictor(concat) # Score difference compared to the reference + return 5 - score_diff + + +def squim_subjective_model( + ssl_type: str, + feat_dim: int, + proj_dim: int, + att_dim: int, +) -> SquimSubjective: + """Build a custome :class:`torchaudio.prototype.models.SquimSubjective` model. + + Args: + ssl_type (str): Type of self-supervised learning (SSL) models. + Must be one of ["wav2vec2_base", "wav2vec2_large"]. + feat_dim (int): Feature dimension of the SSL feature representation. + proj_dim (int): Output dimension of projection layer. + att_dim (int): Dimension of attention scores. 
+ """ + ssl_model = getattr(torchaudio.models, ssl_type)() + projector = nn.Linear(feat_dim, proj_dim) + predictor = Predictor(proj_dim * 2, att_dim) + return SquimSubjective(ssl_model, projector, predictor) + + +def squim_subjective_base() -> SquimSubjective: + """Build :class:`torchaudio.prototype.models.SquimSubjective` model with default arguments.""" + return squim_subjective_model( + ssl_type="wav2vec2_base", + feat_dim=768, + proj_dim=32, + att_dim=5, + ) diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/tacotron2.py b/venv/lib/python3.10/site-packages/torchaudio/models/tacotron2.py new file mode 100644 index 0000000000000000000000000000000000000000..978fb97c88db9c64a9b216a340e63075e53e2295 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/models/tacotron2.py @@ -0,0 +1,1046 @@ +# ***************************************************************************** +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the NVIDIA CORPORATION nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# ***************************************************************************** + +import warnings +from typing import List, Optional, Tuple, Union + +import torch +from torch import nn, Tensor +from torch.nn import functional as F + + +__all__ = [ + "Tacotron2", +] + + +def _get_linear_layer(in_dim: int, out_dim: int, bias: bool = True, w_init_gain: str = "linear") -> torch.nn.Linear: + r"""Linear layer with xavier uniform initialization. + + Args: + in_dim (int): Size of each input sample. + out_dim (int): Size of each output sample. + bias (bool, optional): If set to ``False``, the layer will not learn an additive bias. (Default: ``True``) + w_init_gain (str, optional): Parameter passed to ``torch.nn.init.calculate_gain`` + for setting the gain parameter of ``xavier_uniform_``. (Default: ``linear``) + + Returns: + (torch.nn.Linear): The corresponding linear layer. + """ + linear = torch.nn.Linear(in_dim, out_dim, bias=bias) + torch.nn.init.xavier_uniform_(linear.weight, gain=torch.nn.init.calculate_gain(w_init_gain)) + return linear + + +def _get_conv1d_layer( + in_channels: int, + out_channels: int, + kernel_size: int = 1, + stride: int = 1, + padding: Optional[Union[str, int, Tuple[int]]] = None, + dilation: int = 1, + bias: bool = True, + w_init_gain: str = "linear", +) -> torch.nn.Conv1d: + r"""1D convolution with xavier uniform initialization. + + Args: + in_channels (int): Number of channels in the input image. 
+ out_channels (int): Number of channels produced by the convolution. + kernel_size (int, optional): Number of channels in the input image. (Default: ``1``) + stride (int, optional): Number of channels in the input image. (Default: ``1``) + padding (str, int or tuple, optional): Padding added to both sides of the input. + (Default: dilation * (kernel_size - 1) / 2) + dilation (int, optional): Number of channels in the input image. (Default: ``1``) + w_init_gain (str, optional): Parameter passed to ``torch.nn.init.calculate_gain`` + for setting the gain parameter of ``xavier_uniform_``. (Default: ``linear``) + + Returns: + (torch.nn.Conv1d): The corresponding Conv1D layer. + """ + if padding is None: + if kernel_size % 2 != 1: + raise ValueError("kernel_size must be odd") + padding = int(dilation * (kernel_size - 1) / 2) + + conv1d = torch.nn.Conv1d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias, + ) + + torch.nn.init.xavier_uniform_(conv1d.weight, gain=torch.nn.init.calculate_gain(w_init_gain)) + + return conv1d + + +def _get_mask_from_lengths(lengths: Tensor) -> Tensor: + r"""Returns a binary mask based on ``lengths``. The ``i``-th row and ``j``-th column of the mask + is ``1`` if ``j`` is smaller than ``i``-th element of ``lengths. + + Args: + lengths (Tensor): The length of each element in the batch, with shape (n_batch, ). + + Returns: + mask (Tensor): The binary mask, with shape (n_batch, max of ``lengths``). + """ + max_len = torch.max(lengths).item() + ids = torch.arange(0, max_len, device=lengths.device, dtype=lengths.dtype) + mask = (ids < lengths.unsqueeze(1)).byte() + mask = torch.le(mask, 0) + return mask + + +class _LocationLayer(nn.Module): + r"""Location layer used in the Attention model. + + Args: + attention_n_filter (int): Number of filters for attention model. + attention_kernel_size (int): Kernel size for attention model. 
+ attention_hidden_dim (int): Dimension of attention hidden representation. + """ + + def __init__( + self, + attention_n_filter: int, + attention_kernel_size: int, + attention_hidden_dim: int, + ): + super().__init__() + padding = int((attention_kernel_size - 1) / 2) + self.location_conv = _get_conv1d_layer( + 2, + attention_n_filter, + kernel_size=attention_kernel_size, + padding=padding, + bias=False, + stride=1, + dilation=1, + ) + self.location_dense = _get_linear_layer( + attention_n_filter, attention_hidden_dim, bias=False, w_init_gain="tanh" + ) + + def forward(self, attention_weights_cat: Tensor) -> Tensor: + r"""Location layer used in the Attention model. + + Args: + attention_weights_cat (Tensor): Cumulative and previous attention weights + with shape (n_batch, 2, max of ``text_lengths``). + + Returns: + processed_attention (Tensor): Cumulative and previous attention weights + with shape (n_batch, ``attention_hidden_dim``). + """ + # (n_batch, attention_n_filter, text_lengths.max()) + processed_attention = self.location_conv(attention_weights_cat) + processed_attention = processed_attention.transpose(1, 2) + # (n_batch, text_lengths.max(), attention_hidden_dim) + processed_attention = self.location_dense(processed_attention) + return processed_attention + + +class _Attention(nn.Module): + r"""Locally sensitive attention model. + + Args: + attention_rnn_dim (int): Number of hidden units for RNN. + encoder_embedding_dim (int): Number of embedding dimensions in the Encoder. + attention_hidden_dim (int): Dimension of attention hidden representation. + attention_location_n_filter (int): Number of filters for Attention model. + attention_location_kernel_size (int): Kernel size for Attention model. 
+ """ + + def __init__( + self, + attention_rnn_dim: int, + encoder_embedding_dim: int, + attention_hidden_dim: int, + attention_location_n_filter: int, + attention_location_kernel_size: int, + ) -> None: + super().__init__() + self.query_layer = _get_linear_layer(attention_rnn_dim, attention_hidden_dim, bias=False, w_init_gain="tanh") + self.memory_layer = _get_linear_layer( + encoder_embedding_dim, attention_hidden_dim, bias=False, w_init_gain="tanh" + ) + self.v = _get_linear_layer(attention_hidden_dim, 1, bias=False) + self.location_layer = _LocationLayer( + attention_location_n_filter, + attention_location_kernel_size, + attention_hidden_dim, + ) + self.score_mask_value = -float("inf") + + def _get_alignment_energies(self, query: Tensor, processed_memory: Tensor, attention_weights_cat: Tensor) -> Tensor: + r"""Get the alignment vector. + + Args: + query (Tensor): Decoder output with shape (n_batch, n_mels * n_frames_per_step). + processed_memory (Tensor): Processed Encoder outputs + with shape (n_batch, max of ``text_lengths``, attention_hidden_dim). + attention_weights_cat (Tensor): Cumulative and previous attention weights + with shape (n_batch, 2, max of ``text_lengths``). + + Returns: + alignment (Tensor): attention weights, it is a tensor with shape (batch, max of ``text_lengths``). + """ + + processed_query = self.query_layer(query.unsqueeze(1)) + processed_attention_weights = self.location_layer(attention_weights_cat) + energies = self.v(torch.tanh(processed_query + processed_attention_weights + processed_memory)) + + alignment = energies.squeeze(2) + return alignment + + def forward( + self, + attention_hidden_state: Tensor, + memory: Tensor, + processed_memory: Tensor, + attention_weights_cat: Tensor, + mask: Tensor, + ) -> Tuple[Tensor, Tensor]: + r"""Pass the input through the Attention model. + + Args: + attention_hidden_state (Tensor): Attention rnn last output with shape (n_batch, ``attention_rnn_dim``). 
+ memory (Tensor): Encoder outputs with shape (n_batch, max of ``text_lengths``, ``encoder_embedding_dim``). + processed_memory (Tensor): Processed Encoder outputs + with shape (n_batch, max of ``text_lengths``, ``attention_hidden_dim``). + attention_weights_cat (Tensor): Previous and cumulative attention weights + with shape (n_batch, current_num_frames * 2, max of ``text_lengths``). + mask (Tensor): Binary mask for padded data with shape (n_batch, current_num_frames). + + Returns: + attention_context (Tensor): Context vector with shape (n_batch, ``encoder_embedding_dim``). + attention_weights (Tensor): Attention weights with shape (n_batch, max of ``text_lengths``). + """ + alignment = self._get_alignment_energies(attention_hidden_state, processed_memory, attention_weights_cat) + + alignment = alignment.masked_fill(mask, self.score_mask_value) + + attention_weights = F.softmax(alignment, dim=1) + attention_context = torch.bmm(attention_weights.unsqueeze(1), memory) + attention_context = attention_context.squeeze(1) + + return attention_context, attention_weights + + +class _Prenet(nn.Module): + r"""Prenet Module. It is consists of ``len(output_size)`` linear layers. + + Args: + in_dim (int): The size of each input sample. + output_sizes (list): The output dimension of each linear layers. + """ + + def __init__(self, in_dim: int, out_sizes: List[int]) -> None: + super().__init__() + in_sizes = [in_dim] + out_sizes[:-1] + self.layers = nn.ModuleList( + [_get_linear_layer(in_size, out_size, bias=False) for (in_size, out_size) in zip(in_sizes, out_sizes)] + ) + + def forward(self, x: Tensor) -> Tensor: + r"""Pass the input through Prenet. + + Args: + x (Tensor): The input sequence to Prenet with shape (n_batch, in_dim). + + Return: + x (Tensor): Tensor with shape (n_batch, sizes[-1]) + """ + + for linear in self.layers: + x = F.dropout(F.relu(linear(x)), p=0.5, training=True) + return x + + +class _Postnet(nn.Module): + r"""Postnet Module. 
+ + Args: + n_mels (int): Number of mel bins. + postnet_embedding_dim (int): Postnet embedding dimension. + postnet_kernel_size (int): Postnet kernel size. + postnet_n_convolution (int): Number of postnet convolutions. + """ + + def __init__( + self, + n_mels: int, + postnet_embedding_dim: int, + postnet_kernel_size: int, + postnet_n_convolution: int, + ): + super().__init__() + self.convolutions = nn.ModuleList() + + for i in range(postnet_n_convolution): + in_channels = n_mels if i == 0 else postnet_embedding_dim + out_channels = n_mels if i == (postnet_n_convolution - 1) else postnet_embedding_dim + init_gain = "linear" if i == (postnet_n_convolution - 1) else "tanh" + num_features = n_mels if i == (postnet_n_convolution - 1) else postnet_embedding_dim + self.convolutions.append( + nn.Sequential( + _get_conv1d_layer( + in_channels, + out_channels, + kernel_size=postnet_kernel_size, + stride=1, + padding=int((postnet_kernel_size - 1) / 2), + dilation=1, + w_init_gain=init_gain, + ), + nn.BatchNorm1d(num_features), + ) + ) + + self.n_convs = len(self.convolutions) + + def forward(self, x: Tensor) -> Tensor: + r"""Pass the input through Postnet. + + Args: + x (Tensor): The input sequence with shape (n_batch, ``n_mels``, max of ``mel_specgram_lengths``). + + Return: + x (Tensor): Tensor with shape (n_batch, ``n_mels``, max of ``mel_specgram_lengths``). + """ + + for i, conv in enumerate(self.convolutions): + if i < self.n_convs - 1: + x = F.dropout(torch.tanh(conv(x)), 0.5, training=self.training) + else: + x = F.dropout(conv(x), 0.5, training=self.training) + + return x + + +class _Encoder(nn.Module): + r"""Encoder Module. + + Args: + encoder_embedding_dim (int): Number of embedding dimensions in the encoder. + encoder_n_convolution (int): Number of convolution layers in the encoder. + encoder_kernel_size (int): The kernel size in the encoder. 
+ + Examples + >>> encoder = _Encoder(3, 512, 5) + >>> input = torch.rand(10, 20, 30) + >>> output = encoder(input) # shape: (10, 30, 512) + """ + + def __init__( + self, + encoder_embedding_dim: int, + encoder_n_convolution: int, + encoder_kernel_size: int, + ) -> None: + super().__init__() + + self.convolutions = nn.ModuleList() + for _ in range(encoder_n_convolution): + conv_layer = nn.Sequential( + _get_conv1d_layer( + encoder_embedding_dim, + encoder_embedding_dim, + kernel_size=encoder_kernel_size, + stride=1, + padding=int((encoder_kernel_size - 1) / 2), + dilation=1, + w_init_gain="relu", + ), + nn.BatchNorm1d(encoder_embedding_dim), + ) + self.convolutions.append(conv_layer) + + self.lstm = nn.LSTM( + encoder_embedding_dim, + int(encoder_embedding_dim / 2), + 1, + batch_first=True, + bidirectional=True, + ) + self.lstm.flatten_parameters() + + def forward(self, x: Tensor, input_lengths: Tensor) -> Tensor: + r"""Pass the input through the Encoder. + + Args: + x (Tensor): The input sequences with shape (n_batch, encoder_embedding_dim, n_seq). + input_lengths (Tensor): The length of each input sequence with shape (n_batch, ). + + Return: + x (Tensor): A tensor with shape (n_batch, n_seq, encoder_embedding_dim). + """ + + for conv in self.convolutions: + x = F.dropout(F.relu(conv(x)), 0.5, self.training) + + x = x.transpose(1, 2) + + input_lengths = input_lengths.cpu() + x = nn.utils.rnn.pack_padded_sequence(x, input_lengths, batch_first=True) + + outputs, _ = self.lstm(x) + outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True) + + return outputs + + +class _Decoder(nn.Module): + r"""Decoder with Attention model. + + Args: + n_mels (int): number of mel bins + n_frames_per_step (int): number of frames processed per step, only 1 is supported + encoder_embedding_dim (int): the number of embedding dimensions in the encoder. 
+ decoder_rnn_dim (int): number of units in decoder LSTM + decoder_max_step (int): maximum number of output mel spectrograms + decoder_dropout (float): dropout probability for decoder LSTM + decoder_early_stopping (bool): stop decoding when all samples are finished + attention_rnn_dim (int): number of units in attention LSTM + attention_hidden_dim (int): dimension of attention hidden representation + attention_location_n_filter (int): number of filters for attention model + attention_location_kernel_size (int): kernel size for attention model + attention_dropout (float): dropout probability for attention LSTM + prenet_dim (int): number of ReLU units in prenet layers + gate_threshold (float): probability threshold for stop token + """ + + def __init__( + self, + n_mels: int, + n_frames_per_step: int, + encoder_embedding_dim: int, + decoder_rnn_dim: int, + decoder_max_step: int, + decoder_dropout: float, + decoder_early_stopping: bool, + attention_rnn_dim: int, + attention_hidden_dim: int, + attention_location_n_filter: int, + attention_location_kernel_size: int, + attention_dropout: float, + prenet_dim: int, + gate_threshold: float, + ) -> None: + + super().__init__() + self.n_mels = n_mels + self.n_frames_per_step = n_frames_per_step + self.encoder_embedding_dim = encoder_embedding_dim + self.attention_rnn_dim = attention_rnn_dim + self.decoder_rnn_dim = decoder_rnn_dim + self.prenet_dim = prenet_dim + self.decoder_max_step = decoder_max_step + self.gate_threshold = gate_threshold + self.attention_dropout = attention_dropout + self.decoder_dropout = decoder_dropout + self.decoder_early_stopping = decoder_early_stopping + + self.prenet = _Prenet(n_mels * n_frames_per_step, [prenet_dim, prenet_dim]) + + self.attention_rnn = nn.LSTMCell(prenet_dim + encoder_embedding_dim, attention_rnn_dim) + + self.attention_layer = _Attention( + attention_rnn_dim, + encoder_embedding_dim, + attention_hidden_dim, + attention_location_n_filter, + attention_location_kernel_size, + ) + 
+ self.decoder_rnn = nn.LSTMCell(attention_rnn_dim + encoder_embedding_dim, decoder_rnn_dim, True) + + self.linear_projection = _get_linear_layer(decoder_rnn_dim + encoder_embedding_dim, n_mels * n_frames_per_step) + + self.gate_layer = _get_linear_layer( + decoder_rnn_dim + encoder_embedding_dim, 1, bias=True, w_init_gain="sigmoid" + ) + + def _get_initial_frame(self, memory: Tensor) -> Tensor: + r"""Gets all zeros frames to use as the first decoder input. + + Args: + memory (Tensor): Encoder outputs with shape (n_batch, max of ``text_lengths``, ``encoder_embedding_dim``). + + Returns: + decoder_input (Tensor): all zeros frames with shape + (n_batch, max of ``text_lengths``, ``n_mels * n_frames_per_step``). + """ + + n_batch = memory.size(0) + dtype = memory.dtype + device = memory.device + decoder_input = torch.zeros(n_batch, self.n_mels * self.n_frames_per_step, dtype=dtype, device=device) + return decoder_input + + def _initialize_decoder_states( + self, memory: Tensor + ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: + r"""Initializes attention rnn states, decoder rnn states, attention + weights, attention cumulative weights, attention context, stores memory + and stores processed memory. + + Args: + memory (Tensor): Encoder outputs with shape (n_batch, max of ``text_lengths``, ``encoder_embedding_dim``). + + Returns: + attention_hidden (Tensor): Hidden state of the attention LSTM with shape (n_batch, ``attention_rnn_dim``). + attention_cell (Tensor): Hidden state of the attention LSTM with shape (n_batch, ``attention_rnn_dim``). + decoder_hidden (Tensor): Hidden state of the decoder LSTM with shape (n_batch, ``decoder_rnn_dim``). + decoder_cell (Tensor): Hidden state of the decoder LSTM with shape (n_batch, ``decoder_rnn_dim``). + attention_weights (Tensor): Attention weights with shape (n_batch, max of ``text_lengths``). + attention_weights_cum (Tensor): Cumulated attention weights with shape (n_batch, max of ``text_lengths``). 
+ attention_context (Tensor): Context vector with shape (n_batch, ``encoder_embedding_dim``). + processed_memory (Tensor): Processed encoder outputs + with shape (n_batch, max of ``text_lengths``, ``attention_hidden_dim``). + """ + n_batch = memory.size(0) + max_time = memory.size(1) + dtype = memory.dtype + device = memory.device + + attention_hidden = torch.zeros(n_batch, self.attention_rnn_dim, dtype=dtype, device=device) + attention_cell = torch.zeros(n_batch, self.attention_rnn_dim, dtype=dtype, device=device) + + decoder_hidden = torch.zeros(n_batch, self.decoder_rnn_dim, dtype=dtype, device=device) + decoder_cell = torch.zeros(n_batch, self.decoder_rnn_dim, dtype=dtype, device=device) + + attention_weights = torch.zeros(n_batch, max_time, dtype=dtype, device=device) + attention_weights_cum = torch.zeros(n_batch, max_time, dtype=dtype, device=device) + attention_context = torch.zeros(n_batch, self.encoder_embedding_dim, dtype=dtype, device=device) + + processed_memory = self.attention_layer.memory_layer(memory) + + return ( + attention_hidden, + attention_cell, + decoder_hidden, + decoder_cell, + attention_weights, + attention_weights_cum, + attention_context, + processed_memory, + ) + + def _parse_decoder_inputs(self, decoder_inputs: Tensor) -> Tensor: + r"""Prepares decoder inputs. + + Args: + decoder_inputs (Tensor): Inputs used for teacher-forced training, i.e. mel-specs, + with shape (n_batch, ``n_mels``, max of ``mel_specgram_lengths``) + + Returns: + inputs (Tensor): Processed decoder inputs with shape (max of ``mel_specgram_lengths``, n_batch, ``n_mels``). 
+ """ + # (n_batch, n_mels, mel_specgram_lengths.max()) -> (n_batch, mel_specgram_lengths.max(), n_mels) + decoder_inputs = decoder_inputs.transpose(1, 2) + decoder_inputs = decoder_inputs.view( + decoder_inputs.size(0), + int(decoder_inputs.size(1) / self.n_frames_per_step), + -1, + ) + # (n_batch, mel_specgram_lengths.max(), n_mels) -> (mel_specgram_lengths.max(), n_batch, n_mels) + decoder_inputs = decoder_inputs.transpose(0, 1) + return decoder_inputs + + def _parse_decoder_outputs( + self, mel_specgram: Tensor, gate_outputs: Tensor, alignments: Tensor + ) -> Tuple[Tensor, Tensor, Tensor]: + r"""Prepares decoder outputs for output + + Args: + mel_specgram (Tensor): mel spectrogram with shape (max of ``mel_specgram_lengths``, n_batch, ``n_mels``) + gate_outputs (Tensor): predicted stop token with shape (max of ``mel_specgram_lengths``, n_batch) + alignments (Tensor): sequence of attention weights from the decoder + with shape (max of ``mel_specgram_lengths``, n_batch, max of ``text_lengths``) + + Returns: + mel_specgram (Tensor): mel spectrogram with shape (n_batch, ``n_mels``, max of ``mel_specgram_lengths``) + gate_outputs (Tensor): predicted stop token with shape (n_batch, max of ``mel_specgram_lengths``) + alignments (Tensor): sequence of attention weights from the decoder + with shape (n_batch, max of ``mel_specgram_lengths``, max of ``text_lengths``) + """ + # (mel_specgram_lengths.max(), n_batch, text_lengths.max()) + # -> (n_batch, mel_specgram_lengths.max(), text_lengths.max()) + alignments = alignments.transpose(0, 1).contiguous() + # (mel_specgram_lengths.max(), n_batch) -> (n_batch, mel_specgram_lengths.max()) + gate_outputs = gate_outputs.transpose(0, 1).contiguous() + # (mel_specgram_lengths.max(), n_batch, n_mels) -> (n_batch, mel_specgram_lengths.max(), n_mels) + mel_specgram = mel_specgram.transpose(0, 1).contiguous() + # decouple frames per step + shape = (mel_specgram.shape[0], -1, self.n_mels) + mel_specgram = mel_specgram.view(*shape) + # 
(n_batch, mel_specgram_lengths.max(), n_mels) -> (n_batch, n_mels, T_out) + mel_specgram = mel_specgram.transpose(1, 2) + + return mel_specgram, gate_outputs, alignments + + def decode( + self, + decoder_input: Tensor, + attention_hidden: Tensor, + attention_cell: Tensor, + decoder_hidden: Tensor, + decoder_cell: Tensor, + attention_weights: Tensor, + attention_weights_cum: Tensor, + attention_context: Tensor, + memory: Tensor, + processed_memory: Tensor, + mask: Tensor, + ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: + r"""Decoder step using stored states, attention and memory + + Args: + decoder_input (Tensor): Output of the Prenet with shape (n_batch, ``prenet_dim``). + attention_hidden (Tensor): Hidden state of the attention LSTM with shape (n_batch, ``attention_rnn_dim``). + attention_cell (Tensor): Hidden state of the attention LSTM with shape (n_batch, ``attention_rnn_dim``). + decoder_hidden (Tensor): Hidden state of the decoder LSTM with shape (n_batch, ``decoder_rnn_dim``). + decoder_cell (Tensor): Hidden state of the decoder LSTM with shape (n_batch, ``decoder_rnn_dim``). + attention_weights (Tensor): Attention weights with shape (n_batch, max of ``text_lengths``). + attention_weights_cum (Tensor): Cumulated attention weights with shape (n_batch, max of ``text_lengths``). + attention_context (Tensor): Context vector with shape (n_batch, ``encoder_embedding_dim``). + memory (Tensor): Encoder output with shape (n_batch, max of ``text_lengths``, ``encoder_embedding_dim``). + processed_memory (Tensor): Processed Encoder outputs + with shape (n_batch, max of ``text_lengths``, ``attention_hidden_dim``). + mask (Tensor): Binary mask for padded data with shape (n_batch, current_num_frames). + + Returns: + decoder_output: Predicted mel spectrogram for the current frame with shape (n_batch, ``n_mels``). + gate_prediction (Tensor): Prediction of the stop token with shape (n_batch, ``1``). 
+ attention_hidden (Tensor): Hidden state of the attention LSTM with shape (n_batch, ``attention_rnn_dim``). + attention_cell (Tensor): Hidden state of the attention LSTM with shape (n_batch, ``attention_rnn_dim``). + decoder_hidden (Tensor): Hidden state of the decoder LSTM with shape (n_batch, ``decoder_rnn_dim``). + decoder_cell (Tensor): Hidden state of the decoder LSTM with shape (n_batch, ``decoder_rnn_dim``). + attention_weights (Tensor): Attention weights with shape (n_batch, max of ``text_lengths``). + attention_weights_cum (Tensor): Cumulated attention weights with shape (n_batch, max of ``text_lengths``). + attention_context (Tensor): Context vector with shape (n_batch, ``encoder_embedding_dim``). + """ + cell_input = torch.cat((decoder_input, attention_context), -1) + + attention_hidden, attention_cell = self.attention_rnn(cell_input, (attention_hidden, attention_cell)) + attention_hidden = F.dropout(attention_hidden, self.attention_dropout, self.training) + + attention_weights_cat = torch.cat((attention_weights.unsqueeze(1), attention_weights_cum.unsqueeze(1)), dim=1) + attention_context, attention_weights = self.attention_layer( + attention_hidden, memory, processed_memory, attention_weights_cat, mask + ) + + attention_weights_cum += attention_weights + decoder_input = torch.cat((attention_hidden, attention_context), -1) + + decoder_hidden, decoder_cell = self.decoder_rnn(decoder_input, (decoder_hidden, decoder_cell)) + decoder_hidden = F.dropout(decoder_hidden, self.decoder_dropout, self.training) + + decoder_hidden_attention_context = torch.cat((decoder_hidden, attention_context), dim=1) + decoder_output = self.linear_projection(decoder_hidden_attention_context) + + gate_prediction = self.gate_layer(decoder_hidden_attention_context) + + return ( + decoder_output, + gate_prediction, + attention_hidden, + attention_cell, + decoder_hidden, + decoder_cell, + attention_weights, + attention_weights_cum, + attention_context, + ) + + def forward( + self, 
memory: Tensor, mel_specgram_truth: Tensor, memory_lengths: Tensor + ) -> Tuple[Tensor, Tensor, Tensor]: + r"""Decoder forward pass for training. + + Args: + memory (Tensor): Encoder outputs + with shape (n_batch, max of ``text_lengths``, ``encoder_embedding_dim``). + mel_specgram_truth (Tensor): Decoder ground-truth mel-specs for teacher forcing + with shape (n_batch, ``n_mels``, max of ``mel_specgram_lengths``). + memory_lengths (Tensor): Encoder output lengths for attention masking + (the same as ``text_lengths``) with shape (n_batch, ). + + Returns: + mel_specgram (Tensor): Predicted mel spectrogram + with shape (n_batch, ``n_mels``, max of ``mel_specgram_lengths``). + gate_outputs (Tensor): Predicted stop token for each timestep + with shape (n_batch, max of ``mel_specgram_lengths``). + alignments (Tensor): Sequence of attention weights from the decoder + with shape (n_batch, max of ``mel_specgram_lengths``, max of ``text_lengths``). + """ + + decoder_input = self._get_initial_frame(memory).unsqueeze(0) + decoder_inputs = self._parse_decoder_inputs(mel_specgram_truth) + decoder_inputs = torch.cat((decoder_input, decoder_inputs), dim=0) + decoder_inputs = self.prenet(decoder_inputs) + + mask = _get_mask_from_lengths(memory_lengths) + ( + attention_hidden, + attention_cell, + decoder_hidden, + decoder_cell, + attention_weights, + attention_weights_cum, + attention_context, + processed_memory, + ) = self._initialize_decoder_states(memory) + + mel_outputs, gate_outputs, alignments = [], [], [] + while len(mel_outputs) < decoder_inputs.size(0) - 1: + decoder_input = decoder_inputs[len(mel_outputs)] + ( + mel_output, + gate_output, + attention_hidden, + attention_cell, + decoder_hidden, + decoder_cell, + attention_weights, + attention_weights_cum, + attention_context, + ) = self.decode( + decoder_input, + attention_hidden, + attention_cell, + decoder_hidden, + decoder_cell, + attention_weights, + attention_weights_cum, + attention_context, + memory, + 
processed_memory, + mask, + ) + + mel_outputs += [mel_output.squeeze(1)] + gate_outputs += [gate_output.squeeze(1)] + alignments += [attention_weights] + + mel_specgram, gate_outputs, alignments = self._parse_decoder_outputs( + torch.stack(mel_outputs), torch.stack(gate_outputs), torch.stack(alignments) + ) + + return mel_specgram, gate_outputs, alignments + + def _get_go_frame(self, memory: Tensor) -> Tensor: + """Gets all zeros frames to use as the first decoder input + + args: + memory (Tensor): Encoder outputs + with shape (n_batch, max of ``text_lengths``, ``encoder_embedding_dim``). + + returns: + decoder_input (Tensor): All zeros frames with shape(n_batch, ``n_mels`` * ``n_frame_per_step``). + """ + + n_batch = memory.size(0) + dtype = memory.dtype + device = memory.device + decoder_input = torch.zeros(n_batch, self.n_mels * self.n_frames_per_step, dtype=dtype, device=device) + return decoder_input + + @torch.jit.export + def infer(self, memory: Tensor, memory_lengths: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]: + """Decoder inference + + Args: + memory (Tensor): Encoder outputs + with shape (n_batch, max of ``text_lengths``, ``encoder_embedding_dim``). + memory_lengths (Tensor): Encoder output lengths for attention masking + (the same as ``text_lengths``) with shape (n_batch, ). + + Returns: + mel_specgram (Tensor): Predicted mel spectrogram + with shape (n_batch, ``n_mels``, max of ``mel_specgram_lengths``). + mel_specgram_lengths (Tensor): the length of the predicted mel spectrogram (n_batch, )) + gate_outputs (Tensor): Predicted stop token for each timestep + with shape (n_batch, max of ``mel_specgram_lengths``). + alignments (Tensor): Sequence of attention weights from the decoder + with shape (n_batch, max of ``mel_specgram_lengths``, max of ``text_lengths``). 
+ """ + batch_size, device = memory.size(0), memory.device + + decoder_input = self._get_go_frame(memory) + + mask = _get_mask_from_lengths(memory_lengths) + ( + attention_hidden, + attention_cell, + decoder_hidden, + decoder_cell, + attention_weights, + attention_weights_cum, + attention_context, + processed_memory, + ) = self._initialize_decoder_states(memory) + + mel_specgram_lengths = torch.zeros([batch_size], dtype=torch.int32, device=device) + finished = torch.zeros([batch_size], dtype=torch.bool, device=device) + mel_specgrams: List[Tensor] = [] + gate_outputs: List[Tensor] = [] + alignments: List[Tensor] = [] + for _ in range(self.decoder_max_step): + decoder_input = self.prenet(decoder_input) + ( + mel_specgram, + gate_output, + attention_hidden, + attention_cell, + decoder_hidden, + decoder_cell, + attention_weights, + attention_weights_cum, + attention_context, + ) = self.decode( + decoder_input, + attention_hidden, + attention_cell, + decoder_hidden, + decoder_cell, + attention_weights, + attention_weights_cum, + attention_context, + memory, + processed_memory, + mask, + ) + + mel_specgrams.append(mel_specgram.unsqueeze(0)) + gate_outputs.append(gate_output.transpose(0, 1)) + alignments.append(attention_weights) + mel_specgram_lengths[~finished] += 1 + + finished |= torch.sigmoid(gate_output.squeeze(1)) > self.gate_threshold + if self.decoder_early_stopping and torch.all(finished): + break + + decoder_input = mel_specgram + + if len(mel_specgrams) == self.decoder_max_step: + warnings.warn( + "Reached max decoder steps. The generated spectrogram might not cover " "the whole transcript." 
+ ) + + mel_specgrams = torch.cat(mel_specgrams, dim=0) + gate_outputs = torch.cat(gate_outputs, dim=0) + alignments = torch.cat(alignments, dim=0) + + mel_specgrams, gate_outputs, alignments = self._parse_decoder_outputs(mel_specgrams, gate_outputs, alignments) + + return mel_specgrams, mel_specgram_lengths, gate_outputs, alignments + + +class Tacotron2(nn.Module): + r"""Tacotron2 model from *Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions* + :cite:`shen2018natural` based on the implementation from + `Nvidia Deep Learning Examples `_. + + See Also: + * :class:`torchaudio.pipelines.Tacotron2TTSBundle`: TTS pipeline with pretrained model. + + Args: + mask_padding (bool, optional): Use mask padding (Default: ``False``). + n_mels (int, optional): Number of mel bins (Default: ``80``). + n_symbol (int, optional): Number of symbols for the input text (Default: ``148``). + n_frames_per_step (int, optional): Number of frames processed per step, only 1 is supported (Default: ``1``). + symbol_embedding_dim (int, optional): Input embedding dimension (Default: ``512``). + encoder_n_convolution (int, optional): Number of encoder convolutions (Default: ``3``). + encoder_kernel_size (int, optional): Encoder kernel size (Default: ``5``). + encoder_embedding_dim (int, optional): Encoder embedding dimension (Default: ``512``). + decoder_rnn_dim (int, optional): Number of units in decoder LSTM (Default: ``1024``). + decoder_max_step (int, optional): Maximum number of output mel spectrograms (Default: ``2000``). + decoder_dropout (float, optional): Dropout probability for decoder LSTM (Default: ``0.1``). + decoder_early_stopping (bool, optional): Continue decoding after all samples are finished (Default: ``True``). + attention_rnn_dim (int, optional): Number of units in attention LSTM (Default: ``1024``). + attention_hidden_dim (int, optional): Dimension of attention hidden representation (Default: ``128``). 
+ attention_location_n_filter (int, optional): Number of filters for attention model (Default: ``32``). + attention_location_kernel_size (int, optional): Kernel size for attention model (Default: ``31``). + attention_dropout (float, optional): Dropout probability for attention LSTM (Default: ``0.1``). + prenet_dim (int, optional): Number of ReLU units in prenet layers (Default: ``256``). + postnet_n_convolution (int, optional): Number of postnet convolutions (Default: ``5``). + postnet_kernel_size (int, optional): Postnet kernel size (Default: ``5``). + postnet_embedding_dim (int, optional): Postnet embedding dimension (Default: ``512``). + gate_threshold (float, optional): Probability threshold for stop token (Default: ``0.5``). + """ + + def __init__( + self, + mask_padding: bool = False, + n_mels: int = 80, + n_symbol: int = 148, + n_frames_per_step: int = 1, + symbol_embedding_dim: int = 512, + encoder_embedding_dim: int = 512, + encoder_n_convolution: int = 3, + encoder_kernel_size: int = 5, + decoder_rnn_dim: int = 1024, + decoder_max_step: int = 2000, + decoder_dropout: float = 0.1, + decoder_early_stopping: bool = True, + attention_rnn_dim: int = 1024, + attention_hidden_dim: int = 128, + attention_location_n_filter: int = 32, + attention_location_kernel_size: int = 31, + attention_dropout: float = 0.1, + prenet_dim: int = 256, + postnet_n_convolution: int = 5, + postnet_kernel_size: int = 5, + postnet_embedding_dim: int = 512, + gate_threshold: float = 0.5, + ) -> None: + super().__init__() + + self.mask_padding = mask_padding + self.n_mels = n_mels + self.n_frames_per_step = n_frames_per_step + self.embedding = nn.Embedding(n_symbol, symbol_embedding_dim) + torch.nn.init.xavier_uniform_(self.embedding.weight) + self.encoder = _Encoder(encoder_embedding_dim, encoder_n_convolution, encoder_kernel_size) + self.decoder = _Decoder( + n_mels, + n_frames_per_step, + encoder_embedding_dim, + decoder_rnn_dim, + decoder_max_step, + decoder_dropout, + 
decoder_early_stopping, + attention_rnn_dim, + attention_hidden_dim, + attention_location_n_filter, + attention_location_kernel_size, + attention_dropout, + prenet_dim, + gate_threshold, + ) + self.postnet = _Postnet(n_mels, postnet_embedding_dim, postnet_kernel_size, postnet_n_convolution) + + def forward( + self, + tokens: Tensor, + token_lengths: Tensor, + mel_specgram: Tensor, + mel_specgram_lengths: Tensor, + ) -> Tuple[Tensor, Tensor, Tensor, Tensor]: + r"""Pass the input through the Tacotron2 model. This is in teacher + forcing mode, which is generally used for training. + + The input ``tokens`` should be padded with zeros to length max of ``token_lengths``. + The input ``mel_specgram`` should be padded with zeros to length max of ``mel_specgram_lengths``. + + Args: + tokens (Tensor): The input tokens to Tacotron2 with shape `(n_batch, max of token_lengths)`. + token_lengths (Tensor): The valid length of each sample in ``tokens`` with shape `(n_batch, )`. + mel_specgram (Tensor): The target mel spectrogram + with shape `(n_batch, n_mels, max of mel_specgram_lengths)`. + mel_specgram_lengths (Tensor): The length of each mel spectrogram with shape `(n_batch, )`. + + Returns: + [Tensor, Tensor, Tensor, Tensor]: + Tensor + Mel spectrogram before Postnet with shape `(n_batch, n_mels, max of mel_specgram_lengths)`. + Tensor + Mel spectrogram after Postnet with shape `(n_batch, n_mels, max of mel_specgram_lengths)`. + Tensor + The output for stop token at each time step with shape `(n_batch, max of mel_specgram_lengths)`. + Tensor + Sequence of attention weights from the decoder with + shape `(n_batch, max of mel_specgram_lengths, max of token_lengths)`. 
+ """ + + embedded_inputs = self.embedding(tokens).transpose(1, 2) + + encoder_outputs = self.encoder(embedded_inputs, token_lengths) + mel_specgram, gate_outputs, alignments = self.decoder( + encoder_outputs, mel_specgram, memory_lengths=token_lengths + ) + + mel_specgram_postnet = self.postnet(mel_specgram) + mel_specgram_postnet = mel_specgram + mel_specgram_postnet + + if self.mask_padding: + mask = _get_mask_from_lengths(mel_specgram_lengths) + mask = mask.expand(self.n_mels, mask.size(0), mask.size(1)) + mask = mask.permute(1, 0, 2) + + mel_specgram.masked_fill_(mask, 0.0) + mel_specgram_postnet.masked_fill_(mask, 0.0) + gate_outputs.masked_fill_(mask[:, 0, :], 1e3) + + return mel_specgram, mel_specgram_postnet, gate_outputs, alignments + + @torch.jit.export + def infer(self, tokens: Tensor, lengths: Optional[Tensor] = None) -> Tuple[Tensor, Tensor, Tensor]: + r"""Using Tacotron2 for inference. The input is a batch of encoded + sentences (``tokens``) and its corresponding lengths (``lengths``). The + output is the generated mel spectrograms, its corresponding lengths, and + the attention weights from the decoder. + + The input `tokens` should be padded with zeros to length max of ``lengths``. + + Args: + tokens (Tensor): The input tokens to Tacotron2 with shape `(n_batch, max of lengths)`. + lengths (Tensor or None, optional): + The valid length of each sample in ``tokens`` with shape `(n_batch, )`. + If ``None``, it is assumed that the all the tokens are valid. Default: ``None`` + + Returns: + (Tensor, Tensor, Tensor): + Tensor + The predicted mel spectrogram with shape `(n_batch, n_mels, max of mel_specgram_lengths)`. + Tensor + The length of the predicted mel spectrogram with shape `(n_batch, )`. + Tensor + Sequence of attention weights from the decoder with shape + `(n_batch, max of mel_specgram_lengths, max of lengths)`. 
+ """ + n_batch, max_length = tokens.shape + if lengths is None: + lengths = torch.tensor([max_length]).expand(n_batch).to(tokens.device, tokens.dtype) + + assert lengths is not None # For TorchScript compiler + embedded_inputs = self.embedding(tokens).transpose(1, 2) + encoder_outputs = self.encoder(embedded_inputs, lengths) + mel_specgram, mel_specgram_lengths, _, alignments = self.decoder.infer(encoder_outputs, lengths) + + mel_outputs_postnet = self.postnet(mel_specgram) + mel_outputs_postnet = mel_specgram + mel_outputs_postnet + + alignments = alignments.unfold(1, n_batch, n_batch).transpose(0, 2) + + return mel_outputs_postnet, mel_specgram_lengths, alignments diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/wav2letter.py b/venv/lib/python3.10/site-packages/torchaudio/models/wav2letter.py new file mode 100644 index 0000000000000000000000000000000000000000..d776131686d1f65982a565088e72e45e7b7c107f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/models/wav2letter.py @@ -0,0 +1,72 @@ +from torch import nn, Tensor + +__all__ = [ + "Wav2Letter", +] + + +class Wav2Letter(nn.Module): + r"""Wav2Letter model architecture from *Wav2Letter: an End-to-End ConvNet-based Speech + Recognition System* :cite:`collobert2016wav2letter`. + + See Also: + * `Training example `__ + + Args: + num_classes (int, optional): Number of classes to be classified. (Default: ``40``) + input_type (str, optional): Wav2Letter can use as input: ``waveform``, ``power_spectrum`` + or ``mfcc`` (Default: ``waveform``). + num_features (int, optional): Number of input features that the network will receive (Default: ``1``). 
+ """ + + def __init__(self, num_classes: int = 40, input_type: str = "waveform", num_features: int = 1) -> None: + super().__init__() + + acoustic_num_features = 250 if input_type == "waveform" else num_features + acoustic_model = nn.Sequential( + nn.Conv1d(in_channels=acoustic_num_features, out_channels=250, kernel_size=48, stride=2, padding=23), + nn.ReLU(inplace=True), + nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3), + nn.ReLU(inplace=True), + nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3), + nn.ReLU(inplace=True), + nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3), + nn.ReLU(inplace=True), + nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3), + nn.ReLU(inplace=True), + nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3), + nn.ReLU(inplace=True), + nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3), + nn.ReLU(inplace=True), + nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3), + nn.ReLU(inplace=True), + nn.Conv1d(in_channels=250, out_channels=2000, kernel_size=32, stride=1, padding=16), + nn.ReLU(inplace=True), + nn.Conv1d(in_channels=2000, out_channels=2000, kernel_size=1, stride=1, padding=0), + nn.ReLU(inplace=True), + nn.Conv1d(in_channels=2000, out_channels=num_classes, kernel_size=1, stride=1, padding=0), + nn.ReLU(inplace=True), + ) + + if input_type == "waveform": + waveform_model = nn.Sequential( + nn.Conv1d(in_channels=num_features, out_channels=250, kernel_size=250, stride=160, padding=45), + nn.ReLU(inplace=True), + ) + self.acoustic_model = nn.Sequential(waveform_model, acoustic_model) + + if input_type in ["power_spectrum", "mfcc"]: + self.acoustic_model = acoustic_model + + def forward(self, x: Tensor) -> Tensor: + r""" + Args: + x (torch.Tensor): Tensor of dimension (batch_size, num_features, input_length). 
+ + Returns: + Tensor: Predictor tensor of dimension (batch_size, number_of_classes, input_length). + """ + + x = self.acoustic_model(x) + x = nn.functional.log_softmax(x, dim=1) + return x diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/__init__.py b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bb83403f5719b68c790d2f9f934f8c80acea3557 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/__init__.py @@ -0,0 +1,45 @@ +from . import utils +from .model import ( + hubert_base, + hubert_large, + hubert_pretrain_base, + hubert_pretrain_large, + hubert_pretrain_model, + hubert_pretrain_xlarge, + hubert_xlarge, + HuBERTPretrainModel, + wav2vec2_base, + wav2vec2_large, + wav2vec2_large_lv60k, + wav2vec2_model, + wav2vec2_xlsr_1b, + wav2vec2_xlsr_2b, + wav2vec2_xlsr_300m, + Wav2Vec2Model, + wavlm_base, + wavlm_large, + wavlm_model, +) + +__all__ = [ + "Wav2Vec2Model", + "HuBERTPretrainModel", + "wavlm_model", + "wavlm_base", + "wavlm_large", + "wav2vec2_model", + "wav2vec2_base", + "wav2vec2_large", + "wav2vec2_large_lv60k", + "hubert_base", + "hubert_large", + "hubert_xlarge", + "hubert_pretrain_model", + "hubert_pretrain_base", + "hubert_pretrain_large", + "hubert_pretrain_xlarge", + "utils", + "wav2vec2_xlsr_300m", + "wav2vec2_xlsr_1b", + "wav2vec2_xlsr_2b", +] diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c652c81ed4801a9c96fed4cc0e636e935860f402 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/__pycache__/components.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/__pycache__/components.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..edab28b02c55ed34a66b2021e65f5c572618aa1c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/__pycache__/components.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/__pycache__/model.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c1131ea9bb8f5f2b915bb6530a44000208d6ce8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/__pycache__/model.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/__pycache__/wavlm_attention.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/__pycache__/wavlm_attention.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3e59d27848b31b98e04b0c7a8a9e8e73214afb2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/__pycache__/wavlm_attention.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/components.py b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/components.py new file mode 100644 index 0000000000000000000000000000000000000000..480a6ae50921efebf5930dc21caaa3a1a44945dd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/components.py @@ -0,0 +1,1167 @@ +import logging +from typing import List, Optional, Tuple + +import torch +from torch import nn, Tensor +from torch.nn import Module, Parameter + +from .wavlm_attention import WavLMSelfAttention + +_LG = logging.getLogger(__name__) + + +def _init_transformer_params(module): + """ + Initialize the weights of Transformer module in 
Wav2Vec2/HuBERT. + + If the module is ``nn.Linear``, normalize the weight with mean 0 and standard deviation 0.02. + If ``bias`` is set to ``True`` in the module, set ``bias`` to 0. + + If the module is ``nn.Embedding``, normalize the weight with mean 0 and standard deviation 0.02. + If ``padding_idx`` is not None, set the weight of padding to 0. + + Note: + Ths method corresponds to + `init_bert_params + `__ + in the original ``fairseq`` implementation. + """ + + def normal_(data): + data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device)) + + if isinstance(module, nn.Linear): + normal_(module.weight.data) + if module.bias is not None: + module.bias.data.zero_() + if isinstance(module, nn.Embedding): + normal_(module.weight.data) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + +class LayerNorm(nn.LayerNorm): + """Layer norm with transpose""" + + def forward(self, input: Tensor) -> Tensor: + x = input.transpose(-2, -1) + x = nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) + x = x.transpose(-2, -1) + return x + + +class ConvLayerBlock(Module): + """Convolution unit of FeatureExtractor""" + + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int, + stride: int, + bias: bool, + layer_norm: Optional[Module], + ): + super().__init__() + self.kernel_size = kernel_size + self.stride = stride + self.layer_norm = layer_norm + self.conv = nn.Conv1d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + bias=bias, + ) + + def forward( + self, + x: Tensor, + length: Optional[Tensor], + ) -> Tuple[Tensor, Optional[Tensor]]: + """ + Args: + x (Tensor): Shape: ``[batch, in_channels, in_frame]``. + length (Tensor or None, optional): Shape ``[batch, ]``. + Returns: + Tensor: Shape ``[batch, out_channels, out_frames]``. + Optional[Tensor]: Shape ``[batch, ]``. 
+ """ + x = self.conv(x) + if self.layer_norm is not None: + x = self.layer_norm(x) + x = nn.functional.gelu(x) + + if length is not None: + length = torch.div(length - self.kernel_size, self.stride, rounding_mode="floor") + 1 + # When input length is 0, the resulting length can be negative. So fix it here. + length = torch.max(torch.zeros_like(length), length) + return x, length + + +class FeatureExtractor(Module): + """Extract features from audio + + Args: + conv_layers (nn.ModuleList): + convolution layers + """ + + def __init__( + self, + conv_layers: nn.ModuleList, + ): + super().__init__() + self.conv_layers = conv_layers + + def forward( + self, + x: Tensor, + length: Optional[Tensor], + ) -> Tuple[Tensor, Optional[Tensor]]: + """ + Args: + x (Tensor): + Input Tensor representing a batch of audio, + shape: ``[batch, time]``. + length (Tensor or None, optional): + Valid length of each input sample. shape: ``[batch, ]``. + + Returns: + Tensor: + The resulting feature, shape: ``[batch, frame, feature]`` + Optional[Tensor]: + Valid length of each output sample. shape: ``[batch, ]``. + """ + if x.ndim != 2: + raise ValueError(f"Expected the input Tensor to be 2D (batch, time). Found: {list(x.shape)}") + + x = x.unsqueeze(1) # (batch, channel==1, frame) + for layer in self.conv_layers: + x, length = layer(x, length) # (batch, feature, frame) + x = x.transpose(1, 2) # (batch, frame, feature) + return x, length + + +class FeatureProjection(Module): + """Layer that connects FeatureExtractor and Encoder + + Projects features to encoder dimension. + + Args: + in_features (int): Input feature dim. + out_features (int): Output feature dim. + dropout (float): Dropout probability. 
+ """ + + def __init__( + self, + in_features: int, + out_features: int, + dropout: float, + ): + super().__init__() + self.layer_norm = nn.LayerNorm(in_features) + self.projection = nn.Linear( + in_features, + out_features, + ) + self.dropout = nn.Dropout(dropout) + + def forward(self, x): + """ + Args: + x (Tensor): + Feature Tensor. shape: ``[batch, frame, in_feature]`` + Returns: + Tensor: Projected features. ``[batch, frame, out_feature]``. + """ + x = self.layer_norm(x) + x = self.projection(x) + x = self.dropout(x) + return x + + +class ConvolutionalPositionalEmbedding(Module): + """Positional embedding which is placed at the beginning of Transformer. + + Args: + embed_dim (int): Feature dimension of the input Tensor. + kernel_size (int): The number of frames to be use. + groups (int): The number of groups in feature dimensions. + """ + + def __init__( + self, + embed_dim: int, + kernel_size: int, + groups: int, + ): + super().__init__() + self.embed_dim = embed_dim + self.kernel_size = kernel_size + self.conv = nn.Conv1d( + in_channels=embed_dim, + out_channels=embed_dim, + kernel_size=kernel_size, + padding=kernel_size // 2, + groups=groups, + ) + + self.conv = nn.utils.parametrizations.weight_norm(self.conv, name="weight", dim=2) + self.num_remove: int = 1 if kernel_size % 2 == 0 else 0 + + def __prepare_scriptable__(self): + if self.conv.__class__.__name__ == "ParametrizedConv1d": + _LG.warning("Removing weight_norm from %s", self.__class__.__name__) + torch.nn.utils.parametrize.remove_parametrizations(self.conv, "weight") + return self + + def forward(self, x): + """ + Args: + x (Tensor): shape ``[batch, frame, feature]``. + + Returns: + Tensor: The resulting feature. Shape ``[batch, frame, feature]``. 
+ """ + x = x.transpose(-2, -1) + x = self.conv(x) + if self.num_remove > 0: + x = x[..., : -self.num_remove] + x = torch.nn.functional.gelu(x) + x = x.transpose(-2, -1) + return x + + +class SelfAttention(Module): + """Multihead Self Attention module + + Args: + embed_dim (int): Total dimension of the model. + num_heads (int): The number of heads. + dropout (float, optional): + Dropout probability on attn_output_weights. Default: ``0.0`` + """ + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + ): + super().__init__() + head_dim = embed_dim // num_heads + if head_dim * num_heads != embed_dim: + raise ValueError(f"`embed_dim ({embed_dim})` is not divisible by `num_heads ({num_heads})`") + + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = head_dim + + self.scaling = self.head_dim**-0.5 + + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=True) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=True) + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=True) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=True) + + def forward( + self, + x: Tensor, + attention_mask: Optional[Tensor] = None, + position_bias: Optional[Tensor] = None, + key_padding_mask: Optional[Tensor] = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + """ + Args: + x (Tensor): shape: ``[batch_size, sequence_length, embed_dim]``. + attention_mask (Tensor or ``None``, optional): + shape: ``[batch_size, 1, sequence_length, sequence_length]`` + position_bias: Not used. Only for the compatibility with :py:class:`WavLMSelfAttention`. + key_padding_mask (Tensor or ``None``): Not used. Only for the compatibility with + :py:class:`WavLMSelfAttention`. + Returns: + (Tensor, ``None``): The resulting attention output and ``None`` (necessary for compatibility + with :py:class:`WavLMSelAttention`). + Attention output shape: ``[batch, sequence_length, embed_dim]``. 
+ """ + if x.ndim != 3 or x.shape[2] != self.embed_dim: + raise ValueError( + f"The expected input shape is (batch, sequence, embed_dim=={self.embed_dim}). " f"Found {x.shape}." + ) + batch_size, length, embed_dim = x.size() + if attention_mask is not None: + shape_ = (batch_size, 1, length, length) + if attention_mask.size() != shape_: + raise ValueError(f"The expected attention mask shape is {shape_}. " f"Found {attention_mask.size()}.") + + shape = (batch_size, length, self.num_heads, self.head_dim) + q = self.q_proj(x).view(*shape).transpose(2, 1) # B, nH, L, Hd + k = self.k_proj(x).view(*shape).transpose(2, 1) # B, nH, L, Hd + v = self.v_proj(x).view(*shape).transpose(2, 1) # B, nH, L, Hd + dropout = self.dropout if self.training else 0.0 + attn_output = torch.nn.functional.scaled_dot_product_attention( + q, k, v, attn_mask=attention_mask, dropout_p=dropout, is_causal=False + ) + attn_output = attn_output.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.head_dim) + output = self.out_proj(attn_output) + return output, None # Necessary for compatibility with WavLMSelAttention + + +class FeedForward(Module): + """Layer that follows attention layer in encoder layer.""" + + def __init__( + self, + io_features: int, + intermediate_features: int, + intermediate_dropout: float, + output_dropout: float, + ): + super().__init__() + self.intermediate_dense = nn.Linear(io_features, intermediate_features) + self.intermediate_dropout = nn.Dropout(intermediate_dropout) + self.output_dense = nn.Linear(intermediate_features, io_features) + self.output_dropout = nn.Dropout(output_dropout) + + def forward(self, x): + """ + Args: + x (Tensor): shape: `(batch, sequence_length, io_features)` + Returns: + x (Tensor): shape: `(batch, sequence_length, io_features)` + """ + x = self.intermediate_dense(x) + x = torch.nn.functional.gelu(x) + x = self.intermediate_dropout(x) + + x = self.output_dense(x) + x = self.output_dropout(x) + return x + + +class EncoderLayer(Module): 
+ """A layer unit in encoder. Combines multihead self attention and feed forward.""" + + def __init__( + self, + attention: Module, + dropout: float, + layer_norm_first: bool, + feed_forward: Module, + ): + super().__init__() + self.attention = attention + self.dropout = nn.Dropout(dropout) + self.layer_norm = nn.LayerNorm(attention.embed_dim) + self.layer_norm_first = layer_norm_first + self.feed_forward = feed_forward + self.final_layer_norm = nn.LayerNorm(attention.embed_dim) + + def forward( + self, + x: Tensor, + attention_mask: Optional[Tensor] = None, + position_bias: Optional[Tensor] = None, + key_padding_mask: Optional[Tensor] = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + """ + Args: + x (Tensor): Input of shape ``(batch, sequence_length, embed_dim)``. + attention_mask (Tensor or ``None``, optional): attention mask + of shape ``(batch, 1, sequence_length, sequence_length)``. (Default: ``None``) + position_bias (Tensor or ``None``, optional): position bias of shape + ``(batch_size * num_heads, src_len, src_len)``. + Only necessary for WavLM model, ``None`` otherwise. (Default: ``None``) + key_padding_mask (Tensor or ``None``, optional): key padding mask of shape ``(batch_size, src_len)``. + Only used for WavLM model, ignored otherwise. (Default: ``None``) + Returns: + (x, position_bias): Shapes are the same as in the input. Position bias is only relevant for WaLM model, + ``None`` otherwise. 
+ """ + residual = x + + if self.layer_norm_first: + x = self.layer_norm(x) + + x, position_bias = self.attention( + x, attention_mask=attention_mask, position_bias=position_bias, key_padding_mask=key_padding_mask + ) + + x = self.dropout(x) + x = residual + x + + if self.layer_norm_first: + x = x + self.feed_forward(self.final_layer_norm(x)) + else: + x = self.layer_norm(x) + x = self.final_layer_norm(x + self.feed_forward(x)) + return x, position_bias + + +class Transformer(Module): + def __init__( + self, + pos_conv_embed: Module, + dropout: float, + layers: Module, + layer_norm_first: bool, + layer_drop: float, + ): + super().__init__() + self.pos_conv_embed = pos_conv_embed + self.layer_norm = nn.LayerNorm(pos_conv_embed.embed_dim) + self.layer_norm_first = layer_norm_first + self.layer_drop = layer_drop + self.dropout = nn.Dropout(dropout) + self.layers = layers + + def _preprocess(self, x: Tensor): + x = x + self.pos_conv_embed(x) + + if self.layer_norm_first: + x = self.layer_norm(x) + + x = self.dropout(x) + return x + + def forward( + self, + x: Tensor, + attention_mask: Optional[Tensor] = None, + position_bias: Optional[Tensor] = None, + ) -> Tensor: + x = self._preprocess(x) + for layer in self.layers: + if not (self.training and torch.rand(1).item() <= self.layer_drop): + x, position_bias = layer(x, attention_mask, position_bias=position_bias) + + if not self.layer_norm_first: + x = self.layer_norm(x) + return x + + def get_intermediate_outputs( + self, + x: Tensor, + attention_mask: Optional[Tensor] = None, + num_layers: Optional[int] = None, + ) -> List[Tensor]: + if num_layers is not None: + if not 0 < num_layers <= len(self.layers): + raise ValueError(f"`num_layers` must be between [1, {len(self.layers)}]") + + ret: List[Tensor] = [] + position_bias = None + x = self._preprocess(x) + for layer in self.layers: + x, position_bias = layer(x, attention_mask, position_bias=position_bias) + ret.append(x) + if num_layers is not None and len(ret) >= 
num_layers: + return ret + return ret + + +class Encoder(Module): + def __init__( + self, + feature_projection: Module, + transformer: Module, + ): + super().__init__() + self.feature_projection = feature_projection + self.transformer = transformer + + def _preprocess( + self, + features: Tensor, + lengths: Optional[Tensor] = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + x = self.feature_projection(features) + + mask: Optional[Tensor] = None + if lengths is not None: + batch_size, max_len, _ = x.shape + # create mask for padded elements and zero-out them + mask = torch.arange(max_len, device=lengths.device).expand(batch_size, max_len) >= lengths[:, None] + x[mask] = 0.0 + # extend the mask to attention shape and set weight + mask = -10000.0 * mask[:, None, None, :].to(dtype=features.dtype) + mask = mask.expand(batch_size, 1, max_len, max_len) + return x, mask + + def forward( + self, + features: Tensor, + lengths: Optional[Tensor] = None, + ) -> Tensor: + x, mask = self._preprocess(features, lengths) + x = self.transformer(x, attention_mask=mask) + return x + + def extract_features( + self, + features: Tensor, + lengths: Optional[Tensor] = None, + num_layers: Optional[int] = None, + ) -> List[Tensor]: + x, masks = self._preprocess(features, lengths) + return self.transformer.get_intermediate_outputs(x, attention_mask=masks, num_layers=num_layers) + + +################################################################################ +def _get_feature_extractor( + norm_mode: str, + shapes: List[Tuple[int, int, int]], + bias: bool, +) -> FeatureExtractor: + """ + Args: + norm_mode (str): + Either "group_norm" or "layer_norm". + If "group_norm", then a single normalization is applied + in the first convolution block. Otherwise, all the convolution + blocks will have layer normalization. + This option corresponds to "extractor_mode" from fairseq. + Expected values are "group_norm" for Base arch, and + "layer_norm" for Large arch. 
        shapes (list of tuple of int):
            Configuration of convolution layers. List of convolution configuration,
            i.e. ``[(output_channel, kernel_size, stride), ...]``
            This option corresponds to "conv_feature_layers" from fairseq.
            Expected values are
            ``[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2``
            for all the architectures.
        bias (bool):
            Whether to include bias term to each convolution operation.
            This option corresponds to "conv_bias" from fairseq.
            Expected values are False for Base arch, and True for Large arch.

    See Also:
        * Original implementation
          https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L666-L733
        * "extractor_mode"
          - Def and base:
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L38-L45
          - Large:
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L52
        * "conv_feature_layers"
          - Def, base and large:
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L94-L100
        * "conv_bias"
          - Def and base:
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L101-L103
          - Large:
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L61
    """
    if norm_mode not in ["group_norm", "layer_norm"]:
        raise ValueError("Invalid norm mode")
    blocks = []
    in_channels = 1  # raw waveform is single-channel
    for i, (out_channels, kernel_size, stride) in enumerate(shapes):
        normalization = None
        # "group_norm" mode normalizes only the first block; "layer_norm" normalizes every block.
        if norm_mode == "group_norm" and i == 0:
            normalization = nn.GroupNorm(
                num_groups=out_channels,
                num_channels=out_channels,
                affine=True,
            )
        elif norm_mode == "layer_norm":
            normalization = LayerNorm(
                normalized_shape=out_channels,
                elementwise_affine=True,
            )
        blocks.append(
            ConvLayerBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                stride=stride,
                bias=bias,
                layer_norm=normalization,
            )
        )
        in_channels = out_channels
    return FeatureExtractor(nn.ModuleList(blocks))


def _get_encoder(
    in_features: int,
    embed_dim: int,
    dropout_input: float,
    pos_conv_kernel: int,
    pos_conv_groups: int,
    num_layers: int,
    num_heads: int,
    attention_dropout: float,
    ff_interm_features: int,
    ff_interm_dropout: float,
    dropout: float,
    layer_norm_first: bool,
    layer_drop: float,
) -> Encoder:
    """
    Args:
        in_features (int): The number of input features.
        embed_dim (int):
            The dimension of embedding.
            This option corresponds to "encoder_embed_dim" from fairseq.
            Expected values are 768 for Base arch, and 1024 for Large arch.
        dropout_input (float):
            The dropout probability applied after the input feature is projected
            to ``embed_dim``.
            This option corresponds to "dropout_input" from fairseq.
            Expected values are 0.1 for both Base and Large arch.
        pos_conv_kernel (int):
            The kernel size of convolutional positional embeddings.
            This option corresponds to "conv_pos" from fairseq.
            Expected values are 128 for both Base and Large arch.
        pos_conv_groups (int):
            The number of groups of convolutional positional embeddings.
            This option corresponds to "conv_pos_groups" from fairseq.
            Expected values are 16 for both Base and Large arch.
        num_layers (int):
            The number of self attention layers in transformer block.
            This option corresponds to "encoder_layers" from fairseq.
            Expected values are 12 for Base and 24 for Large arch.
        num_heads (int):
            The number of heads in self attention layers.
            This option corresponds to "encoder_attention_heads" from fairseq.
            Expected values are 12 for Base and 16 for Large arch.
        attention_dropout (float):
            The dropout probability applied after softmax in self-attention layer.
            This option corresponds to "attention_dropout" from fairseq.
            Expected values are 0.1 for Base and 0.0 for Large arch.
        ff_interm_features (int):
            The dimension of hidden features in feed forward layer.
            This option corresponds to "encoder_ffn_embed_dim" from fairseq.
            Expected values are 3072 for Base and 4096 for Large arch.
        ff_interm_dropout (float):
            The dropout probability applied in feedforward layer.
            This option corresponds to "activation_dropout" from fairseq.
            Expected values are 0.1 for both Base and Large arch.
        dropout (float):
            The dropout probability applied at the end of feed forward layer.
            This option corresponds to "dropout" from fairseq.
            Expected values are 0.1 for Base and 0.0 for Large arch.
        layer_norm_first (bool):
            Control the order of layer norm in transformer layer and each encoder layer.
            If True, in transformer layer, layer norm is applied before features are fed
            to encoder layers. In encoder layer, two layer norms are applied before and after
            self attention.
            If False, in transformer layer, layer norm is applied after features are fed
            to encoder layers. In encoder layer, two layer norms are applied after self
            attention, before and after feed forward.
            This option corresponds to "layer_norm_first" from fairseq.
            Expected values are False for Base and True for Large arch.
        layer_drop (float):
            Probability to drop each encoder layer during training.
            This option corresponds to "layerdrop" from fairseq.
            Expected values are 0.1 for both Base and Large arch.

    See Also:
        * "encoder_embed_dim"
          - Def and base
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L49-L51
          - Large
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L64
        * "dropout_input"
          - Def, base and large
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L75-L78
        * "conv_pos"
          - Def, base and large
            NOTE: The description is wrong.
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L204-L207
          - Usage
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L756
        * "conv_pos_groups"
          - Def, base and large
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L208-L211
        * "encoder_layers"
          - Def and base
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L46-L48
          - Large
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L63
        * "encoder_attention_heads"
          - Def and base
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L55-L57
          - Large
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L66
        * "attention_dropout"
          - Def and base
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L66-L68
          - Large
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L60
        * "encoder_ffn_embed_dim"
          - Def and base
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L52-L54
          - Large
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L65
        * "activation_dropout"
          - Def
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L69-L71
          - Base
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/base_960h.yaml#L55
          - Large
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/vox_960h.yaml#L55
        * "dropout"
          - Def and base
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L63-L65
          - Large
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L59
        * "layer_norm_first"
          - Def and base
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L91-L93
          - Large
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L53
        * "layerdrop"
          - Def
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L72-L74
          - Base
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/base_960h.yaml#L54
          - Large
            https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/vox_960h.yaml#L54
    """
    feature_projection = FeatureProjection(in_features, embed_dim, dropout_input)
    pos_conv = ConvolutionalPositionalEmbedding(embed_dim, pos_conv_kernel, pos_conv_groups)

    # Original impl
    # https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L768-L782
    encoder_layers = nn.ModuleList()
    for _ in range(num_layers):
        attention = SelfAttention(
            embed_dim=embed_dim,
            num_heads=num_heads,
            dropout=attention_dropout,
        )
        feed_forward = FeedForward(
            io_features=embed_dim,
            intermediate_features=ff_interm_features,
            intermediate_dropout=ff_interm_dropout,
            output_dropout=dropout,
        )
        encoder_layers.append(
            EncoderLayer(
                attention=attention,
                dropout=dropout,
                layer_norm_first=layer_norm_first,
                feed_forward=feed_forward,
            )
        )
    transformer = Transformer(
        pos_conv_embed=pos_conv,
        dropout=dropout,
        layers=encoder_layers,
        # NOTE: intentionally negated — the Transformer-level pre/post-norm
        # placement is the opposite of the per-layer setting.
        layer_norm_first=not layer_norm_first,
        layer_drop=layer_drop,
    )
    return Encoder(feature_projection, transformer)


def _get_wavlm_encoder(
    in_features: int,
    embed_dim: int,
    dropout_input: float,
    pos_conv_kernel: int,
    pos_conv_groups: int,
    num_layers: int,
    num_heads: int,
    num_buckets: int,
    max_distance: int,
    attention_dropout: float,
    ff_interm_features: int,
    ff_interm_dropout: float,
    dropout: float,
    layer_norm_first: bool,
    layer_drop: float,
) -> Encoder:
    """
    Construct encoder for WavLM model :cite:`chen2022wavlm`. The structure of the encoder and most of the arguments
    are the same as in :py:func:`_get_encoder` so refer there for documentation. The only difference from Wav2Vec2
    encoder is usage of `WavLMSelfAttention` instead of `SelfAttention` and two additional parameters: `num_buckets`
    and `max_distance`.
    Args:
        in_features (int): See :py:func:`_get_encoder`.
        embed_dim (int): See :py:func:`_get_encoder`.
        dropout_input (float): See :py:func:`_get_encoder`.
        pos_conv_kernel (int): See :py:func:`_get_encoder`.
        pos_conv_groups (int): See :py:func:`_get_encoder`.
        num_layers (int): See :py:func:`_get_encoder`.
        num_heads (int): See :py:func:`_get_encoder`.
        num_buckets (int): Number of buckets for relative position embedding.
        max_distance (int): Maximum distance for relative position embedding.
        attention_dropout (float): See :py:func:`_get_encoder`.
        ff_interm_features (int): See :py:func:`_get_encoder`.
        ff_interm_dropout (float): See :py:func:`_get_encoder`.
        dropout (float): See :py:func:`_get_encoder`.
        layer_norm_first (bool): See :py:func:`_get_encoder`.
        layer_drop (float): See :py:func:`_get_encoder`.

    """
    feature_projection = FeatureProjection(in_features, embed_dim, dropout_input)
    pos_conv = ConvolutionalPositionalEmbedding(embed_dim, pos_conv_kernel, pos_conv_groups)

    # Original impl
    # https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L768-L782
    encoder_layers = nn.ModuleList()
    for i in range(num_layers):
        attention = WavLMSelfAttention(
            embed_dim=embed_dim,
            num_heads=num_heads,
            num_buckets=num_buckets,
            max_distance=max_distance,
            dropout=attention_dropout,
            has_relative_attention_bias=(i == 0),  # Position embedding is only necessary in the first layer.
        )
        feed_forward = FeedForward(
            io_features=embed_dim,
            intermediate_features=ff_interm_features,
            intermediate_dropout=ff_interm_dropout,
            output_dropout=dropout,
        )
        encoder_layers.append(
            EncoderLayer(
                attention=attention,
                dropout=dropout,
                layer_norm_first=layer_norm_first,
                feed_forward=feed_forward,
            )
        )
    transformer = Transformer(
        pos_conv_embed=pos_conv,
        dropout=dropout,
        layers=encoder_layers,
        # Negated on purpose, mirroring _get_encoder.
        layer_norm_first=not layer_norm_first,
        layer_drop=layer_drop,
    )
    return Encoder(feature_projection, transformer)


def _compute_mask_indices(
    shape: Tuple[int, int],
    padding_mask: Optional[Tensor],
    mask_prob: float,
    mask_length: int,
    mask_type: str = "static",
    mask_other: float = 0.0,
    min_masks: int = 0,
    no_overlap: bool = False,
    min_space: int = 0,
) -> Tensor:
    """Computes random mask spans for a given shape.
    Args:
        shape (int, int): The shape for which to compute masks.
            The first element is batch size and second is the number of frames.
        padding_mask (Tensor or None): The padding mask of the same dimension as shape,
            which will prevent masking padded elements.
        mask_prob (float): Probability for each token to be chosen as start of the span to be masked.
            This will be multiplied by number of timesteps divided by length of mask span to mask
            approximately this percentage of all elements. However due to overlaps, the actual number
            will be smaller (unless no_overlap is True).
        mask_type (str): How to compute mask lengths. Options: [``static``, ``uniform``, ``normal``, ``poisson``].
            ``static``: Fixed size
            ``uniform``: Sample from uniform distribution [mask_other, mask_length*2]
            ``normal``: Sample from normal distribution with mean ``mask_length`` and stdev ``mask_other``.
            ``poisson``: Sample from possion distribution with lambda = ``mask_length``.
        min_masks (int): Minimum number of masked spans.
        no_overlap (bool): If true, will switch to an alternative recursive algorithm
            that prevents spans from overlapping.
        min_space (int): How many frames to keep unmasked between spans (Only used if no_overlap is True).

    Returns:
        (Tensor): The mask indices of dimension `[batch, frame]`.
    """

    batch_size, frame = shape
    mask = torch.full((batch_size, frame), False)
    # add a random number for probabilistic rounding
    all_num_mask = int(mask_prob * frame / float(mask_length) + torch.rand(1))

    all_num_mask = max(min_masks, all_num_mask)

    mask_idcs = []
    for i in range(batch_size):
        if padding_mask is not None:
            # Only the un-padded prefix of this sample is eligible for masking.
            sz = frame - padding_mask[i].long().sum().item()
            # add a random number for probabilistic rounding
            num_mask = int(mask_prob * sz / float(mask_length) + torch.rand(1))
            num_mask = max(min_masks, num_mask)
        else:
            sz = frame
            num_mask = all_num_mask

        # Sample a span length for each mask according to ``mask_type``.
        if mask_type == "static":
            lengths = torch.full((num_mask,), mask_length)
        elif mask_type == "uniform":
            lengths = torch.randint(int(mask_other), mask_length * 2 + 1, size=(num_mask,))
        elif mask_type == "normal":
            lengths = torch.normal(mask_length, mask_other, size=(num_mask,))
            lengths = torch.maximum(torch.ones(1), torch.round(lengths)).int()
        elif mask_type == "poisson":
            # NOTE(review): ``torch.poisson`` expects a rate *tensor* and has no
            # ``size`` keyword, so this branch likely raises at runtime — confirm
            # against the installed torch version before using mask_type="poisson".
            lengths = torch.poisson(mask_length, size=(num_mask,))
            lengths = torch.round(lengths).int()
        else:
            raise Exception(f"unknown mask selection: {mask_type}")

        if sum(lengths) == 0:
            # Guarantee at least one non-empty span.
            lengths[0] = min(mask_length, sz - 1)

        if no_overlap:
            mask_idc = []

            def arrange(s, e, length, keep_length):
                # Place one span inside [s, e) and return the remaining sub-ranges
                # (separated by ``min_space``) that are still large enough to host
                # further spans.
                span_start = torch.randint(s, e - length, size=(1,))
                mask_idc.extend(span_start + i for i in range(length))

                new_parts = []
                if span_start - s - min_space >= keep_length:
                    new_parts.append((s, span_start - min_space + 1))
                if e - span_start - keep_length - min_space > keep_length:
                    new_parts.append((span_start + length + min_space, e))
                return new_parts

            parts = [(0, sz)]
            min_length = min(lengths)
            # Greedily place the longest spans first, choosing the host range with
            # probability proportional to its size.
            for length in sorted(lengths, reverse=True):
                lens = torch.tensor([e - s for s, e in parts], dtype=torch.int)
                lens[lens < length + min_space] = 0
                l_sum = lens.sum()
                if l_sum == 0:
                    break
                probs = lens / l_sum
                c = torch.distributions.categorical.Categorical(probs).sample()
                s, e = parts.pop(c)
                parts.extend(arrange(s, e, length, min_length))
            mask_idc = torch.tensor(mask_idc)
        else:
            min_len = min(lengths)
            if sz - min_len <= num_mask:
                min_len = sz - num_mask - 1

            # Sample span starts, then expand each start into its full span.
            mask_idc = torch.randperm(sz - min_len)[:num_mask]
            mask_idc = torch.tensor(
                [mask_idc[j] + offset for j in range(len(mask_idc)) for offset in range(lengths[j])]
            )

        mask_idcs.append(torch.unique(mask_idc[mask_idc < sz]))

    # Trim all samples to the same number of masked frames so the mask is rectangular.
    min_len = min([len(m) for m in mask_idcs])
    for i, mask_idc in enumerate(mask_idcs):
        if len(mask_idc) > min_len:
            mask_idc = mask_idc[torch.randperm(len(mask_idc))[:min_len].long()]
        mask[i, mask_idc] = True

    return mask


def _get_padding_mask(input: Tensor, lengths: Tensor) -> Tensor:
    """Generate the padding mask given the padded input and the lengths Tensors.
    Args:
        input (Tensor): The padded Tensor of dimension `[batch, max_len, frequency]`.
        lengths (Tensor): The lengths Tensor of dimension `[batch,]`.

    Returns:
        (Tensor): The padding mask.
    """
    batch_size, max_len, _ = input.shape
    # True where the position is beyond the sample's valid length.
    mask = torch.arange(max_len, device=lengths.device).expand(batch_size, max_len) >= lengths[:, None]
    return mask


class MaskGenerator(Module):
    """Generate the masks for masked prediction.
    Args:
        encoder_embed_dim (int): The dimension of the transformer embedding output.
        mask_prob (float): Probability for each token to be chosen as start of the span to be masked.
            This will be multiplied by number of timesteps divided by length of mask span to mask
            approximately this percentage of all elements. However due to overlaps, the actual number
            will be smaller (unless no_overlap is True).
        mask_selection (str): How to choose the mask length.
            Options: [``static``, ``uniform``, ``normal``, ``poisson``].
        mask_other (float): Secondary mask argument (used for more complex distributions).
        mask_length (int): The lengths of the mask.
        no_mask_overlap (bool): Whether to allow masks to overlap.
        mask_min_space (int): Minimum space between spans (if no overlap is enabled).
        mask_channel_prob (float): The probability of replacing a feature with 0.
        mask_channel_selection (str): How to choose the mask length for channel masking.
            Options: [``static``, ``uniform``, ``normal``, ``poisson``].
        mask_channel_other (float): Secondary mask argument for channel masking (used for more complex distributions).
        mask_channel_length (int): The lengths of the mask for channel masking.
        no_mask_channel_overlap (bool): Whether to allow channel masks to overlap.
        mask_channel_min_space (int): Minimum space between spans for channel masking (if no overlap is enabled).
    """

    def __init__(
        self,
        encoder_embed_dim: int,
        mask_prob: float,
        mask_selection: str,
        mask_other: float,
        mask_length: int,
        no_mask_overlap: bool,
        mask_min_space: int,
        mask_channel_prob: float,
        mask_channel_selection: str,
        mask_channel_other: float,
        mask_channel_length: int,
        no_mask_channel_overlap: bool,
        mask_channel_min_space: int,
    ):
        super().__init__()
        self.mask_prob = mask_prob
        self.mask_selection = mask_selection
        self.mask_other = mask_other
        self.mask_length = mask_length
        self.no_mask_overlap = no_mask_overlap
        self.mask_min_space = mask_min_space
        self.mask_channel_prob = mask_channel_prob
        self.mask_channel_selection = mask_channel_selection
        self.mask_channel_other = mask_channel_other
        self.mask_channel_length = mask_channel_length
        self.no_mask_channel_overlap = no_mask_channel_overlap
        self.mask_channel_min_space = mask_channel_min_space
        # Learned embedding substituted into masked time steps.
        self.mask_embedding = Parameter(torch.FloatTensor(encoder_embed_dim))
        torch.nn.init.uniform_(self.mask_embedding)

    def forward(self, x: Tensor, padding_mask: Optional[Tensor]) -> Tuple[Tensor, Optional[Tensor]]:
        """
        Args:
            x (Tensor): The encoded representations after feature extraction module.
            padding_mask (Tensor or None): The padding mask of the same dimension as shape,
                which will prevent masking padded elements.

        Returns:
            Tensor: The feature representations after masking.
            Tensor or None: The generated mask indices (``None`` when ``mask_prob`` is 0).
        """
        B, T, C = x.shape
        if self.mask_prob > 0:
            mask_indices = _compute_mask_indices(
                (B, T),
                padding_mask,
                self.mask_prob,
                self.mask_length,
                self.mask_selection,
                self.mask_other,
                min_masks=2,
                no_overlap=self.no_mask_overlap,
                min_space=self.mask_min_space,
            )
            mask_indices = mask_indices.to(x.device)
            # change dtype of mask_embedding to x for mixed-precision training.
            # see https://github.com/pytorch/audio/issues/2847 for details.
            x[mask_indices] = self.mask_embedding.to(x.dtype)
        else:
            mask_indices = None

        if self.mask_channel_prob > 0:
            # Channel masking zeroes whole feature channels across all time steps.
            mask_channel_indices = _compute_mask_indices(
                (B, C),
                None,
                self.mask_channel_prob,
                self.mask_channel_length,
                self.mask_channel_selection,
                self.mask_channel_other,
                no_overlap=self.no_mask_channel_overlap,
                min_space=self.mask_channel_min_space,
            )
            mask_channel_indices = mask_channel_indices.to(x.device).unsqueeze(1).expand(-1, T, -1)
            x[mask_channel_indices] = 0

        return x, mask_indices


def _compute_logits(
    proj_x: Tensor,
    target: Tensor,
    label_embeddings: Parameter,
) -> Tensor:
    """Compute the logits of the embeddings.
    Args:
        proj_x (Tensor): The projected masked representations of dimension `[batch, frame, final_dim]`.
        target (Tensor): The target Tensor of dimension `[batch, frame, final_dim]`.
        label_embeddings (Parameter): The trainable embeddings of target of dimension `[num_class, final_dim]`.

    Returns:
        (Tensor): The logits of the inputs.
    """
    logit_temp = 0.1
    # Positive: the embedding of the true label; negatives: all label embeddings.
    pos = torch.index_select(label_embeddings, 0, target.long())
    negs = label_embeddings.unsqueeze(1).expand(-1, proj_x.size(0), -1)
    neg_is_pos = (pos == negs).all(-1)
    pos = pos.unsqueeze(0)
    targets = torch.cat([pos, negs], dim=0)

    logits = torch.cosine_similarity(proj_x.float(), targets.float(), dim=-1).type_as(proj_x)
    logits /= logit_temp
    if neg_is_pos.any():
        # A "negative" identical to the positive would be a duplicate class — exclude it.
        logits[1:][neg_is_pos] = float("-inf")
    logits = logits.transpose(0, 1)  # (num_x, num_cls+1)
    return logits


class LogitGenerator(Module):
    """Generate the logits of masked and unmasked inputs.
    Args:
        encoder_embed_dim (int): The dimension of the transformer embedding output.
        num_classes (int): The number of classes in the labels.
        final_dim (int): Project final representations and targets to `final_dim`.
        skip_masked (bool): If True, skip computing losses over masked frames.
        skip_nomask (bool): If True, skip computing losses over unmasked frames.
+ """ + + def __init__( + self, + encoder_embed_dim: int, + num_classes: int, + final_dim: int, + skip_masked: bool, + skip_nomask: bool, + ): + super().__init__() + self.label_embeddings = Parameter(torch.FloatTensor(num_classes, final_dim)) + torch.nn.init.uniform_(self.label_embeddings) + self.final_proj = torch.nn.Linear(encoder_embed_dim, final_dim) + self.skip_masked = skip_masked + self.skip_nomask = skip_nomask + + def forward(self, x: Tensor, label: Tensor, mask_m: Tensor, mask_u: Tensor) -> Tuple[Tensor, Tensor]: + """ + Args: + x (Tensor): The feature representation of the last transformer layer. + label (Tensor): The label Tensor of dimension `[batch, frame]`. + mask_m (Tensor): The masked indices of dimension `[batch, frame]`. + mask_u (Tensor): The unmasked indices of dimension `[batch, frame]`. + + Returns: + Tensor: The logits of masked frames. Tensor of dimension `[masked_frame, final_dim]`. + Tensor: The logits of unmasked frames. Tensor of dimension `[unmasked_frame, final_dim]`. 
+ """ + proj_x = self.final_proj(x) + if self.skip_masked: + logit_m = None + else: + proj_x_m = proj_x[mask_m] + label_m = label[mask_m] + logit_m = _compute_logits(proj_x_m, label_m, self.label_embeddings) + + if self.skip_nomask: + logit_u = None + else: + proj_x_u = proj_x[mask_u] + label_u = label[mask_u] + logit_u = _compute_logits(proj_x_u, label_u, self.label_embeddings) + return logit_m, logit_u + + +class GradMultiply(torch.autograd.Function): + @staticmethod + def forward(ctx, x, scale): + ctx.scale = scale + res = x.new(x) + return res + + @staticmethod + def backward(ctx, grad): + return grad * ctx.scale, None diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/model.py b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/model.py new file mode 100644 index 0000000000000000000000000000000000000000..254122f0eee21906ec50f3d4238a5b3024e74a0a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/model.py @@ -0,0 +1,1579 @@ +import math +from typing import List, Optional, Tuple + +import torch +from torch import Tensor +from torch.nn import Module + +from . import components + + +class Wav2Vec2Model(Module): + """Acoustic model used in *wav2vec 2.0* :cite:`baevski2020wav2vec`. + + Note: + To build the model, please use one of the factory functions. + + See Also: + * :class:`torchaudio.pipelines.Wav2Vec2Bundle`: Pretrained models (without fine-tuning) + * :class:`torchaudio.pipelines.Wav2Vec2ASRBundle`: ASR pipelines with pretrained models. + + Args: + feature_extractor (torch.nn.Module): + Feature extractor that extracts feature vectors from raw audio Tensor. + + encoder (torch.nn.Module): + Encoder that converts the audio features into the sequence of probability + distribution (in negative log-likelihood) over labels. + + aux (torch.nn.Module or None, optional): + Auxiliary module. If provided, the output from encoder is passed to this module. 
+ """ # noqa: E501 + + def __init__( + self, + feature_extractor: Module, + encoder: Module, + aux: Optional[Module] = None, + ): + super().__init__() + self.feature_extractor = feature_extractor + self.encoder = encoder + self.aux = aux + + @torch.jit.export + def extract_features( + self, + waveforms: Tensor, + lengths: Optional[Tensor] = None, + num_layers: Optional[int] = None, + ) -> Tuple[List[Tensor], Optional[Tensor]]: + """Extract feature vectors from raw waveforms + + This returns the list of outputs from the intermediate layers of + transformer block in encoder. + + Args: + waveforms (Tensor): Audio tensor of shape `(batch, frames)`. + lengths (Tensor or None, optional): + Indicates the valid length of each audio in the batch. + Shape: `(batch, )`. + When the ``waveforms`` contains audios with different durations, + by providing ``lengths`` argument, the model will compute + the corresponding valid output lengths and apply proper mask in + transformer attention layer. + If ``None``, it is assumed that the entire audio waveform + length is valid. + num_layers (int or None, optional): + If given, limit the number of intermediate layers to go through. + Providing `1` will stop the computation after going through one + intermediate layers. If not given, the outputs from all the + intermediate layers are returned. + + Returns: + (List[Tensor], Optional[Tensor]): + List of Tensors + Features from requested layers. + Each Tensor is of shape: `(batch, time frame, feature dimension)` + Tensor or None + If ``lengths`` argument was provided, a Tensor of shape `(batch, )` + is returned. + It indicates the valid length in time axis of each feature Tensor. 
+ """ + x, lengths = self.feature_extractor(waveforms, lengths) + x = self.encoder.extract_features(x, lengths, num_layers) + return x, lengths + + def forward( + self, + waveforms: Tensor, + lengths: Optional[Tensor] = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + """Compute the sequence of probability distribution over labels. + + Args: + waveforms (Tensor): Audio tensor of shape `(batch, frames)`. + lengths (Tensor or None, optional): + Indicates the valid length of each audio in the batch. + Shape: `(batch, )`. + When the ``waveforms`` contains audios with different durations, + by providing ``lengths`` argument, the model will compute + the corresponding valid output lengths and apply proper mask in + transformer attention layer. + If ``None``, it is assumed that all the audio in ``waveforms`` + have valid length. Default: ``None``. + + Returns: + (Tensor, Optional[Tensor]): + Tensor + The sequences of probability distribution (in logit) over labels. + Shape: `(batch, frames, num labels)`. + Tensor or None + If ``lengths`` argument was provided, a Tensor of shape `(batch, )` + is returned. + It indicates the valid length in time axis of the output Tensor. + """ + x, lengths = self.feature_extractor(waveforms, lengths) + x = self.encoder(x, lengths) + if self.aux is not None: + x = self.aux(x) + return x, lengths + + +class HuBERTPretrainModel(Module): + """HuBERTPretrainModel() + + HuBERT model used for pretraining in *HuBERT* :cite:`hsu2021hubert`. + + Note: + To build the model, please use one of the factory functions. + + See Also: + `HuBERT Pre-training and Fine-tuning Recipes + `__ + + Args: + wav2vec2 (Wav2Vec2Model): + Wav2Vec2 encoder that generates the transformer outputs. + + mask_generator (torch.nn.Module): + Mask generator that generates the mask for masked prediction during the training. + + logit_generator (torch.nn.Module): + Logit generator that predicts the logits of the masked and unmasked inputs. 
+ + feature_grad_mult (float or None): + The factor to scale the convolutional feature extraction layer gradients by. + If ``None``, the gradients of feature extraction layers are not affected. + The scale factor will not affect the forward pass. + """ + + def __init__( + self, + wav2vec2: Wav2Vec2Model, + mask_generator: Module, + logit_generator: Module, + feature_grad_mult: Optional[float], + ): + super().__init__() + self.wav2vec2 = wav2vec2 + self.mask_generator = mask_generator + self.logit_generator = logit_generator + if feature_grad_mult is not None and not 0.0 < feature_grad_mult < 1.0: + raise ValueError( + f"The value of `feature_grad_mult` must be ``None``or between (0, 1). Found {feature_grad_mult}" + ) + self.feature_grad_mult = feature_grad_mult + + def forward( + self, + waveforms: Tensor, + labels: Tensor, + audio_lengths: Optional[Tensor] = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + """Compute the sequence of probability distribution over labels. + + Args: + waveforms (Tensor): Audio tensor of dimension `[batch, frames]`. + labels (Tensor): Label for pre-training. A Tensor of dimension `[batch, frames]`. + audio_lengths (Tensor or None, optional): + Indicates the valid length of each audio in the batch. + Shape: `[batch, ]`. + When the ``waveforms`` contains audios with different durations, + by providing ``lengths`` argument, the model will compute + the corresponding valid output lengths and apply proper mask in + transformer attention layer. + If ``None``, it is assumed that all the audio in ``waveforms`` + have valid length. Default: ``None``. + + Returns: + (Tensor, Tensor, Tensor): + Tensor + The masked sequences of probability distribution (in logit). + Shape: `(masked_frames, num labels)`. + Tensor + The unmasked sequence of probability distribution (in logit). + Shape: `(unmasked_frames, num labels)`. + Tensor + The feature mean value for additional penalty loss. + Shape: `(1,)`. 
+ """ + x, lengths = self.wav2vec2.feature_extractor(waveforms, audio_lengths) + if self.feature_grad_mult is not None and self.feature_grad_mult < 1.0: + x = components.GradMultiply.apply(x, self.feature_grad_mult) + features_pen = x.float().pow(2).mean() + if lengths is not None: + padding_mask = components._get_padding_mask(x, lengths) + else: + padding_mask = None + x, attention_mask = self.wav2vec2.encoder._preprocess(x, lengths) + x, mask = self.mask_generator(x, padding_mask) + x = self.wav2vec2.encoder.transformer(x, attention_mask=attention_mask) + if x.shape[1] != labels.shape[1]: + raise ValueError("The length of label must match that of HuBERT model output") + if padding_mask is not None: + mask_m = torch.logical_and(~padding_mask, mask) + mask_u = torch.logical_and(~padding_mask, ~mask_m) + else: + mask_m = mask + mask_u = ~mask_m + + logit_m, logit_u = self.logit_generator(x, labels, mask_m, mask_u) + + return logit_m, logit_u, features_pen + + +def wav2vec2_model( + extractor_mode: str, + extractor_conv_layer_config: Optional[List[Tuple[int, int, int]]], + extractor_conv_bias: bool, + encoder_embed_dim: int, + encoder_projection_dropout: float, + encoder_pos_conv_kernel: int, + encoder_pos_conv_groups: int, + encoder_num_layers: int, + encoder_num_heads: int, + encoder_attention_dropout: float, + encoder_ff_interm_features: int, + encoder_ff_interm_dropout: float, + encoder_dropout: float, + encoder_layer_norm_first: bool, + encoder_layer_drop: float, + aux_num_out: Optional[int], +) -> Wav2Vec2Model: + """Builds custom :class:`~torchaudio.models.Wav2Vec2Model`. + + Note: + The "feature extractor" below corresponds to + `ConvFeatureExtractionModel `__ + in the original ``fairseq`` implementation. + This is referred as "(convolutional) feature encoder" in the *wav2vec 2.0* + :cite:`baevski2020wav2vec` paper. + + The "encoder" below corresponds to `TransformerEncoder `__, + and this is referred as "Transformer" in the paper. 
+ + Args: + extractor_mode (str): Operation mode of feature extractor. + Valid values are ``"group_norm"`` or ``"layer_norm"``. + If ``"group_norm"``, then a single normalization is applied + in the first convolution block. Otherwise, all the convolution + blocks will have layer normalization. + + This option corresponds to ``extractor_mode`` from ``fairseq``. + extractor_conv_layer_config (list of integer tuples or None): + Configuration of convolution layers in feature extractor. + List of convolution configuration, + i.e. ``[(output_channel, kernel_size, stride), ...]`` + + If ``None`` is provided, then the following default value is used. + + .. code-block:: python + + [ + (512, 10, 5), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 2, 2), + (512, 2, 2), + ] + + This option corresponds to ``conv_feature_layers`` from ``fairseq``. + + extractor_conv_bias (bool): + Whether to include bias term to each convolution operation. + + This option corresponds to ``conv_bias`` from ``fairseq``. + + encoder_embed_dim (int): + The dimension of embedding in encoder. + + This option corresponds to ``encoder_embed_dim`` from ``fairseq``. + + encoder_projection_dropout (float): + The dropout probability applied after the input feature is projected + to ``encoder_embed_dim``. + + This option corresponds to ``dropout_input`` from ``fairseq``. + + encoder_pos_conv_kernel (int): + The kernel size of convolutional positional embeddings. + + This option corresponds to ``conv_pos`` from ``fairseq``. + + encoder_pos_conv_groups (int): + The number of groups of convolutional positional embeddings. + + This option corresponds to ``conv_pos_groups`` from ``fairseq``. + + encoder_num_layers (int): + The number of self attention layers in transformer block. + + This option corresponds to ``encoder_layers`` from ``fairseq``. + + encoder_num_heads (int): + The number of heads in self attention layers. 
+ + This option corresponds to ``encoder_attention_heads`` from ``fairseq``. + + encoder_attention_dropout (float): + The dropout probability applied after softmax in self-attention layer. + + This option corresponds to ``attention_dropout`` from ``fairseq``. + + encoder_ff_interm_features (int): + The dimension of hidden features in feed forward layer. + + This option corresponds to ``encoder_ffn_embed_dim`` from ``fairseq``. + + encoder_ff_interm_dropout (float): + The dropout probability applied in feedforward layer. + + This option correspinds to ``activation_dropout`` from ``fairseq``. + + encoder_dropout (float): + The dropout probability applied at the end of feed forward layer. + + This option corresponds to ``dropout`` from ``fairseq``. + + encoder_layer_norm_first (bool): + Control the order of layer norm in transformer layer and each encoder layer. + If True, in transformer layer, layer norm is applied before features are fed + to encoder layers. In encoder layer, two layer norms are applied before and after + self attention. + If False, in transformer layer, layer norm is applied after features are fed + to encoder layers. In encoder layer, two layer norms are applied after self + attention, before and after feed forward. + + This option corresponds to ``layer_norm_first`` from ``fairseq``. + + encoder_layer_drop (float): + Probability to drop each encoder layer during training. + + This option corresponds to ``layerdrop`` from ``fairseq``. + + aux_num_out (int or None): + When provided, attach an extra linear layer on top of encoder, which can be + used for fine-tuning. + + Returns: + Wav2Vec2Model: + The resulting model. 
+ """ # noqa: E501 + if extractor_conv_layer_config is None: + extractor_conv_layer_config = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2 + + feature_extractor = components._get_feature_extractor( + extractor_mode, extractor_conv_layer_config, extractor_conv_bias + ) + encoder = components._get_encoder( + in_features=extractor_conv_layer_config[-1][0], + embed_dim=encoder_embed_dim, + dropout_input=encoder_projection_dropout, + pos_conv_kernel=encoder_pos_conv_kernel, + pos_conv_groups=encoder_pos_conv_groups, + num_layers=encoder_num_layers, + num_heads=encoder_num_heads, + attention_dropout=encoder_attention_dropout, + ff_interm_features=encoder_ff_interm_features, + ff_interm_dropout=encoder_ff_interm_dropout, + dropout=encoder_dropout, + layer_norm_first=encoder_layer_norm_first, + layer_drop=encoder_layer_drop, + ) + aux = None + if aux_num_out is not None: + aux = torch.nn.Linear(in_features=encoder_embed_dim, out_features=aux_num_out) + return Wav2Vec2Model(feature_extractor, encoder, aux) + + +def wav2vec2_base( + encoder_projection_dropout: float = 0.1, + encoder_attention_dropout: float = 0.1, + encoder_ff_interm_dropout: float = 0.1, + encoder_dropout: float = 0.1, + encoder_layer_drop: float = 0.1, + aux_num_out: Optional[int] = None, +) -> Wav2Vec2Model: + """Builds "base" :class:`~torchaudio.models.Wav2Vec2Model` from *wav2vec 2.0* :cite:`baevski2020wav2vec` + + Args: + encoder_projection_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_attention_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_ff_interm_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_layer_drop (float): + See :py:func:`wav2vec2_model`. + aux_num_out (int or None, optional): + See :py:func:`wav2vec2_model`. + + Returns: + Wav2Vec2Model: + The resulting model. 
+ """ # noqa: E501 + return wav2vec2_model( + extractor_mode="group_norm", + extractor_conv_layer_config=None, + extractor_conv_bias=False, + encoder_embed_dim=768, + encoder_projection_dropout=encoder_projection_dropout, + encoder_pos_conv_kernel=128, + encoder_pos_conv_groups=16, + encoder_num_layers=12, + encoder_num_heads=12, + encoder_attention_dropout=encoder_attention_dropout, + encoder_ff_interm_features=3072, + encoder_ff_interm_dropout=encoder_ff_interm_dropout, + encoder_dropout=encoder_dropout, + encoder_layer_norm_first=False, + encoder_layer_drop=encoder_layer_drop, + aux_num_out=aux_num_out, + ) + + +def wav2vec2_large( + encoder_projection_dropout: float = 0.1, + encoder_attention_dropout: float = 0.1, + encoder_ff_interm_dropout: float = 0.1, + encoder_dropout: float = 0.1, + encoder_layer_drop: float = 0.1, + aux_num_out: Optional[int] = None, +) -> Wav2Vec2Model: + """Builds "large" :class:`~torchaudio.models.Wav2Vec2Model` from *wav2vec 2.0* :cite:`baevski2020wav2vec` + + Args: + encoder_projection_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_attention_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_ff_interm_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_layer_drop (float): + See :py:func:`wav2vec2_model`. + aux_num_out (int or None, optional): + See :py:func:`wav2vec2_model`. + + Returns: + Wav2Vec2Model: + The resulting model. 
+ """ # noqa: E501 + return wav2vec2_model( + extractor_mode="group_norm", + extractor_conv_layer_config=None, + extractor_conv_bias=False, + encoder_embed_dim=1024, + encoder_projection_dropout=encoder_projection_dropout, + encoder_pos_conv_kernel=128, + encoder_pos_conv_groups=16, + encoder_num_layers=24, + encoder_num_heads=16, + encoder_attention_dropout=encoder_attention_dropout, + encoder_ff_interm_features=4096, + encoder_ff_interm_dropout=encoder_ff_interm_dropout, + encoder_dropout=encoder_dropout, + encoder_layer_norm_first=False, + encoder_layer_drop=encoder_layer_drop, + aux_num_out=aux_num_out, + ) + + +def wav2vec2_large_lv60k( + encoder_projection_dropout: float = 0.1, + encoder_attention_dropout: float = 0.0, + encoder_ff_interm_dropout: float = 0.1, + encoder_dropout: float = 0.0, + encoder_layer_drop: float = 0.1, + aux_num_out: Optional[int] = None, +) -> Wav2Vec2Model: + """Builds "large lv-60k" :class:`~torchaudio.models.Wav2Vec2Model` from *wav2vec 2.0* :cite:`baevski2020wav2vec` + + Args: + encoder_projection_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_attention_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_ff_interm_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_layer_drop (float): + See :py:func:`wav2vec2_model`. + aux_num_out (int or None, optional): + See :py:func:`wav2vec2_model`. + + Returns: + Wav2Vec2Model: + The resulting model. 
+ """ # noqa: E501 + return wav2vec2_model( + extractor_mode="layer_norm", + extractor_conv_layer_config=None, + extractor_conv_bias=True, + encoder_embed_dim=1024, + encoder_projection_dropout=encoder_projection_dropout, + encoder_pos_conv_kernel=128, + encoder_pos_conv_groups=16, + encoder_num_layers=24, + encoder_num_heads=16, + encoder_attention_dropout=encoder_attention_dropout, + encoder_ff_interm_features=4096, + encoder_ff_interm_dropout=encoder_ff_interm_dropout, + encoder_dropout=encoder_dropout, + encoder_layer_norm_first=True, + encoder_layer_drop=encoder_layer_drop, + aux_num_out=aux_num_out, + ) + + +def hubert_base( + encoder_projection_dropout: float = 0.1, + encoder_attention_dropout: float = 0.1, + encoder_ff_interm_dropout: float = 0.0, + encoder_dropout: float = 0.1, + encoder_layer_drop: float = 0.05, + aux_num_out: Optional[int] = None, +) -> Wav2Vec2Model: + """Builds "base" :class:`HuBERT ` from *HuBERT* :cite:`hsu2021hubert` + + Args: + encoder_projection_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_attention_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_ff_interm_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_layer_drop (float): + See :py:func:`wav2vec2_model`. + aux_num_out (int or None, optional): + See :py:func:`wav2vec2_model`. + + Returns: + Wav2Vec2Model: + The resulting model. 
+ """ # noqa: E501 + return wav2vec2_model( + extractor_mode="group_norm", + extractor_conv_layer_config=None, + extractor_conv_bias=False, + encoder_embed_dim=768, + encoder_projection_dropout=encoder_projection_dropout, + encoder_pos_conv_kernel=128, + encoder_pos_conv_groups=16, + encoder_num_layers=12, + encoder_num_heads=12, + encoder_attention_dropout=encoder_attention_dropout, + encoder_ff_interm_features=3072, + encoder_ff_interm_dropout=encoder_ff_interm_dropout, + encoder_dropout=encoder_dropout, + encoder_layer_norm_first=False, + encoder_layer_drop=encoder_layer_drop, + aux_num_out=aux_num_out, + ) + + +def hubert_large( + encoder_projection_dropout: float = 0.0, + encoder_attention_dropout: float = 0.0, + encoder_ff_interm_dropout: float = 0.0, + encoder_dropout: float = 0.0, + encoder_layer_drop: float = 0.0, + aux_num_out: Optional[int] = None, +) -> Wav2Vec2Model: + """Builds "large" :class:`HuBERT ` from *HuBERT* :cite:`hsu2021hubert` + + Args: + encoder_projection_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_attention_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_ff_interm_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_layer_drop (float): + See :py:func:`wav2vec2_model`. + aux_num_out (int or None, optional): + See :py:func:`wav2vec2_model`. + + Returns: + Wav2Vec2Model: + The resulting model. 
+ """ # noqa: E501 + return wav2vec2_model( + extractor_mode="layer_norm", + extractor_conv_layer_config=None, + extractor_conv_bias=False, + encoder_embed_dim=1024, + encoder_projection_dropout=encoder_projection_dropout, + encoder_pos_conv_kernel=128, + encoder_pos_conv_groups=16, + encoder_num_layers=24, + encoder_num_heads=16, + encoder_attention_dropout=encoder_attention_dropout, + encoder_ff_interm_features=4096, + encoder_ff_interm_dropout=encoder_ff_interm_dropout, + encoder_dropout=encoder_dropout, + encoder_layer_norm_first=True, + encoder_layer_drop=encoder_layer_drop, + aux_num_out=aux_num_out, + ) + + +def hubert_xlarge( + encoder_projection_dropout: float = 0.0, + encoder_attention_dropout: float = 0.0, + encoder_ff_interm_dropout: float = 0.0, + encoder_dropout: float = 0.0, + encoder_layer_drop: float = 0.0, + aux_num_out: Optional[int] = None, +) -> Wav2Vec2Model: + """Builds "extra large" :class:`HuBERT ` from *HuBERT* :cite:`hsu2021hubert` + + Args: + encoder_projection_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_attention_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_ff_interm_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_dropout (float): + See :py:func:`wav2vec2_model`. + encoder_layer_drop (float): + See :py:func:`wav2vec2_model`. + aux_num_out (int or None, optional): + See :py:func:`wav2vec2_model`. + + Returns: + Wav2Vec2Model: + The resulting model. 
+ """ # noqa: E501 + return wav2vec2_model( + extractor_mode="layer_norm", + extractor_conv_layer_config=None, + extractor_conv_bias=False, + encoder_embed_dim=1280, + encoder_projection_dropout=encoder_projection_dropout, + encoder_pos_conv_kernel=128, + encoder_pos_conv_groups=16, + encoder_num_layers=48, + encoder_num_heads=16, + encoder_attention_dropout=encoder_attention_dropout, + encoder_ff_interm_features=5120, + encoder_ff_interm_dropout=encoder_ff_interm_dropout, + encoder_dropout=encoder_dropout, + encoder_layer_norm_first=True, + encoder_layer_drop=encoder_layer_drop, + aux_num_out=aux_num_out, + ) + + +def _init_hubert_pretrain_model(module): + if isinstance(module, components.ConvLayerBlock): + torch.nn.init.kaiming_normal_(module.conv.weight) + elif isinstance(module, components.ConvolutionalPositionalEmbedding): + # normalize the weight to normal distribution. + std = math.sqrt(4.0 / (module.embed_dim * module.kernel_size)) + torch.nn.init.normal_(module.conv.weight, mean=0.0, std=std) + torch.nn.init.constant_(module.conv.bias, 0.0) + elif isinstance(module, components.SelfAttention): + # normalize the query, key, value, and out_proj parameters in self attention module. 
+ torch.nn.init.xavier_uniform_(module.k_proj.weight, gain=1 / math.sqrt(2)) + torch.nn.init.xavier_uniform_(module.v_proj.weight, gain=1 / math.sqrt(2)) + torch.nn.init.xavier_uniform_(module.q_proj.weight, gain=1 / math.sqrt(2)) + torch.nn.init.xavier_uniform_(module.out_proj.weight) + torch.nn.init.constant_(module.out_proj.bias, 0.0) + elif isinstance(module, components.Transformer): + module.apply(components._init_transformer_params) + else: + pass + + +def hubert_pretrain_model( + extractor_mode: str, + extractor_conv_layer_config: Optional[List[Tuple[int, int, int]]], + extractor_conv_bias: bool, + encoder_embed_dim: int, + encoder_projection_dropout: float, + encoder_pos_conv_kernel: int, + encoder_pos_conv_groups: int, + encoder_num_layers: int, + encoder_num_heads: int, + encoder_attention_dropout: float, + encoder_ff_interm_features: int, + encoder_ff_interm_dropout: float, + encoder_dropout: float, + encoder_layer_norm_first: bool, + encoder_layer_drop: float, + mask_prob: float, + mask_selection: str, + mask_other: float, + mask_length: int, + no_mask_overlap: bool, + mask_min_space: int, + mask_channel_prob: float, + mask_channel_selection: str, + mask_channel_other: float, + mask_channel_length: int, + no_mask_channel_overlap: bool, + mask_channel_min_space: int, + skip_masked: bool, + skip_nomask: bool, + num_classes: int, + final_dim: int, + feature_grad_mult: Optional[float], +) -> HuBERTPretrainModel: + """Builds custom :class:`HuBERTPretrainModel` for training from scratch + + Note: + The "feature extractor" below corresponds to + `ConvFeatureExtractionModel `__ + in the original ``fairseq`` implementation. + This is referred as "(convolutional) feature encoder" in the *wav2vec 2.0* + :cite:`baevski2020wav2vec` paper. + + The "encoder" below corresponds to `TransformerEncoder `__, + and this is referred as "Transformer" in the paper. + + Args: + extractor_mode (str): Operation mode of feature extractor. 
+ Valid values are ``"group_norm"`` or ``"layer_norm"``. + If ``"group_norm"``, then a single normalization is applied + in the first convolution block. Otherwise, all the convolution + blocks will have layer normalization. + + This option corresponds to ``extractor_mode`` from ``fairseq``. + + extractor_conv_layer_config (list of integer tuples or None): + Configuration of convolution layers in feature extractor. + List of convolution configuration, + i.e. ``[(output_channel, kernel_size, stride), ...]`` + + If ``None`` is provided, then the following default value is used. + + .. code-block:: python + + [ + (512, 10, 5), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 2, 2), + (512, 2, 2), + ] + + This option corresponds to ``conv_feature_layers`` from ``fairseq``. + + extractor_conv_bias (bool): + Whether to include bias term to each convolution operation. + + This option corresponds to ``conv_bias`` from ``fairseq``. + + encoder_embed_dim (int): + The dimension of embedding in encoder. + + This option corresponds to ``encoder_embed_dim`` from ``fairseq``. + + encoder_projection_dropout (float): + The dropout probability applied after the input feature is projected + to ``encoder_embed_dim``. + + This option corresponds to ``dropout_input`` from ``fairseq``. + + encoder_pos_conv_kernel (int): + The kernel size of convolutional positional embeddings. + + This option corresponds to ``conv_pos`` from ``fairseq``. + + encoder_pos_conv_groups (int): + The number of groups of convolutional positional embeddings. + + This option corresponds to ``conv_pos_groups`` from ``fairseq``. + + encoder_num_layers (int): + The number of self attention layers in transformer block. + + This option corresponds to ``encoder_layers`` from ``fairseq``. + + encoder_num_heads (int): + The number of heads in self attention layers. + + This option corresponds to ``encoder_attention_heads`` from ``fairseq``. 
+ + encoder_attention_dropout (float): + The dropout probability applied after softmax in self-attention layer. + + This option corresponds to ``attention_dropout`` from ``fairseq``. + + encoder_ff_interm_features (int): + The dimension of hidden features in feed forward layer. + + This option corresponds to ``encoder_ffn_embed_dim`` from ``fairseq``. + + encoder_ff_interm_dropout (float): + The dropout probability applied in feedforward layer. + + This option correspinds to ``activation_dropout`` from ``fairseq``. + + encoder_dropout (float): + The dropout probability applied at the end of feed forward layer. + + This option corresponds to ``dropout`` from ``fairseq``. + + encoder_layer_norm_first (bool): + Control the order of layer norm in transformer layer and each encoder layer. + If True, in transformer layer, layer norm is applied before features are fed + to encoder layers. In encoder layer, two layer norms are applied before and after + self attention. + If False, in transformer layer, layer norm is applied after features are fed + to encoder layers. In encoder layer, two layer norms are applied after self + attention, before and after feed forward. + + This option corresponds to ``layer_norm_first`` from ``fairseq``. + + encoder_layer_drop (float): + Probability to drop each encoder layer during training. + + This option corresponds to ``layerdrop`` from ``fairseq``. + + mask_prob (float): + Probability for each token to be chosen as start of the span to be masked. this will be multiplied by + number of timesteps divided by length of mask span to mask approximately this percentage of all elements. + However due to overlaps, the actual number will be smaller (unless no_overlap is True). + + This option corresponds to ``mask_prob`` from ``fairseq``. + + mask_selection (str): + How to choose the mask length. Options: [``static``, ``uniform``, ``normal``, ``poisson``]. + + This option corresponds to ``mask_selection`` from ``fairseq``. 
+ + mask_other (float): + Secondary mask argument (used for more complex distributions). + + This option corresponds to ``mask_other`` from ``fairseq``. + + mask_length (int): + The lengths of the mask. + + This option corresponds to ``mask_length`` from ``fairseq``. + + no_mask_overlap (bool): + Whether to allow masks to overlap. + + This option corresponds to ``no_mask_overlap`` from ``fairseq``. + + mask_min_space (int): + Minimum space between spans (if no overlap is enabled). + + This option corresponds to ``mask_min_space`` from ``fairseq``. + + mask_channel_prob: (float): + The probability of replacing a feature with 0. + + This option corresponds to ``mask_channel_prob`` from ``fairseq``. + + mask_channel_selection (str): + How to choose the mask length for channel masking. Options: [``static``, ``uniform``, ``normal``, ``poisson``]. + + This option corresponds to ``mask_channel_selection`` from ``fairseq``. + + mask_channel_other (float): + Secondary mask argument for channel masking(used for more complex distributions). + + This option corresponds to ``mask_channel_other`` from ``fairseq``. + + mask_channel_length (int): + Minimum space between spans (if no overlap is enabled) for channel masking. + + This option corresponds to ``mask_channel_length`` from ``fairseq``. + + no_mask_channel_overlap (bool): + Whether to allow channel masks to overlap. + + This option corresponds to ``no_mask_channel_overlap`` from ``fairseq``. + + mask_channel_min_space (int): + Minimum space between spans for channel masking(if no overlap is enabled). + + This option corresponds to ``mask_channel_min_space`` from ``fairseq``. + + skip_masked (bool): + If True, skip computing losses over masked frames. + + This option corresponds to ``skip_masked`` from ``fairseq``. + + skip_nomask (bool): + If True, skip computing losses over unmasked frames. + + This option corresponds to ``skip_nomask`` from ``fairseq``. + + num_classes (int): + The number of classes in the labels. 
+ + final_dim (int): + Project final representations and targets to `final_dim`. + + This option corresponds to ``final_dim`` from ``fairseq``. + + feature_grad_mult (float or None): + The factor to scale the convolutional feature extraction layer gradients by. + The scale factor will not affect the forward pass. + + This option corresponds to ``feature_grad_mult`` from ``fairseq``. + + Returns: + HuBERTPretrainModel: + The resulting model. + """ # noqa: E501 + if extractor_conv_layer_config is None: + extractor_conv_layer_config = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2 + + feature_extractor = components._get_feature_extractor( + extractor_mode, extractor_conv_layer_config, extractor_conv_bias + ) + encoder = components._get_encoder( + in_features=extractor_conv_layer_config[-1][0], + embed_dim=encoder_embed_dim, + dropout_input=encoder_projection_dropout, + pos_conv_kernel=encoder_pos_conv_kernel, + pos_conv_groups=encoder_pos_conv_groups, + num_layers=encoder_num_layers, + num_heads=encoder_num_heads, + attention_dropout=encoder_attention_dropout, + ff_interm_features=encoder_ff_interm_features, + ff_interm_dropout=encoder_ff_interm_dropout, + dropout=encoder_dropout, + layer_norm_first=encoder_layer_norm_first, + layer_drop=encoder_layer_drop, + ) + wav2vec2 = Wav2Vec2Model(feature_extractor, encoder) + mask_generator = components.MaskGenerator( + encoder_embed_dim, + mask_prob, + mask_selection, + mask_other, + mask_length, + no_mask_overlap, + mask_min_space, + mask_channel_prob, + mask_channel_selection, + mask_channel_other, + mask_channel_length, + no_mask_channel_overlap, + mask_channel_min_space, + ) + logit_generator = components.LogitGenerator( + encoder_embed_dim, + num_classes, + final_dim, + skip_masked, + skip_nomask, + ) + model = HuBERTPretrainModel( + wav2vec2=wav2vec2, + mask_generator=mask_generator, + logit_generator=logit_generator, + feature_grad_mult=feature_grad_mult, + ) + # initialize the model for pre-training + 
model.apply(_init_hubert_pretrain_model) + return model + + +def hubert_pretrain_base( + encoder_projection_dropout: float = 0.1, + encoder_attention_dropout: float = 0.1, + encoder_ff_interm_dropout: float = 0.0, + encoder_dropout: float = 0.1, + encoder_layer_drop: float = 0.05, + mask_prob: float = 0.8, + mask_channel_prob: float = 0.0, + mask_channel_length: int = 10, + feature_grad_mult: Optional[float] = 0.1, + num_classes: int = 100, +) -> HuBERTPretrainModel: + """Builds "base" :class:`HuBERTPretrainModel` from *HuBERT* :cite:`hsu2021hubert` for pretraining. + + Args: + encoder_projection_dropout (float): + See :py:func:`hubert_pretrain_model`. + encoder_attention_dropout (float): + See :py:func:`hubert_pretrain_model`. + encoder_ff_interm_dropout (float): + See :py:func:`hubert_pretrain_model`. + encoder_dropout (float): + See :py:func:`hubert_pretrain_model`. + encoder_layer_drop (float): + See :py:func:`hubert_pretrain_model`. + mask_prob (float): + See :py:func:`hubert_pretrain_model`. + mask_channel_prob (float): + See :py:func:`hubert_pretrain_model`. + mask_channel_length (int): + See :py:func:`hubert_pretrain_model`. + feature_grad_mult (float or None): + See :py:func:`hubert_pretrain_model`. + num_classes (int, optional): + See :py:func:`hubert_pretrain_model`. + + Returns: + HuBERTPretrainModel: + The resulting model. 
+ """ # noqa: E501 + return hubert_pretrain_model( + extractor_mode="group_norm", + extractor_conv_layer_config=None, + extractor_conv_bias=False, + encoder_embed_dim=768, + encoder_projection_dropout=encoder_projection_dropout, + encoder_pos_conv_kernel=128, + encoder_pos_conv_groups=16, + encoder_num_layers=12, + encoder_num_heads=12, + encoder_attention_dropout=encoder_attention_dropout, + encoder_ff_interm_features=3072, + encoder_ff_interm_dropout=encoder_ff_interm_dropout, + encoder_dropout=encoder_dropout, + encoder_layer_norm_first=False, + encoder_layer_drop=encoder_layer_drop, + mask_prob=mask_prob, + mask_selection="static", + mask_other=0.0, + mask_length=10, + no_mask_overlap=False, + mask_min_space=1, + mask_channel_prob=mask_channel_prob, + mask_channel_selection="static", + mask_channel_other=0.0, + mask_channel_length=mask_channel_length, + no_mask_channel_overlap=False, + mask_channel_min_space=1, + skip_masked=False, + skip_nomask=False, + num_classes=num_classes, + final_dim=256, + feature_grad_mult=feature_grad_mult, + ) + + +def hubert_pretrain_large( + encoder_projection_dropout: float = 0.0, + encoder_attention_dropout: float = 0.0, + encoder_ff_interm_dropout: float = 0.0, + encoder_dropout: float = 0.0, + encoder_layer_drop: float = 0.0, + mask_prob: float = 0.8, + mask_channel_prob: float = 0.0, + mask_channel_length: int = 10, + feature_grad_mult: Optional[float] = None, +) -> HuBERTPretrainModel: + """Builds "large" :class:`HuBERTPretrainModel` from *HuBERT* :cite:`hsu2021hubert` for pretraining. + + Args: + encoder_projection_dropout (float): + See :py:func:`hubert_pretrain_model`. + encoder_attention_dropout (float): + See :py:func:`hubert_pretrain_model`. + encoder_ff_interm_dropout (float): + See :py:func:`hubert_pretrain_model`. + encoder_dropout (float): + See :py:func:`hubert_pretrain_model`. + encoder_layer_drop (float): + See :py:func:`hubert_pretrain_model`. + mask_prob (float): + See :py:func:`hubert_pretrain_model`. 
+ mask_channel_prob (float): + See :py:func:`hubert_pretrain_model`. + mask_channel_length (int): + See :py:func:`hubert_pretrain_model`. + feature_grad_mult (float or None): + See :py:func:`hubert_pretrain_model`. + + Returns: + HuBERTPretrainModel: + The resulting model. + """ # noqa: E501 + return hubert_pretrain_model( + extractor_mode="layer_norm", + extractor_conv_layer_config=None, + extractor_conv_bias=False, + encoder_embed_dim=1024, + encoder_projection_dropout=encoder_projection_dropout, + encoder_pos_conv_kernel=128, + encoder_pos_conv_groups=16, + encoder_num_layers=24, + encoder_num_heads=16, + encoder_attention_dropout=encoder_attention_dropout, + encoder_ff_interm_features=4096, + encoder_ff_interm_dropout=encoder_ff_interm_dropout, + encoder_dropout=encoder_dropout, + encoder_layer_norm_first=True, + encoder_layer_drop=encoder_layer_drop, + mask_prob=mask_prob, + mask_selection="static", + mask_other=0.0, + mask_length=10, + no_mask_overlap=False, + mask_min_space=1, + mask_channel_prob=mask_channel_prob, + mask_channel_selection="static", + mask_channel_other=0.0, + mask_channel_length=mask_channel_length, + no_mask_channel_overlap=False, + mask_channel_min_space=1, + skip_masked=False, + skip_nomask=False, + num_classes=500, + final_dim=768, + feature_grad_mult=feature_grad_mult, + ) + + +def hubert_pretrain_xlarge( + encoder_projection_dropout: float = 0.0, + encoder_attention_dropout: float = 0.0, + encoder_ff_interm_dropout: float = 0.0, + encoder_dropout: float = 0.0, + encoder_layer_drop: float = 0.0, + mask_prob: float = 0.8, + mask_channel_prob: float = 0.0, + mask_channel_length: int = 10, + feature_grad_mult: Optional[float] = None, +) -> HuBERTPretrainModel: + """Builds "extra large" :class:`HuBERTPretrainModel` from *HuBERT* :cite:`hsu2021hubert` for pretraining. + + Args: + encoder_projection_dropout (float): + See :py:func:`hubert_pretrain_model`. + encoder_attention_dropout (float): + See :py:func:`hubert_pretrain_model`. 
+ encoder_ff_interm_dropout (float): + See :py:func:`hubert_pretrain_model`. + encoder_dropout (float): + See :py:func:`hubert_pretrain_model`. + encoder_layer_drop (float): + See :py:func:`hubert_pretrain_model`. + mask_prob (float): + See :py:func:`hubert_pretrain_model`. + mask_channel_prob (float): + See :py:func:`hubert_pretrain_model`. + mask_channel_length (int): + See :py:func:`hubert_pretrain_model`. + feature_grad_mult (float or None): + See :py:func:`hubert_pretrain_model`. + + Returns: + HuBERTPretrainModel: + The resulting model. + """ # noqa: E501 + return hubert_pretrain_model( + extractor_mode="layer_norm", + extractor_conv_layer_config=None, + extractor_conv_bias=False, + encoder_embed_dim=1280, + encoder_projection_dropout=encoder_projection_dropout, + encoder_pos_conv_kernel=128, + encoder_pos_conv_groups=16, + encoder_num_layers=48, + encoder_num_heads=16, + encoder_attention_dropout=encoder_attention_dropout, + encoder_ff_interm_features=5120, + encoder_ff_interm_dropout=encoder_ff_interm_dropout, + encoder_dropout=encoder_dropout, + encoder_layer_norm_first=True, + encoder_layer_drop=encoder_layer_drop, + mask_prob=mask_prob, + mask_selection="static", + mask_other=0.0, + mask_length=10, + no_mask_overlap=False, + mask_min_space=1, + mask_channel_prob=mask_channel_prob, + mask_channel_selection="static", + mask_channel_other=0.0, + mask_channel_length=mask_channel_length, + no_mask_channel_overlap=False, + mask_channel_min_space=1, + skip_masked=False, + skip_nomask=False, + num_classes=500, + final_dim=1024, + feature_grad_mult=feature_grad_mult, + ) + + +def wavlm_model( + extractor_mode: str, + extractor_conv_layer_config: Optional[List[Tuple[int, int, int]]], + extractor_conv_bias: bool, + encoder_embed_dim: int, + encoder_projection_dropout: float, + encoder_pos_conv_kernel: int, + encoder_pos_conv_groups: int, + encoder_num_layers: int, + encoder_num_heads: int, + encoder_num_buckets: int, + encoder_max_distance: int, + 
encoder_attention_dropout: float, + encoder_ff_interm_features: int, + encoder_ff_interm_dropout: float, + encoder_dropout: float, + encoder_layer_norm_first: bool, + encoder_layer_drop: float, + aux_num_out: Optional[int], +) -> Wav2Vec2Model: + """Builds custom WaveLM model :cite:`chen2022wavlm`. The architecture is compatible + with Wav2Vec2 model :cite:`baevski2020wav2vec`, and so the output object is + :class:`~torchaudio.models.Wav2Vec2Model`. Most of the arguments have the same meaning + as in :py:func:`~torchaudio.models.wav2vec2_model` so please refer there for documentation. + + Args: + extractor_mode (str): Operation mode of feature extractor. + See :py:func:`~torchaudio.models.wav2vec2_model`. + + extractor_conv_layer_config (list of integer tuples or None): + See :py:func:`~torchaudio.models.wav2vec2_model`. + + extractor_conv_bias (bool): + See :py:func:`~torchaudio.models.wav2vec2_model`. + + encoder_embed_dim (int): + See :py:func:`~torchaudio.models.wav2vec2_model`. + + encoder_projection_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + + encoder_pos_conv_kernel (int): + See :py:func:`~torchaudio.models.wav2vec2_model`. + + encoder_pos_conv_groups (int): + See :py:func:`~torchaudio.models.wav2vec2_model`. + + encoder_num_layers (int): + See :py:func:`~torchaudio.models.wav2vec2_model`. + + encoder_num_heads (int): + See :py:func:`~torchaudio.models.wav2vec2_model`. + + encoder_num_buckets (int): + Number of buckets for relative position embedding. + encoder_max_distance (int): + Maximum distance for relative position embedding. + + encoder_attention_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + + encoder_ff_interm_features (int): + See :py:func:`~torchaudio.models.wav2vec2_model`. + + encoder_ff_interm_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + + encoder_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. 
+ + encoder_layer_norm_first (bool): + See :py:func:`~torchaudio.models.wav2vec2_model`. + + encoder_layer_drop (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + + aux_num_out (int or None): + See :py:func:`~torchaudio.models.wav2vec2_model`. + + Returns: + Wav2Vec2Model: + The resulting model. + """ + if extractor_conv_layer_config is None: + extractor_conv_layer_config = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2 + + feature_extractor = components._get_feature_extractor( + extractor_mode, extractor_conv_layer_config, extractor_conv_bias + ) + encoder = components._get_wavlm_encoder( + in_features=extractor_conv_layer_config[-1][0], + embed_dim=encoder_embed_dim, + dropout_input=encoder_projection_dropout, + pos_conv_kernel=encoder_pos_conv_kernel, + pos_conv_groups=encoder_pos_conv_groups, + num_layers=encoder_num_layers, + num_heads=encoder_num_heads, + num_buckets=encoder_num_buckets, + max_distance=encoder_max_distance, + attention_dropout=encoder_attention_dropout, + ff_interm_features=encoder_ff_interm_features, + ff_interm_dropout=encoder_ff_interm_dropout, + dropout=encoder_dropout, + layer_norm_first=encoder_layer_norm_first, + layer_drop=encoder_layer_drop, + ) + aux = None + if aux_num_out is not None: + aux = torch.nn.Linear(in_features=encoder_embed_dim, out_features=aux_num_out) + return Wav2Vec2Model(feature_extractor, encoder, aux) + + +def wavlm_base( + encoder_projection_dropout: float = 0.1, + encoder_attention_dropout: float = 0.1, + encoder_ff_interm_dropout: float = 0.1, + encoder_dropout: float = 0.1, + encoder_layer_drop: float = 0.1, + aux_num_out: Optional[int] = None, +) -> Wav2Vec2Model: + """Builds "base" WaveLM model :cite:`chen2022wavlm`. The architecture is compatible + with Wav2Vec2 model :cite:`baevski2020wav2vec`, and so the output class is + :class:`~torchaudio.models.Wav2Vec2Model`. + + Args: + encoder_projection_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. 
+ encoder_attention_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + encoder_ff_interm_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + encoder_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + encoder_layer_drop (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + aux_num_out (int, optional): + See :py:func:`~torchaudio.models.wav2vec2_model`. + + Returns: + Wav2Vec2Model: + The resulting model. + """ + return wavlm_model( + extractor_mode="group_norm", + extractor_conv_layer_config=None, + extractor_conv_bias=False, + encoder_embed_dim=768, + encoder_projection_dropout=encoder_projection_dropout, + encoder_pos_conv_kernel=128, + encoder_pos_conv_groups=16, + encoder_num_layers=12, + encoder_num_heads=12, + encoder_num_buckets=320, + encoder_max_distance=800, + encoder_attention_dropout=encoder_attention_dropout, + encoder_ff_interm_features=3072, + encoder_ff_interm_dropout=encoder_ff_interm_dropout, + encoder_dropout=encoder_dropout, + encoder_layer_norm_first=False, + encoder_layer_drop=encoder_layer_drop, + aux_num_out=aux_num_out, + ) + + +def wavlm_large( + encoder_projection_dropout: float = 0.1, + encoder_attention_dropout: float = 0.1, + encoder_ff_interm_dropout: float = 0.0, + encoder_dropout: float = 0.1, + encoder_layer_drop: float = 0.1, + aux_num_out: Optional[int] = None, +) -> Wav2Vec2Model: + """Builds "large" WaveLM model :cite:`chen2022wavlm`. The architecture is compatible + with Wav2Vec2 model :cite:`baevski2020wav2vec`, and so the output class is + :class:`~torchaudio.models.Wav2Vec2Model`. + + Args: + encoder_projection_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + encoder_attention_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + encoder_ff_interm_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + encoder_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. 
+ encoder_layer_drop (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + aux_num_out (int, optional): + See :py:func:`~torchaudio.models.wav2vec2_model`. + + Returns: + Wav2Vec2Model: + The resulting model. + """ + return wavlm_model( + extractor_mode="layer_norm", + extractor_conv_layer_config=None, + extractor_conv_bias=False, + encoder_embed_dim=1024, + encoder_projection_dropout=encoder_projection_dropout, + encoder_pos_conv_kernel=128, + encoder_pos_conv_groups=16, + encoder_num_layers=24, + encoder_num_heads=16, + encoder_num_buckets=320, + encoder_max_distance=800, + encoder_attention_dropout=encoder_attention_dropout, + encoder_ff_interm_features=4096, + encoder_ff_interm_dropout=encoder_ff_interm_dropout, + encoder_dropout=encoder_dropout, + encoder_layer_norm_first=True, + encoder_layer_drop=encoder_layer_drop, + aux_num_out=aux_num_out, + ) + + +def wav2vec2_xlsr_300m( + encoder_projection_dropout: float = 0.0, + encoder_attention_dropout: float = 0.0, + encoder_ff_interm_dropout: float = 0.0, + encoder_dropout: float = 0.0, + encoder_layer_drop: float = 0.0, + aux_num_out: Optional[int] = None, +) -> Wav2Vec2Model: + """Builds XLS-R model :cite:`babu2021xls` with 300 millions of parameters. The architecture is compatible + with Wav2Vec2 model :cite:`baevski2020wav2vec`, and so the output class is + :class:`~torchaudio.models.Wav2Vec2Model`. + + Args: + encoder_projection_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + encoder_attention_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + encoder_ff_interm_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + encoder_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + encoder_layer_drop (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + aux_num_out (int, optional): + See :py:func:`~torchaudio.models.wav2vec2_model`. + + Returns: + Wav2Vec2Model: + The resulting model. 
+ """ + return wav2vec2_model( + extractor_mode="layer_norm", + extractor_conv_layer_config=None, + extractor_conv_bias=True, + encoder_embed_dim=1024, + encoder_projection_dropout=encoder_projection_dropout, + encoder_pos_conv_kernel=128, + encoder_pos_conv_groups=16, + encoder_num_layers=24, + encoder_num_heads=16, + encoder_attention_dropout=encoder_attention_dropout, + encoder_ff_interm_features=4096, + encoder_ff_interm_dropout=encoder_ff_interm_dropout, + encoder_dropout=encoder_dropout, + encoder_layer_norm_first=True, + encoder_layer_drop=encoder_layer_drop, + aux_num_out=aux_num_out, + ) + + +def wav2vec2_xlsr_1b( + encoder_projection_dropout: float = 0.1, + encoder_attention_dropout: float = 0.0, + encoder_ff_interm_dropout: float = 0.0, + encoder_dropout: float = 0.0, + encoder_layer_drop: float = 0.0, + aux_num_out: Optional[int] = None, +) -> Wav2Vec2Model: + """Builds XLS-R model :cite:`babu2021xls` with 1 billion of parameters. The architecture is compatible + with Wav2Vec2 model :cite:`baevski2020wav2vec`, and so the output class is + :class:`~torchaudio.models.Wav2Vec2Model`. + + Args: + encoder_projection_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + encoder_attention_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + encoder_ff_interm_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + encoder_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + encoder_layer_drop (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + aux_num_out (int, optional): + See :py:func:`~torchaudio.models.wav2vec2_model`. + + Returns: + Wav2Vec2Model: + The resulting model. 
+ """ + return wav2vec2_model( + extractor_mode="layer_norm", + extractor_conv_layer_config=None, + extractor_conv_bias=True, + encoder_embed_dim=1280, + encoder_projection_dropout=encoder_projection_dropout, + encoder_pos_conv_kernel=128, + encoder_pos_conv_groups=16, + encoder_num_layers=48, + encoder_num_heads=16, + encoder_attention_dropout=encoder_attention_dropout, + encoder_ff_interm_features=5120, + encoder_ff_interm_dropout=encoder_ff_interm_dropout, + encoder_dropout=encoder_dropout, + encoder_layer_norm_first=True, + encoder_layer_drop=encoder_layer_drop, + aux_num_out=aux_num_out, + ) + + +def wav2vec2_xlsr_2b( + encoder_projection_dropout: float = 0.1, + encoder_attention_dropout: float = 0.0, + encoder_ff_interm_dropout: float = 0.0, + encoder_dropout: float = 0.0, + encoder_layer_drop: float = 0.0, + aux_num_out: Optional[int] = None, +) -> Wav2Vec2Model: + """Builds XLS-R model :cite:`babu2021xls` with 2 billions of parameters. The architecture is compatible + with Wav2Vec2 model :cite:`baevski2020wav2vec`, and so the output class is + :class:`~torchaudio.models.Wav2Vec2Model`. + + Args: + encoder_projection_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + encoder_attention_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + encoder_ff_interm_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + encoder_dropout (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + encoder_layer_drop (float): + See :py:func:`~torchaudio.models.wav2vec2_model`. + aux_num_out (int, optional): + See :py:func:`~torchaudio.models.wav2vec2_model`. + + Returns: + Wav2Vec2Model: + The resulting model. 
+ """ + return wav2vec2_model( + extractor_mode="layer_norm", + extractor_conv_layer_config=None, + extractor_conv_bias=True, + encoder_embed_dim=1920, + encoder_projection_dropout=encoder_projection_dropout, + encoder_pos_conv_kernel=128, + encoder_pos_conv_groups=16, + encoder_num_layers=48, + encoder_num_heads=16, + encoder_attention_dropout=encoder_attention_dropout, + encoder_ff_interm_features=7680, + encoder_ff_interm_dropout=encoder_ff_interm_dropout, + encoder_dropout=encoder_dropout, + encoder_layer_norm_first=True, + encoder_layer_drop=encoder_layer_drop, + aux_num_out=aux_num_out, + ) diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/utils/__init__.py b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0457b5dd707f7216adc3ea919ba8e257d86f4f71 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/utils/__init__.py @@ -0,0 +1,7 @@ +from .import_fairseq import import_fairseq_model +from .import_huggingface import import_huggingface_model + +__all__ = [ + "import_huggingface_model", + "import_fairseq_model", +] diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/utils/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc0815bdc42c7621eb5209f231246e3752a765b1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/utils/__pycache__/import_fairseq.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/utils/__pycache__/import_fairseq.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..ab3a6779291122b7beab2172a36b26d1b5210d52 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/utils/__pycache__/import_fairseq.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/utils/__pycache__/import_huggingface.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/utils/__pycache__/import_huggingface.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19cb5925216f10335f65acb3fdcc267b700e9c90 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/utils/__pycache__/import_huggingface.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/utils/import_fairseq.py b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/utils/import_fairseq.py new file mode 100644 index 0000000000000000000000000000000000000000..39791e9b7d75ac3c2eb1fcf4f9c3517e7483048c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/utils/import_fairseq.py @@ -0,0 +1,213 @@ +"""Import fariseq's wav2vec2.0 pretrained weights to torchaudios's format. + +For this module to work, you need `fairseq`. 
+""" +import re + +from torch.nn import Module + +from ..model import wav2vec2_model, Wav2Vec2Model + + +def _parse_config(w2v_model): + encoder = w2v_model.encoder + conv_layers = w2v_model.feature_extractor.conv_layers + + extractor_mode = "layer_norm" + if "GroupNorm" in conv_layers[0][2].__class__.__name__: + extractor_mode = "group_norm" + else: + extractor_mode = "layer_norm" + + conv_layer_config = [(l[0].out_channels, l[0].kernel_size[0], l[0].stride[0]) for l in conv_layers] + + if all(l[0].bias is None for l in conv_layers): + conv_bias = False + elif all(l[0].bias is not None for l in conv_layers): + conv_bias = True + else: + raise ValueError("Either all the convolutions layers have bias term or none of them should.") + + config = { + "extractor_mode": extractor_mode, + "extractor_conv_layer_config": conv_layer_config, + "extractor_conv_bias": conv_bias, + "encoder_embed_dim": w2v_model.post_extract_proj.out_features, + "encoder_projection_dropout": w2v_model.dropout_input.p, + "encoder_pos_conv_kernel": encoder.pos_conv[0].kernel_size[0], + "encoder_pos_conv_groups": encoder.pos_conv[0].groups, + "encoder_num_layers": len(encoder.layers), + "encoder_num_heads": encoder.layers[0].self_attn.num_heads, + "encoder_attention_dropout": encoder.layers[0].self_attn.dropout_module.p, + "encoder_ff_interm_features": encoder.layers[0].fc1.out_features, + "encoder_ff_interm_dropout": encoder.layers[0].dropout2.p, + "encoder_dropout": encoder.layers[0].dropout3.p, + "encoder_layer_norm_first": encoder.layer_norm_first, + "encoder_layer_drop": encoder.layerdrop, + } + return config + + +def _map_key(key): + key_ = key + if key.startswith("w2v_model."): + key = key.replace("w2v_model.", "") + if re.match(r"(mask_emb|quantizer|project_q|final_proj|mask_emb)", key): + return None + # Feature Extractor + # Group norm when "extractor_mode" is "default". 
+ # (Only the first layer) + # "conv_layers.0.2.weight" -> "conv_layers.0.layer_norm.weight" + # "conv_layers.0.2.bias" -> "conv_layers.0.layer_norm.bias" + match = re.match(r"feature_extractor\.conv_layers\.0\.2\.(weight|bias)", key) + if match: + return f"feature_extractor.conv_layers.0.layer_norm.{match.group(1)}" + # Convolutions + # "conv_layers.X.0.weight" -> "conv_layers.X.conv.weight" + # "conv_layers.X.0.bias" -> "conv_layers.X.conv.bias" + match = re.match(r"feature_extractor\.conv_layers\.(\d+)\.0\.(weight|bias)", key) + if match: + return f"feature_extractor.conv_layers.{match.group(1)}.conv.{match.group(2)}" + # Layer norm when "extractor_mode" is "layer_norm". + # "conv_layers.X.2.1.weight" -> "conv_layers.X.layer_norm.weight" + # "conv_layers.X.2.1.bias" -> "conv_layers.X.layer_norm.bias" + match = re.match(r"feature_extractor\.conv_layers\.(\d+)\.2\.1\.(weight|bias)", key) + if match: + return f"feature_extractor.conv_layers.{match.group(1)}.layer_norm.{match.group(2)}" + match = re.match(r"post_extract_proj\.(weight|bias)", key) + # Encoder - Feature projection + if match: + return f"encoder.feature_projection.projection.{match.group(1)}" + match = re.match(r"layer_norm\.(weight|bias)", key) + if match: + return f"encoder.feature_projection.layer_norm.{match.group(1)}" + # Encoder - Transformer - Convolutional positional embedding + match = re.match(r"encoder\.pos_conv\.0\.(bias|weight_g|weight_v)", key) + if match: + return f"encoder.transformer.pos_conv_embed.conv.{match.group(1)}" + match = re.match(r"encoder\.layer_norm\.(weight|bias)", key) + if match: + return f"encoder.transformer.layer_norm.{match.group(1)}" + # Encoder - Transformer - Self attention layers + match = re.match(r"encoder\.layers\.(\d+)\.self_attn\.((k_|v_|q_|out_)proj\.(weight|bias))", key) + if match: + return f"encoder.transformer.layers.{match.group(1)}.attention.{match.group(2)}" + match = re.match(r"encoder\.layers\.(\d+)\.self_attn_layer_norm\.(weight|bias)", key) + if 
match: + return f"encoder.transformer.layers.{match.group(1)}.layer_norm.{match.group(2)}" + match = re.match(r"encoder\.layers\.(\d+)\.fc1\.(weight|bias)", key) + if match: + return f"encoder.transformer.layers.{match.group(1)}.feed_forward.intermediate_dense.{match.group(2)}" + match = re.match(r"encoder\.layers\.(\d+)\.fc2\.(weight|bias)", key) + if match: + return f"encoder.transformer.layers.{match.group(1)}.feed_forward.output_dense.{match.group(2)}" + match = re.match(r"encoder\.layers\.(\d+)\.final_layer_norm\.(weight|bias)", key) + if match: + return f"encoder.transformer.layers.{match.group(1)}.final_layer_norm.{match.group(2)}" + match = re.match(r"proj\.(weight|bias)", key) + # Auxiliary Module + # Only relevant when loading fine-tuned models + if match: + return f"aux.{match.group(1)}" + # HuBERT Extension + if key in ["label_embs_concat"]: + return key + raise ValueError(f"Unexpected key: {key_}") + + +def _convert_state_dict(state_dict): + converted = {} + for k, v in state_dict.items(): + k = _map_key(k) + if k is not None: + converted[k] = v + return converted + + +def import_fairseq_model(original: Module) -> Wav2Vec2Model: + """Builds :class:`Wav2Vec2Model` from the corresponding model object of + `fairseq `_. + + Args: + original (torch.nn.Module): + An instance of fairseq's Wav2Vec2.0 or HuBERT model. + One of ``fairseq.models.wav2vec.wav2vec2_asr.Wav2VecEncoder``, + ``fairseq.models.wav2vec.wav2vec2.Wav2Vec2Model`` or + ``fairseq.models.hubert.hubert_asr.HubertEncoder``. + + Returns: + Wav2Vec2Model: Imported model. 
+ + Example - Loading pretrain-only model + >>> from torchaudio.models.wav2vec2.utils import import_fairseq_model + >>> + >>> # Load model using fairseq + >>> model_file = 'wav2vec_small.pt' + >>> model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([model_file]) + >>> original = model[0] + >>> imported = import_fairseq_model(original) + >>> + >>> # Perform feature extraction + >>> waveform, _ = torchaudio.load('audio.wav') + >>> features, _ = imported.extract_features(waveform) + >>> + >>> # Compare result with the original model from fairseq + >>> reference = original.feature_extractor(waveform).transpose(1, 2) + >>> torch.testing.assert_allclose(features, reference) + + Example - Fine-tuned model + >>> from torchaudio.models.wav2vec2.utils import import_fairseq_model + >>> + >>> # Load model using fairseq + >>> model_file = 'wav2vec_small_960h.pt' + >>> model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([model_file]) + >>> original = model[0] + >>> imported = import_fairseq_model(original.w2v_encoder) + >>> + >>> # Perform encoding + >>> waveform, _ = torchaudio.load('audio.wav') + >>> emission, _ = imported(waveform) + >>> + >>> # Compare result with the original model from fairseq + >>> mask = torch.zeros_like(waveform) + >>> reference = original(waveform, mask)['encoder_out'].transpose(0, 1) + >>> torch.testing.assert_allclose(emission, reference) + """ + class_ = original.__class__.__name__ + if class_ == "Wav2Vec2Model": + return _import_wav2vec2_pretraining(original) + if class_ == "Wav2VecEncoder": + return _import_wav2vec2_finetuning(original) + if class_ == "HubertModel": + return _import_hubert_pretraining(original) + if class_ == "HubertEncoder": + return _import_hubert_finetuning(original) + raise ValueError(f"Expected an instance of `Wav2Vec2Model` or `Wav2VecEncoder`. 
Found: {class_}") + + +def _import_wav2vec2_finetuning(original: Module) -> Wav2Vec2Model: + config = _parse_config(original.w2v_model) + model = wav2vec2_model(**config, aux_num_out=original.proj.out_features) + model.load_state_dict(_convert_state_dict(original.state_dict())) + return model + + +def _import_wav2vec2_pretraining(original: Module) -> Wav2Vec2Model: + config = _parse_config(original) + model = wav2vec2_model(**config, aux_num_out=None) + model.load_state_dict(_convert_state_dict(original.state_dict()), strict=False) + return model + + +def _import_hubert_finetuning(original: Module) -> Wav2Vec2Model: + config = _parse_config(original.w2v_model) + model = wav2vec2_model(**config, aux_num_out=original.proj.out_features) + model.load_state_dict(_convert_state_dict(original.state_dict()), strict=False) + return model + + +def _import_hubert_pretraining(original: Module) -> Wav2Vec2Model: + config = _parse_config(original) + model = wav2vec2_model(**config, aux_num_out=None) + model.load_state_dict(_convert_state_dict(original.state_dict()), strict=False) + return model diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/utils/import_huggingface.py b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/utils/import_huggingface.py new file mode 100644 index 0000000000000000000000000000000000000000..519d8c919f02be62b2f2e2aa0dd8db97222430d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/utils/import_huggingface.py @@ -0,0 +1,134 @@ +"""Import Hugging Face transformers's wav2vec2.0 pretrained weights to torchaudios's format. 
+""" +import logging +from typing import Any, Dict + +import torch +from torch.nn import Module + +from ..model import wav2vec2_model, Wav2Vec2Model, wavlm_model + +_LG = logging.getLogger(__name__) + + +def _get_config(cfg): + config = { + "extractor_mode": f"{cfg.feat_extract_norm}_norm", + "extractor_conv_layer_config": list(zip(cfg.conv_dim, cfg.conv_kernel, cfg.conv_stride)), + "extractor_conv_bias": cfg.conv_bias, + "encoder_embed_dim": cfg.hidden_size, + "encoder_projection_dropout": cfg.feat_proj_dropout, + "encoder_pos_conv_kernel": cfg.num_conv_pos_embeddings, + "encoder_pos_conv_groups": cfg.num_conv_pos_embedding_groups, + "encoder_num_layers": cfg.num_hidden_layers, + "encoder_num_heads": cfg.num_attention_heads, + "encoder_attention_dropout": cfg.attention_dropout, + "encoder_ff_interm_features": cfg.intermediate_size, + "encoder_ff_interm_dropout": cfg.activation_dropout, + "encoder_dropout": cfg.hidden_dropout, + "encoder_layer_norm_first": cfg.do_stable_layer_norm, + "encoder_layer_drop": cfg.layerdrop, + } + return config + + +def _get_config_wavlm(cfg): + config = { + "extractor_mode": f"{cfg.feat_extract_norm}_norm", + "extractor_conv_layer_config": list(zip(cfg.conv_dim, cfg.conv_kernel, cfg.conv_stride)), + "extractor_conv_bias": cfg.conv_bias, + "encoder_embed_dim": cfg.hidden_size, + "encoder_projection_dropout": cfg.feat_proj_dropout, + "encoder_pos_conv_kernel": cfg.num_conv_pos_embeddings, + "encoder_pos_conv_groups": cfg.num_conv_pos_embedding_groups, + "encoder_num_layers": cfg.num_hidden_layers, + "encoder_num_heads": cfg.num_attention_heads, + "encoder_num_buckets": cfg.num_buckets, + "encoder_max_distance": cfg.max_bucket_distance, + "encoder_attention_dropout": cfg.attention_dropout, + "encoder_ff_interm_features": cfg.intermediate_size, + "encoder_ff_interm_dropout": cfg.activation_dropout, + "encoder_dropout": cfg.hidden_dropout, + "encoder_layer_norm_first": cfg.do_stable_layer_norm, + "encoder_layer_drop": cfg.layerdrop, + } + 
return config + + +def _build(config, original): + is_for_ctc = original.__class__.__name__ in ["Wav2Vec2ForCTC", "WavLMForCTC"] + if is_for_ctc: + aux_num_out = original.config.vocab_size + wav2vec2 = original.wav2vec2 + else: + _LG.warning( + "The model is not an instance of Wav2Vec2ForCTC or WavLMForCTC. " '"lm_head" module is not imported.' + ) + aux_num_out = None + wav2vec2 = original + is_wavlm = original.__class__.__name__ in ["WavLMModel", "WavLMForCTC"] + if is_wavlm: + imported = wavlm_model(**config, aux_num_out=aux_num_out) + else: + imported = wav2vec2_model(**config, aux_num_out=aux_num_out) + imported.feature_extractor.load_state_dict(wav2vec2.feature_extractor.state_dict()) + imported.encoder.feature_projection.load_state_dict(wav2vec2.feature_projection.state_dict()) + encoder_state_dict = wav2vec2.encoder.state_dict() + if is_wavlm: # Rename paramaters of linear transformations for compatibility with the HF model + transform_wavlm_encoder_state(encoder_state_dict, config["encoder_num_layers"]) + imported.encoder.transformer.load_state_dict(encoder_state_dict) + if is_for_ctc: + imported.aux.load_state_dict(original.lm_head.state_dict()) + return imported + + +def transform_wavlm_encoder_state(state: Dict[str, Any], encoder_num_layers: int): + """Converts WavLM encoder state from HuggingFace format. In particular, concatenates linear projection weights and + biases to align with the structure of ``torch.nn.MultiheadAttention``. 
+ """ + for i in range(encoder_num_layers): + q_proj_bias = state.pop(f"layers.{i}.attention.q_proj.bias") + k_proj_bias = state.pop(f"layers.{i}.attention.k_proj.bias") + v_proj_bias = state.pop(f"layers.{i}.attention.v_proj.bias") + q_proj_weight = state.pop(f"layers.{i}.attention.q_proj.weight") + k_proj_weight = state.pop(f"layers.{i}.attention.k_proj.weight") + v_proj_weight = state.pop(f"layers.{i}.attention.v_proj.weight") + state[f"layers.{i}.attention.attention.in_proj_bias"] = torch.cat((q_proj_bias, k_proj_bias, v_proj_bias)) + state[f"layers.{i}.attention.attention.in_proj_weight"] = torch.cat( + (q_proj_weight, k_proj_weight, v_proj_weight) + ) + + state[f"layers.{i}.attention.attention.out_proj.weight"] = state.pop(f"layers.{i}.attention.out_proj.weight") + state[f"layers.{i}.attention.attention.out_proj.bias"] = state.pop(f"layers.{i}.attention.out_proj.bias") + + +def import_huggingface_model(original: Module) -> Wav2Vec2Model: + """Builds :class:`Wav2Vec2Model` from the corresponding model object of + `Transformers `_. + + Args: + original (torch.nn.Module): An instance of ``Wav2Vec2ForCTC`` from ``transformers``. + + Returns: + Wav2Vec2Model: Imported model. 
+ + Example + >>> from torchaudio.models.wav2vec2.utils import import_huggingface_model + >>> + >>> original = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") + >>> model = import_huggingface_model(original) + >>> + >>> waveforms, _ = torchaudio.load("audio.wav") + >>> logits, _ = model(waveforms) + """ + _LG.info("Importing model.") + _LG.info("Loading model configuration.") + is_wavlm = original.__class__.__name__ in ["WavLMModel", "WavLMForCTC"] + if is_wavlm: + config = _get_config_wavlm(original.config) + else: + config = _get_config(original.config) + _LG.debug(" - config: %s", config) + _LG.info("Building model.") + imported = _build(config, original) + return imported diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/wavlm_attention.py b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/wavlm_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..fafddfeb958cbcdfdc0a7781b49bc124fff78290 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/models/wav2vec2/wavlm_attention.py @@ -0,0 +1,214 @@ +""" +The MIT License (MIT) + +Copyright (c) Microsoft Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +""" + +import math +from typing import Optional, Tuple + +import torch +from torch import nn, Tensor + + +class WavLMSelfAttention(nn.Module): + """Multi-headed self-attention for WavLM model :cite:`chen2022wavlm`. + Wraps around ``torch.nn.MultiheadAttention``, creating relaive position embeddings and passing them to multi-headed + attention as a mask. + Source: https://github.com/microsoft/unilm/blob/2d8302f09c99bca2b82e6e868d81d4281cceebc8/wavlm/modules.py#L303-L763 + + Args: + embed_dim (int): Total dimension of the model. + num_heads (int): The number of heads. + dropout (float, optional): Dropout probability on attn_output_weights. (Default: to ``0.0``) + bias (bool, optional): If ``True``, add bias to input / output projection layers. (Default: ``True``) + has_relative_attention_bias (bool, optional): If ``True``, apply relative position embedding. + Necessary in the first encoder layer, but not in the subsequent ones. (Default: ``False``) + num_buckets (int, optional): Number of buckets for relative position embedding. (Default: ``32``) + max_distance (int, optional): Naximum distance for relative position embedding. (Default: ``128``) + gru_rel_pos (bool, optional): If ``True``, apply gated relative position embedding. 
(Default: ``False``) + """ + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + bias: bool = True, + has_relative_attention_bias: bool = False, + num_buckets: int = 32, + max_distance: int = 128, + gru_rel_pos: bool = True, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.has_relative_attention_bias = has_relative_attention_bias + self.num_buckets = num_buckets + self.max_distance = max_distance + + if has_relative_attention_bias: + self.rel_attn_embed = nn.Embedding(num_buckets, num_heads) + else: + self.rel_attn_embed = None + + self.head_dim = embed_dim // num_heads + assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + + self.dropout = dropout + self.attention = nn.MultiheadAttention(embed_dim, num_heads, dropout=dropout, bias=bias, batch_first=True) + + self.gru_rel_pos = gru_rel_pos + if self.gru_rel_pos: + self.gru_rel_pos_linear = nn.Linear(self.head_dim, 8) + self.gru_rel_pos_const = nn.Parameter(torch.ones(1, num_heads, 1, 1)) + self.has_position_bias = True + + def compute_bias(self, query_length: int, key_length: int) -> Tensor: + """Compute relative position embeddings for WavLM model. + Args: + query_length (int): Query position can take values between 0 and ``query_length - 1``. + key_length (int): Key position can take values between 0 and ``key_length - 1``. 
+ Returns: + Tensor of shape `(num_heads, query_length, key_length)`, relative positions embeddings + """ + context_position = torch.arange(query_length, dtype=torch.long)[:, None] + memory_position = torch.arange(key_length, dtype=torch.long)[None, :] + relative_position = memory_position - context_position # Shape (query_length, key_length) + relative_position_bucket = self._relative_positions_bucket(relative_position, bidirectional=True) + relative_position_bucket = relative_position_bucket.to(self.rel_attn_embed.weight.device) + values = self.rel_attn_embed(relative_position_bucket) # Shape (query_length, key_length, num_heads) + values = values.permute([2, 0, 1]) + return values + + def _relative_positions_bucket(self, relative_positions: Tensor, bidirectional: bool = True): + """Compute relative position buckets for WavLM model. Computation similar to formula (5) in WavLM + paper :cite:`chen2022wavlm`. + Args: + relative_positions (Tensor): Relative offsets between query and key positions, + of shape ``(query_length, key_length)``. + bidirectional (bool): If ``True``, values will be filled both above and below the diagonal in the resulting + matrix. If ``False``, the elements above the diagonal (i.e. with negative relative offsets) will be set + to zero. (Default ``True``) + Returns: + Tensor of shape ``(query_length, key_length)`` filled bucketed values of with relative positions. 
+ """ + num_buckets = self.num_buckets + max_distance = self.max_distance + # Shape (query_length, key_length) + relative_buckets = torch.zeros_like(relative_positions, dtype=torch.long) + + if bidirectional: + num_buckets = num_buckets // 2 + relative_buckets += (relative_positions > 0).to(torch.long) * num_buckets + relative_positions = torch.abs(relative_positions) + else: + relative_positions = -torch.min(relative_positions, torch.zeros_like(relative_positions)) + + max_exact = num_buckets // 2 + is_small = relative_positions < max_exact + + relative_postion_if_large = max_exact + ( + torch.log(relative_positions.float() / max_exact) + / math.log(max_distance / max_exact) + * (num_buckets - max_exact) + ).to(torch.long) + relative_postion_if_large = torch.min( + relative_postion_if_large, torch.full_like(relative_postion_if_large, num_buckets - 1) + ) + + relative_buckets += torch.where(is_small, relative_positions, relative_postion_if_large) + return relative_buckets + + def forward( + self, + query: Tensor, + key_padding_mask: Optional[Tensor] = None, + attention_mask: Optional[Tensor] = None, + position_bias: Optional[Tensor] = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + """ + Args: + query (Tensor): Input of shape ``(batch_size, src_len, embed_dim)``. + key_padding_mask (Tensor or None, optional): Mask to exclude keys that are pads, of shape + `(batch, src_len)`, where padding elements are indicated by 1s. (Default: ``None``) + attn_mask: Needs to be ``None``. The argument exists for compatibility with + ``EncoderLayer``. (Default: ``None``) + position_bias (Tensor or None, optional): Position bias of shape + ``(batch_size * num_heads, src_len, src_len)``. When used inside WavLM model encoder, will be + generated in the first layer and then passed from each encoder layer to the next one. + (Default: ``None``) + Returns: + attn_output (Tensor): Attention output of shape ``(batch_size, src_len, embed_dim)``. 
+ position_bias (Tensor or None): Position bias of shape ``(batch_size * num_heads, src_len, src_len)``. + """ + bsz, seq_len, embed_dim = query.size() + assert embed_dim == self.embed_dim + assert attention_mask is None + + if self.rel_attn_embed is not None and position_bias is None: + position_bias = self.compute_bias(seq_len, seq_len) + position_bias = position_bias.unsqueeze(0).repeat(bsz, 1, 1, 1) + + attn_mask_rel_pos: Optional[Tensor] = None + if position_bias is not None: + attn_mask_rel_pos = position_bias + if self.gru_rel_pos: # Apply gating on relative position bias + query_layer = query.view(bsz, seq_len, self.num_heads, -1) + query_layer = query_layer.permute(0, 2, 1, 3) + + gate_a, gate_b = torch.sigmoid( + self.gru_rel_pos_linear(query_layer).view(bsz, self.num_heads, seq_len, 2, 4).sum(-1, keepdim=False) + ).chunk(2, dim=-1) + gate_a_1 = gate_a * (gate_b * self.gru_rel_pos_const - 1.0) + 2.0 + attn_mask_rel_pos = gate_a_1.view(bsz, self.num_heads, -1, 1) * position_bias + + attn_mask_rel_pos = attn_mask_rel_pos.view((bsz, self.num_heads, seq_len, seq_len)) + + if attn_mask_rel_pos is not None and key_padding_mask is not None: + key_padding_mask = key_padding_mask.view(bsz, 1, 1, seq_len).expand(-1, self.num_heads, -1, -1) + key_padding_mask = torch.nn.functional._canonical_mask( + mask=key_padding_mask, + mask_name="key_padding_mask", + other_type=torch.nn.functional._none_or_dtype(attn_mask_rel_pos), + other_name="", + target_type=query.dtype, + ) + if attn_mask_rel_pos is not None and key_padding_mask is not None: + attn_mask_rel_pos = attn_mask_rel_pos + key_padding_mask + query_projected = torch.nn.functional.linear(query, self.attention.in_proj_weight, self.attention.in_proj_bias) + query, key, value = query_projected.chunk(3, -1) + shape = (bsz, seq_len, self.num_heads, self.head_dim) + query = query.view(shape).transpose(2, 1) # (batch, num_heads, seq_len, head_dim) + key = key.view(shape).transpose(2, 1) # (batch, num_heads, seq_len, 
head_dim) + value = value.view(shape).transpose(2, 1) # (batch, num_heads, seq_len, head_dim) + dropout = self.dropout if self.training else 0.0 + attn_output = torch.nn.functional.scaled_dot_product_attention( + query, + key, + value, + attn_mask=attn_mask_rel_pos, + dropout_p=dropout, + is_causal=False, + ) + attn_output = attn_output.transpose(1, 2).reshape(bsz, -1, self.num_heads * self.head_dim) + attn_output = self.attention.out_proj(attn_output) + return attn_output, position_bias diff --git a/venv/lib/python3.10/site-packages/torchaudio/models/wavernn.py b/venv/lib/python3.10/site-packages/torchaudio/models/wavernn.py new file mode 100644 index 0000000000000000000000000000000000000000..8ae5a3e91675cd9ef7d4614f0daaec50f80dcdee --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/models/wavernn.py @@ -0,0 +1,409 @@ +import math +from typing import List, Optional, Tuple + +import torch +import torch.nn.functional as F +from torch import nn, Tensor + +__all__ = [ + "ResBlock", + "MelResNet", + "Stretch2d", + "UpsampleNetwork", + "WaveRNN", +] + + +class ResBlock(nn.Module): + r"""ResNet block based on *Efficient Neural Audio Synthesis* :cite:`kalchbrenner2018efficient`. + + Args: + n_freq: the number of bins in a spectrogram. (Default: ``128``) + + Examples + >>> resblock = ResBlock() + >>> input = torch.rand(10, 128, 512) # a random spectrogram + >>> output = resblock(input) # shape: (10, 128, 512) + """ + + def __init__(self, n_freq: int = 128) -> None: + super().__init__() + + self.resblock_model = nn.Sequential( + nn.Conv1d(in_channels=n_freq, out_channels=n_freq, kernel_size=1, bias=False), + nn.BatchNorm1d(n_freq), + nn.ReLU(inplace=True), + nn.Conv1d(in_channels=n_freq, out_channels=n_freq, kernel_size=1, bias=False), + nn.BatchNorm1d(n_freq), + ) + + def forward(self, specgram: Tensor) -> Tensor: + r"""Pass the input through the ResBlock layer. + Args: + specgram (Tensor): the input sequence to the ResBlock layer (n_batch, n_freq, n_time). 
+ + Return: + Tensor shape: (n_batch, n_freq, n_time) + """ + + return self.resblock_model(specgram) + specgram + + +class MelResNet(nn.Module): + r"""MelResNet layer uses a stack of ResBlocks on spectrogram. + + Args: + n_res_block: the number of ResBlock in stack. (Default: ``10``) + n_freq: the number of bins in a spectrogram. (Default: ``128``) + n_hidden: the number of hidden dimensions of resblock. (Default: ``128``) + n_output: the number of output dimensions of melresnet. (Default: ``128``) + kernel_size: the number of kernel size in the first Conv1d layer. (Default: ``5``) + + Examples + >>> melresnet = MelResNet() + >>> input = torch.rand(10, 128, 512) # a random spectrogram + >>> output = melresnet(input) # shape: (10, 128, 508) + """ + + def __init__( + self, n_res_block: int = 10, n_freq: int = 128, n_hidden: int = 128, n_output: int = 128, kernel_size: int = 5 + ) -> None: + super().__init__() + + ResBlocks = [ResBlock(n_hidden) for _ in range(n_res_block)] + + self.melresnet_model = nn.Sequential( + nn.Conv1d(in_channels=n_freq, out_channels=n_hidden, kernel_size=kernel_size, bias=False), + nn.BatchNorm1d(n_hidden), + nn.ReLU(inplace=True), + *ResBlocks, + nn.Conv1d(in_channels=n_hidden, out_channels=n_output, kernel_size=1), + ) + + def forward(self, specgram: Tensor) -> Tensor: + r"""Pass the input through the MelResNet layer. + Args: + specgram (Tensor): the input sequence to the MelResNet layer (n_batch, n_freq, n_time). + + Return: + Tensor shape: (n_batch, n_output, n_time - kernel_size + 1) + """ + + return self.melresnet_model(specgram) + + +class Stretch2d(nn.Module): + r"""Upscale the frequency and time dimensions of a spectrogram. 
+ + Args: + time_scale: the scale factor in time dimension + freq_scale: the scale factor in frequency dimension + + Examples + >>> stretch2d = Stretch2d(time_scale=10, freq_scale=5) + + >>> input = torch.rand(10, 100, 512) # a random spectrogram + >>> output = stretch2d(input) # shape: (10, 500, 5120) + """ + + def __init__(self, time_scale: int, freq_scale: int) -> None: + super().__init__() + + self.freq_scale = freq_scale + self.time_scale = time_scale + + def forward(self, specgram: Tensor) -> Tensor: + r"""Pass the input through the Stretch2d layer. + + Args: + specgram (Tensor): the input sequence to the Stretch2d layer (..., n_freq, n_time). + + Return: + Tensor shape: (..., n_freq * freq_scale, n_time * time_scale) + """ + + return specgram.repeat_interleave(self.freq_scale, -2).repeat_interleave(self.time_scale, -1) + + +class UpsampleNetwork(nn.Module): + r"""Upscale the dimensions of a spectrogram. + + Args: + upsample_scales: the list of upsample scales. + n_res_block: the number of ResBlock in stack. (Default: ``10``) + n_freq: the number of bins in a spectrogram. (Default: ``128``) + n_hidden: the number of hidden dimensions of resblock. (Default: ``128``) + n_output: the number of output dimensions of melresnet. (Default: ``128``) + kernel_size: the number of kernel size in the first Conv1d layer. 
(Default: ``5``) + + Examples + >>> upsamplenetwork = UpsampleNetwork(upsample_scales=[4, 4, 16]) + >>> input = torch.rand(10, 128, 10) # a random spectrogram + >>> output = upsamplenetwork(input) # shape: (10, 128, 1536), (10, 128, 1536) + """ + + def __init__( + self, + upsample_scales: List[int], + n_res_block: int = 10, + n_freq: int = 128, + n_hidden: int = 128, + n_output: int = 128, + kernel_size: int = 5, + ) -> None: + super().__init__() + + total_scale = 1 + for upsample_scale in upsample_scales: + total_scale *= upsample_scale + self.total_scale: int = total_scale + + self.indent = (kernel_size - 1) // 2 * total_scale + self.resnet = MelResNet(n_res_block, n_freq, n_hidden, n_output, kernel_size) + self.resnet_stretch = Stretch2d(total_scale, 1) + + up_layers = [] + for scale in upsample_scales: + stretch = Stretch2d(scale, 1) + conv = nn.Conv2d( + in_channels=1, out_channels=1, kernel_size=(1, scale * 2 + 1), padding=(0, scale), bias=False + ) + torch.nn.init.constant_(conv.weight, 1.0 / (scale * 2 + 1)) + up_layers.append(stretch) + up_layers.append(conv) + self.upsample_layers = nn.Sequential(*up_layers) + + def forward(self, specgram: Tensor) -> Tuple[Tensor, Tensor]: + r"""Pass the input through the UpsampleNetwork layer. + + Args: + specgram (Tensor): the input sequence to the UpsampleNetwork layer (n_batch, n_freq, n_time) + + Return: + Tensor shape: (n_batch, n_freq, (n_time - kernel_size + 1) * total_scale), + (n_batch, n_output, (n_time - kernel_size + 1) * total_scale) + where total_scale is the product of all elements in upsample_scales. 
+ """ + + resnet_output = self.resnet(specgram).unsqueeze(1) + resnet_output = self.resnet_stretch(resnet_output) + resnet_output = resnet_output.squeeze(1) + + specgram = specgram.unsqueeze(1) + upsampling_output = self.upsample_layers(specgram) + upsampling_output = upsampling_output.squeeze(1)[:, :, self.indent : -self.indent] + + return upsampling_output, resnet_output + + +class WaveRNN(nn.Module): + r"""WaveRNN model from *Efficient Neural Audio Synthesis* :cite:`wavernn` + based on the implementation from `fatchord/WaveRNN `_. + + The original implementation was introduced in *Efficient Neural Audio Synthesis* + :cite:`kalchbrenner2018efficient`. The input channels of waveform and spectrogram have to be 1. + The product of `upsample_scales` must equal `hop_length`. + + See Also: + * `Training example `__ + * :class:`torchaudio.pipelines.Tacotron2TTSBundle`: TTS pipeline with pretrained model. + + Args: + upsample_scales: the list of upsample scales. + n_classes: the number of output classes. + hop_length: the number of samples between the starts of consecutive frames. + n_res_block: the number of ResBlock in stack. (Default: ``10``) + n_rnn: the dimension of RNN layer. (Default: ``512``) + n_fc: the dimension of fully connected layer. (Default: ``512``) + kernel_size: the number of kernel size in the first Conv1d layer. (Default: ``5``) + n_freq: the number of bins in a spectrogram. (Default: ``128``) + n_hidden: the number of hidden dimensions of resblock. (Default: ``128``) + n_output: the number of output dimensions of melresnet. 
(Default: ``128``) + + Example + >>> wavernn = WaveRNN(upsample_scales=[5,5,8], n_classes=512, hop_length=200) + >>> waveform, sample_rate = torchaudio.load(file) + >>> # waveform shape: (n_batch, n_channel, (n_time - kernel_size + 1) * hop_length) + >>> specgram = MelSpectrogram(sample_rate)(waveform) # shape: (n_batch, n_channel, n_freq, n_time) + >>> output = wavernn(waveform, specgram) + >>> # output shape: (n_batch, n_channel, (n_time - kernel_size + 1) * hop_length, n_classes) + """ + + def __init__( + self, + upsample_scales: List[int], + n_classes: int, + hop_length: int, + n_res_block: int = 10, + n_rnn: int = 512, + n_fc: int = 512, + kernel_size: int = 5, + n_freq: int = 128, + n_hidden: int = 128, + n_output: int = 128, + ) -> None: + super().__init__() + + self.kernel_size = kernel_size + self._pad = (kernel_size - 1 if kernel_size % 2 else kernel_size) // 2 + self.n_rnn = n_rnn + self.n_aux = n_output // 4 + self.hop_length = hop_length + self.n_classes = n_classes + self.n_bits: int = int(math.log2(self.n_classes)) + + total_scale = 1 + for upsample_scale in upsample_scales: + total_scale *= upsample_scale + if total_scale != self.hop_length: + raise ValueError(f"Expected: total_scale == hop_length, but found {total_scale} != {hop_length}") + + self.upsample = UpsampleNetwork(upsample_scales, n_res_block, n_freq, n_hidden, n_output, kernel_size) + self.fc = nn.Linear(n_freq + self.n_aux + 1, n_rnn) + + self.rnn1 = nn.GRU(n_rnn, n_rnn, batch_first=True) + self.rnn2 = nn.GRU(n_rnn + self.n_aux, n_rnn, batch_first=True) + + self.relu1 = nn.ReLU(inplace=True) + self.relu2 = nn.ReLU(inplace=True) + + self.fc1 = nn.Linear(n_rnn + self.n_aux, n_fc) + self.fc2 = nn.Linear(n_fc + self.n_aux, n_fc) + self.fc3 = nn.Linear(n_fc, self.n_classes) + + def forward(self, waveform: Tensor, specgram: Tensor) -> Tensor: + r"""Pass the input through the WaveRNN model. 
+ + Args: + waveform: the input waveform to the WaveRNN layer (n_batch, 1, (n_time - kernel_size + 1) * hop_length) + specgram: the input spectrogram to the WaveRNN layer (n_batch, 1, n_freq, n_time) + + Return: + Tensor: shape (n_batch, 1, (n_time - kernel_size + 1) * hop_length, n_classes) + """ + + if waveform.size(1) != 1: + raise ValueError("Require the input channel of waveform is 1") + if specgram.size(1) != 1: + raise ValueError("Require the input channel of specgram is 1") + # remove channel dimension until the end + waveform, specgram = waveform.squeeze(1), specgram.squeeze(1) + + batch_size = waveform.size(0) + h1 = torch.zeros(1, batch_size, self.n_rnn, dtype=waveform.dtype, device=waveform.device) + h2 = torch.zeros(1, batch_size, self.n_rnn, dtype=waveform.dtype, device=waveform.device) + # output of upsample: + # specgram: (n_batch, n_freq, (n_time - kernel_size + 1) * total_scale) + # aux: (n_batch, n_output, (n_time - kernel_size + 1) * total_scale) + specgram, aux = self.upsample(specgram) + specgram = specgram.transpose(1, 2) + aux = aux.transpose(1, 2) + + aux_idx = [self.n_aux * i for i in range(5)] + a1 = aux[:, :, aux_idx[0] : aux_idx[1]] + a2 = aux[:, :, aux_idx[1] : aux_idx[2]] + a3 = aux[:, :, aux_idx[2] : aux_idx[3]] + a4 = aux[:, :, aux_idx[3] : aux_idx[4]] + + x = torch.cat([waveform.unsqueeze(-1), specgram, a1], dim=-1) + x = self.fc(x) + res = x + x, _ = self.rnn1(x, h1) + + x = x + res + res = x + x = torch.cat([x, a2], dim=-1) + x, _ = self.rnn2(x, h2) + + x = x + res + x = torch.cat([x, a3], dim=-1) + x = self.fc1(x) + x = self.relu1(x) + + x = torch.cat([x, a4], dim=-1) + x = self.fc2(x) + x = self.relu2(x) + x = self.fc3(x) + + # bring back channel dimension + return x.unsqueeze(1) + + @torch.jit.export + def infer(self, specgram: Tensor, lengths: Optional[Tensor] = None) -> Tuple[Tensor, Optional[Tensor]]: + r"""Inference method of WaveRNN. 
+ + This function currently only supports multinomial sampling, which assumes the + network is trained on cross entropy loss. + + Args: + specgram (Tensor): + Batch of spectrograms. Shape: `(n_batch, n_freq, n_time)`. + lengths (Tensor or None, optional): + Indicates the valid length of each audio in the batch. + Shape: `(batch, )`. + When the ``specgram`` contains spectrograms with different durations, + by providing ``lengths`` argument, the model will compute + the corresponding valid output lengths. + If ``None``, it is assumed that all the audio in ``waveforms`` + have valid length. Default: ``None``. + + Returns: + (Tensor, Optional[Tensor]): + Tensor + The inferred waveform of size `(n_batch, 1, n_time)`. + 1 stands for a single channel. + Tensor or None + If ``lengths`` argument was provided, a Tensor of shape `(batch, )` + is returned. + It indicates the valid length in time axis of the output Tensor. + """ + + device = specgram.device + dtype = specgram.dtype + + specgram = torch.nn.functional.pad(specgram, (self._pad, self._pad)) + specgram, aux = self.upsample(specgram) + if lengths is not None: + lengths = lengths * self.upsample.total_scale + + output: List[Tensor] = [] + b_size, _, seq_len = specgram.size() + + h1 = torch.zeros((1, b_size, self.n_rnn), device=device, dtype=dtype) + h2 = torch.zeros((1, b_size, self.n_rnn), device=device, dtype=dtype) + x = torch.zeros((b_size, 1), device=device, dtype=dtype) + + aux_split = [aux[:, self.n_aux * i : self.n_aux * (i + 1), :] for i in range(4)] + + for i in range(seq_len): + + m_t = specgram[:, :, i] + + a1_t, a2_t, a3_t, a4_t = [a[:, :, i] for a in aux_split] + + x = torch.cat([x, m_t, a1_t], dim=1) + x = self.fc(x) + _, h1 = self.rnn1(x.unsqueeze(1), h1) + + x = x + h1[0] + inp = torch.cat([x, a2_t], dim=1) + _, h2 = self.rnn2(inp.unsqueeze(1), h2) + + x = x + h2[0] + x = torch.cat([x, a3_t], dim=1) + x = F.relu(self.fc1(x)) + + x = torch.cat([x, a4_t], dim=1) + x = F.relu(self.fc2(x)) + + logits = 
self.fc3(x) + + posterior = F.softmax(logits, dim=1) + + x = torch.multinomial(posterior, 1).float() + # Transform label [0, 2 ** n_bits - 1] to waveform [-1, 1] + x = 2 * x / (2**self.n_bits - 1.0) - 1.0 + + output.append(x) + + return torch.stack(output).permute(1, 2, 0), lengths diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/__init__.py b/venv/lib/python3.10/site-packages/torchaudio/pipelines/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..efec1f3521e760803e095efb71f164ed268896f1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/pipelines/__init__.py @@ -0,0 +1,102 @@ +from ._source_separation_pipeline import ( + CONVTASNET_BASE_LIBRI2MIX, + HDEMUCS_HIGH_MUSDB, + HDEMUCS_HIGH_MUSDB_PLUS, + SourceSeparationBundle, +) +from ._squim_pipeline import SQUIM_OBJECTIVE, SQUIM_SUBJECTIVE, SquimObjectiveBundle, SquimSubjectiveBundle +from ._tts import ( + TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH, + TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH, + TACOTRON2_WAVERNN_CHAR_LJSPEECH, + TACOTRON2_WAVERNN_PHONE_LJSPEECH, + Tacotron2TTSBundle, +) +from ._wav2vec2.impl import ( + HUBERT_ASR_LARGE, + HUBERT_ASR_XLARGE, + HUBERT_BASE, + HUBERT_LARGE, + HUBERT_XLARGE, + MMS_FA, + VOXPOPULI_ASR_BASE_10K_DE, + VOXPOPULI_ASR_BASE_10K_EN, + VOXPOPULI_ASR_BASE_10K_ES, + VOXPOPULI_ASR_BASE_10K_FR, + VOXPOPULI_ASR_BASE_10K_IT, + WAV2VEC2_ASR_BASE_100H, + WAV2VEC2_ASR_BASE_10M, + WAV2VEC2_ASR_BASE_960H, + WAV2VEC2_ASR_LARGE_100H, + WAV2VEC2_ASR_LARGE_10M, + WAV2VEC2_ASR_LARGE_960H, + WAV2VEC2_ASR_LARGE_LV60K_100H, + WAV2VEC2_ASR_LARGE_LV60K_10M, + WAV2VEC2_ASR_LARGE_LV60K_960H, + WAV2VEC2_BASE, + WAV2VEC2_LARGE, + WAV2VEC2_LARGE_LV60K, + WAV2VEC2_XLSR53, + WAV2VEC2_XLSR_1B, + WAV2VEC2_XLSR_2B, + WAV2VEC2_XLSR_300M, + Wav2Vec2ASRBundle, + Wav2Vec2Bundle, + Wav2Vec2FABundle, + WAVLM_BASE, + WAVLM_BASE_PLUS, + WAVLM_LARGE, +) +from .rnnt_pipeline import EMFORMER_RNNT_BASE_LIBRISPEECH, RNNTBundle + + +__all__ = [ + "Wav2Vec2Bundle", + 
"Wav2Vec2ASRBundle", + "Wav2Vec2FABundle", + "WAV2VEC2_BASE", + "WAV2VEC2_LARGE", + "WAV2VEC2_LARGE_LV60K", + "WAV2VEC2_ASR_BASE_10M", + "WAV2VEC2_ASR_BASE_100H", + "WAV2VEC2_ASR_BASE_960H", + "WAV2VEC2_ASR_LARGE_10M", + "WAV2VEC2_ASR_LARGE_100H", + "WAV2VEC2_ASR_LARGE_960H", + "WAV2VEC2_ASR_LARGE_LV60K_10M", + "WAV2VEC2_ASR_LARGE_LV60K_100H", + "WAV2VEC2_ASR_LARGE_LV60K_960H", + "WAV2VEC2_XLSR53", + "WAV2VEC2_XLSR_300M", + "WAV2VEC2_XLSR_1B", + "WAV2VEC2_XLSR_2B", + "VOXPOPULI_ASR_BASE_10K_EN", + "VOXPOPULI_ASR_BASE_10K_ES", + "VOXPOPULI_ASR_BASE_10K_DE", + "VOXPOPULI_ASR_BASE_10K_FR", + "VOXPOPULI_ASR_BASE_10K_IT", + "HUBERT_BASE", + "HUBERT_LARGE", + "HUBERT_XLARGE", + "HUBERT_ASR_LARGE", + "HUBERT_ASR_XLARGE", + "MMS_FA", + "WAVLM_BASE", + "WAVLM_BASE_PLUS", + "WAVLM_LARGE", + "Tacotron2TTSBundle", + "TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH", + "TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH", + "TACOTRON2_WAVERNN_CHAR_LJSPEECH", + "TACOTRON2_WAVERNN_PHONE_LJSPEECH", + "RNNTBundle", + "EMFORMER_RNNT_BASE_LIBRISPEECH", + "SourceSeparationBundle", + "CONVTASNET_BASE_LIBRI2MIX", + "HDEMUCS_HIGH_MUSDB_PLUS", + "HDEMUCS_HIGH_MUSDB", + "SQUIM_OBJECTIVE", + "SQUIM_SUBJECTIVE", + "SquimObjectiveBundle", + "SquimSubjectiveBundle", +] diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/pipelines/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db78e48c04de8d53414126441e68eb53fbd700a9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/pipelines/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/__pycache__/_source_separation_pipeline.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/pipelines/__pycache__/_source_separation_pipeline.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..19cffdd9909dd3c70ad84fb5dd469055243f4433 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/pipelines/__pycache__/_source_separation_pipeline.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/__pycache__/_squim_pipeline.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/pipelines/__pycache__/_squim_pipeline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a50bf80551925461291100c510ffb9f47c5b076 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/pipelines/__pycache__/_squim_pipeline.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/__pycache__/rnnt_pipeline.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/pipelines/__pycache__/rnnt_pipeline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f68cfd4cd9caa36583c496e6688675cd7d056f47 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/pipelines/__pycache__/rnnt_pipeline.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/_source_separation_pipeline.py b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_source_separation_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..ae92e21831307f91450b32a73563c0011e455753 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_source_separation_pipeline.py @@ -0,0 +1,109 @@ +from dataclasses import dataclass +from functools import partial +from typing import Callable + +import torch +import torchaudio + +from torchaudio.models import conv_tasnet_base, hdemucs_high + + +@dataclass +class SourceSeparationBundle: + """Dataclass that bundles components for performing source separation. 
+ + Example + >>> import torchaudio + >>> from torchaudio.pipelines import CONVTASNET_BASE_LIBRI2MIX + >>> import torch + >>> + >>> # Build the separation model. + >>> model = CONVTASNET_BASE_LIBRI2MIX.get_model() + >>> 100%|███████████████████████████████|19.1M/19.1M [00:04<00:00, 4.93MB/s] + >>> + >>> # Instantiate the test set of Libri2Mix dataset. + >>> dataset = torchaudio.datasets.LibriMix("/home/datasets/", subset="test") + >>> + >>> # Apply source separation on mixture audio. + >>> for i, data in enumerate(dataset): + >>> sample_rate, mixture, clean_sources = data + >>> # Make sure the shape of input suits the model requirement. + >>> mixture = mixture.reshape(1, 1, -1) + >>> estimated_sources = model(mixture) + >>> score = si_snr_pit(estimated_sources, clean_sources) # for demonstration + >>> print(f"Si-SNR score is : {score}.) + >>> break + >>> Si-SNR score is : 16.24. + >>> + """ + + _model_path: str + _model_factory_func: Callable[[], torch.nn.Module] + _sample_rate: int + + @property + def sample_rate(self) -> int: + """Sample rate of the audio that the model is trained on. + + :type: int + """ + return self._sample_rate + + def get_model(self) -> torch.nn.Module: + """Construct the model and load the pretrained weight.""" + model = self._model_factory_func() + path = torchaudio.utils.download_asset(self._model_path) + state_dict = torch.load(path) + model.load_state_dict(state_dict) + model.eval() + return model + + +CONVTASNET_BASE_LIBRI2MIX = SourceSeparationBundle( + _model_path="models/conv_tasnet_base_libri2mix.pt", + _model_factory_func=partial(conv_tasnet_base, num_sources=2), + _sample_rate=8000, +) +CONVTASNET_BASE_LIBRI2MIX.__doc__ = """Pre-trained Source Separation pipeline with *ConvTasNet* +:cite:`Luo_2019` trained on *Libri2Mix dataset* :cite:`cosentino2020librimix`. 
+ +The source separation model is constructed by :func:`~torchaudio.models.conv_tasnet_base` +and is trained using the training script ``lightning_train.py`` +`here `__ +with default arguments. + +Please refer to :class:`SourceSeparationBundle` for usage instructions. +""" + + +HDEMUCS_HIGH_MUSDB_PLUS = SourceSeparationBundle( + _model_path="models/hdemucs_high_trained.pt", + _model_factory_func=partial(hdemucs_high, sources=["drums", "bass", "other", "vocals"]), + _sample_rate=44100, +) +HDEMUCS_HIGH_MUSDB_PLUS.__doc__ = """Pre-trained music source separation pipeline with +*Hybrid Demucs* :cite:`defossez2021hybrid` trained on both training and test sets of +MUSDB-HQ :cite:`MUSDB18HQ` and an additional 150 extra songs from an internal database +that was specifically produced for Meta. + +The model is constructed by :func:`~torchaudio.models.hdemucs_high`. + +Training was performed in the original HDemucs repository `here `__. + +Please refer to :class:`SourceSeparationBundle` for usage instructions. +""" + + +HDEMUCS_HIGH_MUSDB = SourceSeparationBundle( + _model_path="models/hdemucs_high_musdbhq_only.pt", + _model_factory_func=partial(hdemucs_high, sources=["drums", "bass", "other", "vocals"]), + _sample_rate=44100, +) +HDEMUCS_HIGH_MUSDB.__doc__ = """Pre-trained music source separation pipeline with +*Hybrid Demucs* :cite:`defossez2021hybrid` trained on the training set of MUSDB-HQ :cite:`MUSDB18HQ`. + +The model is constructed by :func:`~torchaudio.models.hdemucs_high`. +Training was performed in the original HDemucs repository `here `__. + +Please refer to :class:`SourceSeparationBundle` for usage instructions. 
+""" diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/_squim_pipeline.py b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_squim_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..0c70db4aef70397d33dcb3d3b28131221cef52c8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_squim_pipeline.py @@ -0,0 +1,156 @@ +from dataclasses import dataclass + +import torch +import torchaudio + +from torchaudio.models import squim_objective_base, squim_subjective_base, SquimObjective, SquimSubjective + + +@dataclass +class SquimObjectiveBundle: + """Data class that bundles associated information to use pretrained + :py:class:`~torchaudio.models.SquimObjective` model. + + This class provides interfaces for instantiating the pretrained model along with + the information necessary to retrieve pretrained weights and additional data + to be used with the model. + + Torchaudio library instantiates objects of this class, each of which represents + a different pretrained model. Client code should access pretrained models via these + instances. + + This bundle can estimate objective metric scores for speech enhancement, such as STOI, PESQ, Si-SDR. + A typical use case would be a flow like `waveform -> list of scores`. Please see below for the code example. + + Example: Estimate the objective metric scores for the input waveform. 
+ >>> import torch + >>> import torchaudio + >>> from torchaudio.pipelines import SQUIM_OBJECTIVE as bundle + >>> + >>> # Load the SquimObjective bundle + >>> model = bundle.get_model() + Downloading: "https://download.pytorch.org/torchaudio/models/squim_objective_dns2020.pth" + 100%|████████████| 28.2M/28.2M [00:03<00:00, 9.24MB/s] + >>> + >>> # Resample audio to the expected sampling rate + >>> waveform = torchaudio.functional.resample(waveform, sample_rate, bundle.sample_rate) + >>> + >>> # Estimate objective metric scores + >>> scores = model(waveform) + >>> print(f"STOI: {scores[0].item()}, PESQ: {scores[1].item()}, SI-SDR: {scores[2].item()}.") + """ # noqa: E501 + + _path: str + _sample_rate: float + + def get_model(self) -> SquimObjective: + """Construct the SquimObjective model, and load the pretrained weight. + + Returns: + Variation of :py:class:`~torchaudio.models.SquimObjective`. + """ + model = squim_objective_base() + path = torchaudio.utils.download_asset(f"models/{self._path}") + state_dict = torch.load(path, weights_only=True) + model.load_state_dict(state_dict) + model.eval() + return model + + @property + def sample_rate(self): + """Sample rate of the audio that the model is trained on. + + :type: float + """ + return self._sample_rate + + +SQUIM_OBJECTIVE = SquimObjectiveBundle( + "squim_objective_dns2020.pth", + _sample_rate=16000, +) +SQUIM_OBJECTIVE.__doc__ = """SquimObjective pipeline trained using approach described in + :cite:`kumar2023torchaudio` on the *DNS 2020 Dataset* :cite:`reddy2020interspeech`. + + The underlying model is constructed by :py:func:`torchaudio.models.squim_objective_base`. + The weights are under `Creative Commons Attribution 4.0 International License + `__. + + Please refer to :py:class:`SquimObjectiveBundle` for usage instructions. + """ + + +@dataclass +class SquimSubjectiveBundle: + """Data class that bundles associated information to use pretrained + :py:class:`~torchaudio.models.SquimSubjective` model. 
+ + This class provides interfaces for instantiating the pretrained model along with + the information necessary to retrieve pretrained weights and additional data + to be used with the model. + + Torchaudio library instantiates objects of this class, each of which represents + a different pretrained model. Client code should access pretrained models via these + instances. + + This bundle can estimate subjective metric scores for speech enhancement, such as MOS. + A typical use case would be a flow like `waveform -> score`. Please see below for the code example. + + Example: Estimate the subjective metric scores for the input waveform. + >>> import torch + >>> import torchaudio + >>> from torchaudio.pipelines import SQUIM_SUBJECTIVE as bundle + >>> + >>> # Load the SquimSubjective bundle + >>> model = bundle.get_model() + Downloading: "https://download.pytorch.org/torchaudio/models/squim_subjective_bvcc_daps.pth" + 100%|████████████| 360M/360M [00:09<00:00, 41.1MB/s] + >>> + >>> # Resample audio to the expected sampling rate + >>> waveform = torchaudio.functional.resample(waveform, sample_rate, bundle.sample_rate) + >>> # Use a clean reference (doesn't need to be the reference for the waveform) as the second input + >>> reference = torchaudio.functional.resample(reference, sample_rate, bundle.sample_rate) + >>> + >>> # Estimate subjective metric scores + >>> score = model(waveform, reference) + >>> print(f"MOS: {score}.") + """ # noqa: E501 + + _path: str + _sample_rate: float + + def get_model(self) -> SquimSubjective: + """Construct the SquimSubjective model, and load the pretrained weight. + Returns: + Variation of :py:class:`~torchaudio.models.SquimObjective`. 
+ """ + model = squim_subjective_base() + path = torchaudio.utils.download_asset(f"models/{self._path}") + state_dict = torch.load(path, weights_only=True) + model.load_state_dict(state_dict) + model.eval() + return model + + @property + def sample_rate(self): + """Sample rate of the audio that the model is trained on. + + :type: float + """ + return self._sample_rate + + +SQUIM_SUBJECTIVE = SquimSubjectiveBundle( + "squim_subjective_bvcc_daps.pth", + _sample_rate=16000, +) +SQUIM_SUBJECTIVE.__doc__ = """SquimSubjective pipeline trained + as described in :cite:`manocha2022speech` and :cite:`kumar2023torchaudio` + on the *BVCC* :cite:`cooper2021voices` and *DAPS* :cite:`mysore2014can` datasets. + + The underlying model is constructed by :py:func:`torchaudio.models.squim_subjective_base`. + The weights are under `Creative Commons Attribution Non Commercial 4.0 International + `__. + + Please refer to :py:class:`SquimSubjectiveBundle` for usage instructions. + """ diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/__init__.py b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..02851f596ceb281acc75c4d6a1aaf17eeee4a809 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/__init__.py @@ -0,0 +1,16 @@ +from .impl import ( + TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH, + TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH, + TACOTRON2_WAVERNN_CHAR_LJSPEECH, + TACOTRON2_WAVERNN_PHONE_LJSPEECH, +) +from .interface import Tacotron2TTSBundle + + +__all__ = [ + "Tacotron2TTSBundle", + "TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH", + "TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH", + "TACOTRON2_WAVERNN_CHAR_LJSPEECH", + "TACOTRON2_WAVERNN_PHONE_LJSPEECH", +] diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/__pycache__/__init__.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..ac5778db025bec535c037bb0dda239c46b3d39fc Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/__pycache__/impl.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/__pycache__/impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1bb62e9e791fcc98e4be79be98a79755fb6ca266 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/__pycache__/impl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/__pycache__/interface.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/__pycache__/interface.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48e2bbe54b671abb0c37a3ee5623b947ec4cfa5f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/__pycache__/interface.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7aa395ef3445b1b23562fa122ff0a1d458a72227 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/impl.py b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/impl.py new file mode 100644 index 0000000000000000000000000000000000000000..b8542286242dcbb2036fff49c1d0e11fbbf9258b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/impl.py @@ -0,0 +1,385 @@ +import re +from dataclasses import dataclass +from typing import Any, 
Dict, List, Optional, Tuple, Union + +import torch +from torch import Tensor +from torchaudio._internal import load_state_dict_from_url +from torchaudio.functional import mu_law_decoding +from torchaudio.models import Tacotron2, WaveRNN +from torchaudio.transforms import GriffinLim, InverseMelScale + +from . import utils +from .interface import Tacotron2TTSBundle + +__all__ = [] + +_BASE_URL = "https://download.pytorch.org/torchaudio/models" + + +################################################################################ +# Pipeline implementation - Text Processor +################################################################################ + + +class _EnglishCharProcessor(Tacotron2TTSBundle.TextProcessor): + def __init__(self): + super().__init__() + self._tokens = utils._get_chars() + self._mapping = {s: i for i, s in enumerate(self._tokens)} + + @property + def tokens(self): + return self._tokens + + def __call__(self, texts: Union[str, List[str]]) -> Tuple[Tensor, Tensor]: + if isinstance(texts, str): + texts = [texts] + indices = [[self._mapping[c] for c in t.lower() if c in self._mapping] for t in texts] + return utils._to_tensor(indices) + + +class _EnglishPhoneProcessor(Tacotron2TTSBundle.TextProcessor): + def __init__(self, *, dl_kwargs=None): + super().__init__() + self._tokens = utils._get_phones() + self._mapping = {p: i for i, p in enumerate(self._tokens)} + self._phonemizer = utils._load_phonemizer("en_us_cmudict_forward.pt", dl_kwargs=dl_kwargs) + self._pattern = r"(\[[A-Z]+?\]|[_!'(),.:;? -])" + + @property + def tokens(self): + return self._tokens + + def __call__(self, texts: Union[str, List[str]]) -> Tuple[Tensor, Tensor]: + if isinstance(texts, str): + texts = [texts] + + indices = [] + for phones in self._phonemizer(texts, lang="en_us"): + # '[F][UW][B][AA][R]!' 
-> ['F', 'UW', 'B', 'AA', 'R', '!'] + ret = [re.sub(r"[\[\]]", "", r) for r in re.findall(self._pattern, phones)] + indices.append([self._mapping[p] for p in ret]) + return utils._to_tensor(indices) + + +################################################################################ +# Pipeline implementation - Vocoder +################################################################################ + + +class _WaveRNNVocoder(torch.nn.Module, Tacotron2TTSBundle.Vocoder): + def __init__(self, model: WaveRNN, min_level_db: Optional[float] = -100): + super().__init__() + self._sample_rate = 22050 + self._model = model + self._min_level_db = min_level_db + + @property + def sample_rate(self): + return self._sample_rate + + def forward(self, mel_spec, lengths=None): + mel_spec = torch.exp(mel_spec) + mel_spec = 20 * torch.log10(torch.clamp(mel_spec, min=1e-5)) + if self._min_level_db is not None: + mel_spec = (self._min_level_db - mel_spec) / self._min_level_db + mel_spec = torch.clamp(mel_spec, min=0, max=1) + waveform, lengths = self._model.infer(mel_spec, lengths) + waveform = utils._unnormalize_waveform(waveform, self._model.n_bits) + waveform = mu_law_decoding(waveform, self._model.n_classes) + waveform = waveform.squeeze(1) + return waveform, lengths + + +class _GriffinLimVocoder(torch.nn.Module, Tacotron2TTSBundle.Vocoder): + def __init__(self): + super().__init__() + self._sample_rate = 22050 + self._inv_mel = InverseMelScale( + n_stft=(1024 // 2 + 1), + n_mels=80, + sample_rate=self.sample_rate, + f_min=0.0, + f_max=8000.0, + mel_scale="slaney", + norm="slaney", + ) + self._griffin_lim = GriffinLim( + n_fft=1024, + power=1, + hop_length=256, + win_length=1024, + ) + + @property + def sample_rate(self): + return self._sample_rate + + def forward(self, mel_spec, lengths=None): + mel_spec = torch.exp(mel_spec) + mel_spec = mel_spec.clone().detach().requires_grad_(True) + spec = self._inv_mel(mel_spec) + spec = spec.detach().requires_grad_(False) + waveforms = 
self._griffin_lim(spec) + return waveforms, lengths + + +################################################################################ +# Bundle classes mixins +################################################################################ + + +class _CharMixin: + def get_text_processor(self) -> Tacotron2TTSBundle.TextProcessor: + return _EnglishCharProcessor() + + +class _PhoneMixin: + def get_text_processor(self, *, dl_kwargs=None) -> Tacotron2TTSBundle.TextProcessor: + return _EnglishPhoneProcessor(dl_kwargs=dl_kwargs) + + +@dataclass +class _Tacotron2Mixin: + _tacotron2_path: str + _tacotron2_params: Dict[str, Any] + + def get_tacotron2(self, *, dl_kwargs=None) -> Tacotron2: + model = Tacotron2(**self._tacotron2_params) + url = f"{_BASE_URL}/{self._tacotron2_path}" + dl_kwargs = {} if dl_kwargs is None else dl_kwargs + state_dict = load_state_dict_from_url(url, **dl_kwargs) + model.load_state_dict(state_dict) + model.eval() + return model + + +@dataclass +class _WaveRNNMixin: + _wavernn_path: Optional[str] + _wavernn_params: Optional[Dict[str, Any]] + + def get_vocoder(self, *, dl_kwargs=None): + wavernn = self._get_wavernn(dl_kwargs=dl_kwargs) + return _WaveRNNVocoder(wavernn) + + def _get_wavernn(self, *, dl_kwargs=None): + model = WaveRNN(**self._wavernn_params) + url = f"{_BASE_URL}/{self._wavernn_path}" + dl_kwargs = {} if dl_kwargs is None else dl_kwargs + state_dict = load_state_dict_from_url(url, **dl_kwargs) + model.load_state_dict(state_dict) + model.eval() + return model + + +class _GriffinLimMixin: + def get_vocoder(self, **_): + return _GriffinLimVocoder() + + +################################################################################ +# Bundle classes +################################################################################ + + +@dataclass +class _Tacotron2WaveRNNCharBundle(_WaveRNNMixin, _Tacotron2Mixin, _CharMixin, Tacotron2TTSBundle): + pass + + +@dataclass +class _Tacotron2WaveRNNPhoneBundle(_WaveRNNMixin, _Tacotron2Mixin, 
_PhoneMixin, Tacotron2TTSBundle): + pass + + +@dataclass +class _Tacotron2GriffinLimCharBundle(_GriffinLimMixin, _Tacotron2Mixin, _CharMixin, Tacotron2TTSBundle): + pass + + +@dataclass +class _Tacotron2GriffinLimPhoneBundle(_GriffinLimMixin, _Tacotron2Mixin, _PhoneMixin, Tacotron2TTSBundle): + pass + + +################################################################################ +# Instantiate bundle objects +################################################################################ + + +TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH = _Tacotron2GriffinLimCharBundle( + _tacotron2_path="tacotron2_english_characters_1500_epochs_ljspeech.pth", + _tacotron2_params=utils._get_taco_params(n_symbols=38), +) +TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH.__doc__ = """Character-based TTS pipeline with :py:class:`~torchaudio.models.Tacotron2` trained on *LJSpeech* :cite:`ljspeech17` for 1,500 epochs, and +:py:class:`~torchaudio.transforms.GriffinLim` as vocoder. + +The text processor encodes the input texts character-by-character. + +You can find the training script `here `__. +The default parameters were used. + +Please refer to :func:`torchaudio.pipelines.Tacotron2TTSBundle` for the usage. + +Example - "Hello world! T T S stands for Text to Speech!" + + .. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH.png + :alt: Spectrogram generated by Tacotron2 + + .. raw:: html + + + +Example - "The examination and testimony of the experts enabled the Commission to conclude that five shots may have been fired," + + .. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH_v2.png + :alt: Spectrogram generated by Tacotron2 + + .. 
raw:: html + + +""" # noqa: E501 + +TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH = _Tacotron2GriffinLimPhoneBundle( + _tacotron2_path="tacotron2_english_phonemes_1500_epochs_ljspeech.pth", + _tacotron2_params=utils._get_taco_params(n_symbols=96), +) +TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH.__doc__ = """Phoneme-based TTS pipeline with :py:class:`~torchaudio.models.Tacotron2` trained on *LJSpeech* :cite:`ljspeech17` for 1,500 epochs and +:py:class:`~torchaudio.transforms.GriffinLim` as vocoder. + +The text processor encodes the input texts based on phoneme. +It uses `DeepPhonemizer `__ to convert +graphemes to phonemes. +The model (*en_us_cmudict_forward*) was trained on +`CMUDict `__. + +You can find the training script `here `__. +The text processor is set to the *"english_phonemes"*. + +Please refer to :func:`torchaudio.pipelines.Tacotron2TTSBundle` for the usage. + +Example - "Hello world! T T S stands for Text to Speech!" + + .. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH.png + :alt: Spectrogram generated by Tacotron2 + + .. raw:: html + + + +Example - "The examination and testimony of the experts enabled the Commission to conclude that five shots may have been fired," + + .. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH_v2.png + :alt: Spectrogram generated by Tacotron2 + + .. 
raw:: html + + + +""" # noqa: E501 + +TACOTRON2_WAVERNN_CHAR_LJSPEECH = _Tacotron2WaveRNNCharBundle( + _tacotron2_path="tacotron2_english_characters_1500_epochs_wavernn_ljspeech.pth", + _tacotron2_params=utils._get_taco_params(n_symbols=38), + _wavernn_path="wavernn_10k_epochs_8bits_ljspeech.pth", + _wavernn_params=utils._get_wrnn_params(), +) +TACOTRON2_WAVERNN_CHAR_LJSPEECH.__doc__ = """Character-based TTS pipeline with :py:class:`~torchaudio.models.Tacotron2` trained on *LJSpeech* :cite:`ljspeech17` for 1,500 epochs and :py:class:`~torchaudio.models.WaveRNN` vocoder trained on 8 bits depth waveform of *LJSpeech* :cite:`ljspeech17` for 10,000 epochs. + +The text processor encodes the input texts character-by-character. + +You can find the training script `here `__. +The following parameters were used; ``win_length=1100``, ``hop_length=275``, ``n_fft=2048``, +``mel_fmin=40``, and ``mel_fmax=11025``. + +You can find the training script `here `__. + +Please refer to :func:`torchaudio.pipelines.Tacotron2TTSBundle` for the usage. + +Example - "Hello world! T T S stands for Text to Speech!" + + .. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_WAVERNN_CHAR_LJSPEECH.png + :alt: Spectrogram generated by Tacotron2 + + .. raw:: html + + + +Example - "The examination and testimony of the experts enabled the Commission to conclude that five shots may have been fired," + + .. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_WAVERNN_CHAR_LJSPEECH_v2.png + :alt: Spectrogram generated by Tacotron2 + + .. 
raw:: html + + +""" # noqa: E501 + +TACOTRON2_WAVERNN_PHONE_LJSPEECH = _Tacotron2WaveRNNPhoneBundle( + _tacotron2_path="tacotron2_english_phonemes_1500_epochs_wavernn_ljspeech.pth", + _tacotron2_params=utils._get_taco_params(n_symbols=96), + _wavernn_path="wavernn_10k_epochs_8bits_ljspeech.pth", + _wavernn_params=utils._get_wrnn_params(), +) +TACOTRON2_WAVERNN_PHONE_LJSPEECH.__doc__ = """Phoneme-based TTS pipeline with :py:class:`~torchaudio.models.Tacotron2` trained on *LJSpeech* :cite:`ljspeech17` for 1,500 epochs, and +:py:class:`~torchaudio.models.WaveRNN` vocoder trained on 8 bits depth waveform of *LJSpeech* :cite:`ljspeech17` for 10,000 epochs. + +The text processor encodes the input texts based on phoneme. +It uses `DeepPhonemizer `__ to convert +graphemes to phonemes. +The model (*en_us_cmudict_forward*) was trained on +`CMUDict `__. + +You can find the training script for Tacotron2 `here `__. +The following parameters were used; ``win_length=1100``, ``hop_length=275``, ``n_fft=2048``, +``mel_fmin=40``, and ``mel_fmax=11025``. + +You can find the training script for WaveRNN `here `__. + +Please refer to :func:`torchaudio.pipelines.Tacotron2TTSBundle` for the usage. + +Example - "Hello world! T T S stands for Text to Speech!" + + .. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_WAVERNN_PHONE_LJSPEECH.png + :alt: Spectrogram generated by Tacotron2 + + .. raw:: html + + + + +Example - "The examination and testimony of the experts enabled the Commission to conclude that five shots may have been fired," + + .. image:: https://download.pytorch.org/torchaudio/doc-assets/TACOTRON2_WAVERNN_PHONE_LJSPEECH_v2.png + :alt: Spectrogram generated by Tacotron2 + + .. 
raw:: html + + +""" # noqa: E501 diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/interface.py b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/interface.py new file mode 100644 index 0000000000000000000000000000000000000000..564f236bc7c239d17dc82db04c350a9ccc618841 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/interface.py @@ -0,0 +1,255 @@ +from abc import ABC, abstractmethod +from typing import List, Optional, Tuple, Union + +from torch import Tensor +from torchaudio.models import Tacotron2 + + +class _TextProcessor(ABC): + @property + @abstractmethod + def tokens(self): + """The tokens that the each value in the processed tensor represent. + + :type: List[str] + """ + + @abstractmethod + def __call__(self, texts: Union[str, List[str]]) -> Tuple[Tensor, Tensor]: + """Encode the given (batch of) texts into numerical tensors + + Args: + text (str or list of str): The input texts. + + Returns: + (Tensor, Tensor): + Tensor: + The encoded texts. Shape: `(batch, max length)` + Tensor: + The valid length of each sample in the batch. Shape: `(batch, )`. + """ + + +class _Vocoder(ABC): + @property + @abstractmethod + def sample_rate(self): + """The sample rate of the resulting waveform + + :type: float + """ + + @abstractmethod + def __call__(self, specgrams: Tensor, lengths: Optional[Tensor] = None) -> Tuple[Tensor, Optional[Tensor]]: + """Generate waveform from the given input, such as spectrogram + + Args: + specgrams (Tensor): + The input spectrogram. Shape: `(batch, frequency bins, time)`. + The expected shape depends on the implementation. + lengths (Tensor, or None, optional): + The valid length of each sample in the batch. Shape: `(batch, )`. + (Default: `None`) + + Returns: + (Tensor, Optional[Tensor]): + Tensor: + The generated waveform. Shape: `(batch, max length)` + Tensor or None: + The valid length of each sample in the batch. Shape: `(batch, )`. 
+ """ + + +class Tacotron2TTSBundle(ABC): + """Data class that bundles associated information to use pretrained Tacotron2 and vocoder. + + This class provides interfaces for instantiating the pretrained model along with + the information necessary to retrieve pretrained weights and additional data + to be used with the model. + + Torchaudio library instantiates objects of this class, each of which represents + a different pretrained model. Client code should access pretrained models via these + instances. + + Please see below for the usage and the available values. + + Example - Character-based TTS pipeline with Tacotron2 and WaveRNN + >>> import torchaudio + >>> + >>> text = "Hello, T T S !" + >>> bundle = torchaudio.pipelines.TACOTRON2_WAVERNN_CHAR_LJSPEECH + >>> + >>> # Build processor, Tacotron2 and WaveRNN model + >>> processor = bundle.get_text_processor() + >>> tacotron2 = bundle.get_tacotron2() + Downloading: + 100%|███████████████████████████████| 107M/107M [00:01<00:00, 87.9MB/s] + >>> vocoder = bundle.get_vocoder() + Downloading: + 100%|███████████████████████████████| 16.7M/16.7M [00:00<00:00, 78.1MB/s] + >>> + >>> # Encode text + >>> input, lengths = processor(text) + >>> + >>> # Generate (mel-scale) spectrogram + >>> specgram, lengths, _ = tacotron2.infer(input, lengths) + >>> + >>> # Convert spectrogram to waveform + >>> waveforms, lengths = vocoder(specgram, lengths) + >>> + >>> torchaudio.save('hello-tts.wav', waveforms, vocoder.sample_rate) + + Example - Phoneme-based TTS pipeline with Tacotron2 and WaveRNN + >>> + >>> # Note: + >>> # This bundle uses pre-trained DeepPhonemizer as + >>> # the text pre-processor. + >>> # Please install deep-phonemizer. + >>> # See https://github.com/as-ideas/DeepPhonemizer + >>> # The pretrained weight is automatically downloaded. + >>> + >>> import torchaudio + >>> + >>> text = "Hello, TTS!" 
+ >>> bundle = torchaudio.pipelines.TACOTRON2_WAVERNN_PHONE_LJSPEECH + >>> + >>> # Build processor, Tacotron2 and WaveRNN model + >>> processor = bundle.get_text_processor() + Downloading: + 100%|███████████████████████████████| 63.6M/63.6M [00:04<00:00, 15.3MB/s] + >>> tacotron2 = bundle.get_tacotron2() + Downloading: + 100%|███████████████████████████████| 107M/107M [00:01<00:00, 87.9MB/s] + >>> vocoder = bundle.get_vocoder() + Downloading: + 100%|███████████████████████████████| 16.7M/16.7M [00:00<00:00, 78.1MB/s] + >>> + >>> # Encode text + >>> input, lengths = processor(text) + >>> + >>> # Generate (mel-scale) spectrogram + >>> specgram, lengths, _ = tacotron2.infer(input, lengths) + >>> + >>> # Convert spectrogram to waveform + >>> waveforms, lengths = vocoder(specgram, lengths) + >>> + >>> torchaudio.save('hello-tts.wav', waveforms, vocoder.sample_rate) + """ + + # Using the inner class so that these interfaces are not directly exposed on + # `torchaudio.pipelines`, but still listed in documentation. + # The thing is, text processing and vocoder are generic and we do not know what kind of + # new text processing and vocoder will be added in the future, so we want to make these + # interfaces specific to this Tacotron2TTS pipeline. + + class TextProcessor(_TextProcessor): + """Interface of the text processing part of Tacotron2TTS pipeline + + See :func:`torchaudio.pipelines.Tacotron2TTSBundle.get_text_processor` for the usage. + """ + + class Vocoder(_Vocoder): + """Interface of the vocoder part of Tacotron2TTS pipeline + + See :func:`torchaudio.pipelines.Tacotron2TTSBundle.get_vocoder` for the usage. + """ + + @abstractmethod + def get_text_processor(self, *, dl_kwargs=None) -> TextProcessor: + """Create a text processor + + For character-based pipeline, this processor splits the input text by character. + For phoneme-based pipeline, this processor converts the input text (grapheme) to + phonemes. 
+ + If a pre-trained weight file is necessary, + :func:`torch.hub.download_url_to_file` is used to downloaded it. + + Args: + dl_kwargs (dictionary of keyword arguments,): + Passed to :func:`torch.hub.download_url_to_file`. + + Returns: + TextProcessor: + A callable which takes a string or a list of strings as input and + returns Tensor of encoded texts and Tensor of valid lengths. + The object also has ``tokens`` property, which allows to recover the + tokenized form. + + Example - Character-based + >>> text = [ + >>> "Hello World!", + >>> "Text-to-speech!", + >>> ] + >>> bundle = torchaudio.pipelines.TACOTRON2_WAVERNN_CHAR_LJSPEECH + >>> processor = bundle.get_text_processor() + >>> input, lengths = processor(text) + >>> + >>> print(input) + tensor([[19, 16, 23, 23, 26, 11, 34, 26, 29, 23, 15, 2, 0, 0, 0], + [31, 16, 35, 31, 1, 31, 26, 1, 30, 27, 16, 16, 14, 19, 2]], + dtype=torch.int32) + >>> + >>> print(lengths) + tensor([12, 15], dtype=torch.int32) + >>> + >>> print([processor.tokens[i] for i in input[0, :lengths[0]]]) + ['h', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd', '!'] + >>> print([processor.tokens[i] for i in input[1, :lengths[1]]]) + ['t', 'e', 'x', 't', '-', 't', 'o', '-', 's', 'p', 'e', 'e', 'c', 'h', '!'] + + Example - Phoneme-based + >>> text = [ + >>> "Hello, T T S !", + >>> "Text-to-speech!", + >>> ] + >>> bundle = torchaudio.pipelines.TACOTRON2_WAVERNN_PHONE_LJSPEECH + >>> processor = bundle.get_text_processor() + Downloading: + 100%|███████████████████████████████| 63.6M/63.6M [00:04<00:00, 15.3MB/s] + >>> input, lengths = processor(text) + >>> + >>> print(input) + tensor([[54, 20, 65, 69, 11, 92, 44, 65, 38, 2, 0, 0, 0, 0], + [81, 40, 64, 79, 81, 1, 81, 20, 1, 79, 77, 59, 37, 2]], + dtype=torch.int32) + >>> + >>> print(lengths) + tensor([10, 14], dtype=torch.int32) + >>> + >>> print([processor.tokens[i] for i in input[0]]) + ['HH', 'AH', 'L', 'OW', ' ', 'W', 'ER', 'L', 'D', '!', '_', '_', '_', '_'] + >>> print([processor.tokens[i] for i 
in input[1]]) + ['T', 'EH', 'K', 'S', 'T', '-', 'T', 'AH', '-', 'S', 'P', 'IY', 'CH', '!'] + """ + + @abstractmethod + def get_vocoder(self, *, dl_kwargs=None) -> Vocoder: + """Create a vocoder module, based off of either WaveRNN or GriffinLim. + + If a pre-trained weight file is necessary, + :func:`torch.hub.load_state_dict_from_url` is used to downloaded it. + + Args: + dl_kwargs (dictionary of keyword arguments): + Passed to :func:`torch.hub.load_state_dict_from_url`. + + Returns: + Vocoder: + A vocoder module, which takes spectrogram Tensor and an optional + length Tensor, then returns resulting waveform Tensor and an optional + length Tensor. + """ + + @abstractmethod + def get_tacotron2(self, *, dl_kwargs=None) -> Tacotron2: + """Create a Tacotron2 model with pre-trained weight. + + Args: + dl_kwargs (dictionary of keyword arguments): + Passed to :func:`torch.hub.load_state_dict_from_url`. + + Returns: + Tacotron2: + The resulting model. + """ diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/utils.py b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ef3ecb31335ae0cf9950d040fa11c21fb3403b25 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_tts/utils.py @@ -0,0 +1,228 @@ +import logging +import os + +import torch +from torchaudio._internal import download_url_to_file, module_utils as _mod_utils + + +def _get_chars(): + return ( + "_", + "-", + "!", + "'", + "(", + ")", + ",", + ".", + ":", + ";", + "?", + " ", + "a", + "b", + "c", + "d", + "e", + "f", + "g", + "h", + "i", + "j", + "k", + "l", + "m", + "n", + "o", + "p", + "q", + "r", + "s", + "t", + "u", + "v", + "w", + "x", + "y", + "z", + ) + + +def _get_phones(): + return ( + "_", + "-", + "!", + "'", + "(", + ")", + ",", + ".", + ":", + ";", + "?", + " ", + "AA", + "AA0", + "AA1", + "AA2", + "AE", + "AE0", + "AE1", + "AE2", + "AH", + "AH0", + "AH1", + "AH2", + "AO", + 
"AO0", + "AO1", + "AO2", + "AW", + "AW0", + "AW1", + "AW2", + "AY", + "AY0", + "AY1", + "AY2", + "B", + "CH", + "D", + "DH", + "EH", + "EH0", + "EH1", + "EH2", + "ER", + "ER0", + "ER1", + "ER2", + "EY", + "EY0", + "EY1", + "EY2", + "F", + "G", + "HH", + "IH", + "IH0", + "IH1", + "IH2", + "IY", + "IY0", + "IY1", + "IY2", + "JH", + "K", + "L", + "M", + "N", + "NG", + "OW", + "OW0", + "OW1", + "OW2", + "OY", + "OY0", + "OY1", + "OY2", + "P", + "R", + "S", + "SH", + "T", + "TH", + "UH", + "UH0", + "UH1", + "UH2", + "UW", + "UW0", + "UW1", + "UW2", + "V", + "W", + "Y", + "Z", + "ZH", + ) + + +def _to_tensor(indices): + lengths = torch.tensor([len(i) for i in indices], dtype=torch.int32) + values = [torch.tensor(i) for i in indices] + values = torch.nn.utils.rnn.pad_sequence(values, batch_first=True) + return values, lengths + + +def _load_phonemizer(file, dl_kwargs): + if not _mod_utils.is_module_available("dp"): + raise RuntimeError("DeepPhonemizer is not installed. Please install it.") + + from dp.phonemizer import Phonemizer + + # By default, dp issues DEBUG level log. 
+ logger = logging.getLogger("dp") + orig_level = logger.level + logger.setLevel(logging.INFO) + try: + url = f"https://public-asai-dl-models.s3.eu-central-1.amazonaws.com/DeepPhonemizer/{file}" + directory = os.path.join(torch.hub.get_dir(), "checkpoints") + os.makedirs(directory, exist_ok=True) + path = os.path.join(directory, file) + if not os.path.exists(path): + dl_kwargs = {} if dl_kwargs is None else dl_kwargs + download_url_to_file(url, path, **dl_kwargs) + return Phonemizer.from_checkpoint(path) + finally: + logger.setLevel(orig_level) + + +def _unnormalize_waveform(waveform: torch.Tensor, bits: int) -> torch.Tensor: + r"""Transform waveform [-1, 1] to label [0, 2 ** bits - 1]""" + waveform = torch.clamp(waveform, -1, 1) + waveform = (waveform + 1.0) * (2**bits - 1) / 2 + return torch.clamp(waveform, 0, 2**bits - 1).int() + + +def _get_taco_params(n_symbols): + return { + "mask_padding": False, + "n_mels": 80, + "n_frames_per_step": 1, + "symbol_embedding_dim": 512, + "encoder_embedding_dim": 512, + "encoder_n_convolution": 3, + "encoder_kernel_size": 5, + "decoder_rnn_dim": 1024, + "decoder_max_step": 2000, + "decoder_dropout": 0.1, + "decoder_early_stopping": True, + "attention_rnn_dim": 1024, + "attention_hidden_dim": 128, + "attention_location_n_filter": 32, + "attention_location_kernel_size": 31, + "attention_dropout": 0.1, + "prenet_dim": 256, + "postnet_n_convolution": 5, + "postnet_kernel_size": 5, + "postnet_embedding_dim": 512, + "gate_threshold": 0.5, + "n_symbol": n_symbols, + } + + +def _get_wrnn_params(): + return { + "upsample_scales": [5, 5, 11], + "n_classes": 2**8, # n_bits = 8 + "hop_length": 275, + "n_res_block": 10, + "n_rnn": 512, + "n_fc": 512, + "kernel_size": 5, + "n_freq": 80, + "n_hidden": 128, + "n_output": 128, + } diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/__init__.py b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91525812b4aecbff95a66365e0f41f56cbb2637d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/__pycache__/aligner.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/__pycache__/aligner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8116bfb38a7250c9c024eafbe5ed012f3185a795 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/__pycache__/aligner.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/__pycache__/impl.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/__pycache__/impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0986224942dd7cf8f052c44b18e4d5d47584112 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/__pycache__/impl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c47701fae0d74b65a5ff3ba3fadc8d86c399fc21 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/__pycache__/utils.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/aligner.py b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/aligner.py new file mode 100644 index 0000000000000000000000000000000000000000..3655d5bae88181796d6d889013b4438d0ea014b3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/aligner.py @@ -0,0 +1,87 @@ +from abc import ABC, abstractmethod +from typing import Dict, List + +import torch +import torchaudio.functional as F +from torch import Tensor +from torchaudio.functional import TokenSpan + + +class ITokenizer(ABC): + @abstractmethod + def __call__(self, transcript: List[str]) -> List[List[str]]: + """Tokenize the given transcript (list of word) + + .. note:: + + The toranscript must be normalized. + + Args: + transcript (list of str): Transcript (list of word). + + Returns: + (list of int): List of token sequences + """ + + +class Tokenizer(ITokenizer): + def __init__(self, dictionary: Dict[str, int]): + self.dictionary = dictionary + + def __call__(self, transcript: List[str]) -> List[List[int]]: + return [[self.dictionary[c] for c in word] for word in transcript] + + +def _align_emission_and_tokens(emission: Tensor, tokens: List[int], blank: int = 0): + device = emission.device + emission = emission.unsqueeze(0) + targets = torch.tensor([tokens], dtype=torch.int32, device=device) + + aligned_tokens, scores = F.forced_align(emission, targets, blank=blank) + + scores = scores.exp() # convert back to probability + aligned_tokens, scores = aligned_tokens[0], scores[0] # remove batch dimension + return aligned_tokens, scores + + +class IAligner(ABC): + @abstractmethod + def __call__(self, emission: Tensor, tokens: List[List[int]]) -> List[List[TokenSpan]]: + """Generate list of time-stamped token sequences + + Args: + emission (Tensor): Sequence of token probability distributions in log-domain. + Shape: `(time, tokens)`. + tokens (list of integer sequence): Tokenized transcript. 
+ Output from :py:class:`torchaudio.pipelines.Wav2Vec2FABundle.Tokenizer`. + + Returns: + (list of TokenSpan sequence): Tokens with time stamps and scores. + """ + + +def _unflatten(list_, lengths): + assert len(list_) == sum(lengths) + i = 0 + ret = [] + for l in lengths: + ret.append(list_[i : i + l]) + i += l + return ret + + +def _flatten(nested_list): + return [item for list_ in nested_list for item in list_] + + +class Aligner(IAligner): + def __init__(self, blank): + self.blank = blank + + def __call__(self, emission: Tensor, tokens: List[List[int]]) -> List[List[TokenSpan]]: + if emission.ndim != 2: + raise ValueError(f"The input emission must be 2D. Found: {emission.shape}") + + aligned_tokens, scores = _align_emission_and_tokens(emission, _flatten(tokens), self.blank) + spans = F.merge_tokens(aligned_tokens, scores) + return _unflatten(spans, [len(ts) for ts in tokens]) diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/impl.py b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/impl.py new file mode 100644 index 0000000000000000000000000000000000000000..d60fa8adb94e92e1a479fe94e09f521d0fe50056 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/impl.py @@ -0,0 +1,1699 @@ +from dataclasses import dataclass +from typing import Any, Dict, Optional, Tuple + +from torch.nn import Module + +from . import aligner, utils + + +__all__ = [] # type: ignore + + +@dataclass +class Wav2Vec2Bundle: + """Data class that bundles associated information to use pretrained :py:class:`~torchaudio.models.Wav2Vec2Model`. + + This class provides interfaces for instantiating the pretrained model along with + the information necessary to retrieve pretrained weights and additional data + to be used with the model. + + Torchaudio library instantiates objects of this class, each of which represents + a different pretrained model. Client code should access pretrained models via these + instances. 
+ + Please see below for the usage and the available values. + + Example - Feature Extraction + >>> import torchaudio + >>> + >>> bundle = torchaudio.pipelines.HUBERT_BASE + >>> + >>> # Build the model and load pretrained weight. + >>> model = bundle.get_model() + Downloading: + 100%|███████████████████████████████| 360M/360M [00:06<00:00, 60.6MB/s] + >>> + >>> # Resample audio to the expected sampling rate + >>> waveform = torchaudio.functional.resample(waveform, sample_rate, bundle.sample_rate) + >>> + >>> # Extract acoustic features + >>> features, _ = model.extract_features(waveform) + """ # noqa: E501 + + _path: str + _params: Dict[str, Any] + _sample_rate: float + _normalize_waveform: bool + _model_type: str + + @property + def sample_rate(self) -> float: + """Sample rate of the audio that the model is trained on. + + :type: float + """ + return self._sample_rate + + def _get_state_dict(self, dl_kwargs): + # Note: This method is overridden in ASR bundle + return utils._get_state_dict(self._path, dl_kwargs) + + def get_model(self, *, dl_kwargs=None) -> Module: + """Construct the model and load the pretrained weight. + + The weight file is downloaded from the internet and cached with + :func:`torch.hub.load_state_dict_from_url` + + Args: + dl_kwargs (dictionary of keyword arguments): Passed to :func:`torch.hub.load_state_dict_from_url`. + + Returns: + Variation of :py:class:`~torchaudio.models.Wav2Vec2Model`. + + For the models listed below, an additional layer normalization is performed on the input. + + For all other models, a :py:class:`~torchaudio.models.Wav2Vec2Model` instance is returned. 
+ + - WAV2VEC2_LARGE_LV60K + - WAV2VEC2_ASR_LARGE_LV60K_10M + - WAV2VEC2_ASR_LARGE_LV60K_100H + - WAV2VEC2_ASR_LARGE_LV60K_960H + - WAV2VEC2_XLSR53 + - WAV2VEC2_XLSR_300M + - WAV2VEC2_XLSR_1B + - WAV2VEC2_XLSR_2B + - HUBERT_LARGE + - HUBERT_XLARGE + - HUBERT_ASR_LARGE + - HUBERT_ASR_XLARGE + - WAVLM_LARGE + """ + model = utils._get_model(self._model_type, self._params) + state_dict = self._get_state_dict(dl_kwargs) + model.load_state_dict(state_dict) + if self._normalize_waveform: + model = utils._extend_model(model, normalize_waveform=True) + model.eval() + return model + + +@dataclass +class Wav2Vec2ASRBundle(Wav2Vec2Bundle): + """Data class that bundles associated information to use pretrained + :py:class:`~torchaudio.models.Wav2Vec2Model`. + + This class provides interfaces for instantiating the pretrained model along with + the information necessary to retrieve pretrained weights and additional data + to be used with the model. + + Torchaudio library instantiates objects of this class, each of which represents + a different pretrained model. Client code should access pretrained models via these + instances. + + Please see below for the usage and the available values. + + Example - ASR + >>> import torchaudio + >>> + >>> bundle = torchaudio.pipelines.HUBERT_ASR_LARGE + >>> + >>> # Build the model and load pretrained weight. + >>> model = bundle.get_model() + Downloading: + 100%|███████████████████████████████| 1.18G/1.18G [00:17<00:00, 73.8MB/s] + >>> + >>> # Check the corresponding labels of the output. 
+ >>> labels = bundle.get_labels() + >>> print(labels) + ('-', '|', 'E', 'T', 'A', 'O', 'N', 'I', 'H', 'S', 'R', 'D', 'L', 'U', 'M', 'W', 'C', 'F', 'G', 'Y', 'P', 'B', 'V', 'K', "'", 'X', 'J', 'Q', 'Z') + >>> + >>> # Resample audio to the expected sampling rate + >>> waveform = torchaudio.functional.resample(waveform, sample_rate, bundle.sample_rate) + >>> + >>> # Infer the label probability distribution + >>> emissions, _ = model(waveform) + >>> + >>> # Pass emission to decoder + >>> # `ctc_decode` is for illustration purpose only + >>> transcripts = ctc_decode(emissions, labels) + """ # noqa: E501 + + _labels: Tuple[str, ...] + _remove_aux_axis: Tuple[int, ...] = (1, 2, 3) + + def get_labels( + self, + *, + blank: str = "-", + ) -> Tuple[str, ...]: + """The output class labels. + + The first is blank token, and it is customizable. + + Args: + blank (str, optional): Blank token. (default: ``'-'``) + + Returns: + Tuple[str, ...]: + For models fine-tuned on ASR, returns the tuple of strings representing + the output class labels. 
+ + Example + >>> from torchaudio.pipelines import HUBERT_ASR_LARGE as bundle + >>> bundle.get_labels() + ('-', '|', 'E', 'T', 'A', 'O', 'N', 'I', 'H', 'S', 'R', 'D', 'L', 'U', 'M', 'W', 'C', 'F', 'G', 'Y', 'P', 'B', 'V', 'K', "'", 'X', 'J', 'Q', 'Z') + """ # noqa: E501 + return (blank, *self._labels) + + def _get_state_dict(self, dl_kwargs): + return utils._get_state_dict(self._path, dl_kwargs, self._remove_aux_axis) + + +WAV2VEC2_BASE = Wav2Vec2Bundle( + _path="wav2vec2_fairseq_base_ls960.pth", + _params={ + "extractor_mode": "group_norm", + "extractor_conv_layer_config": [ + (512, 10, 5), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 2, 2), + (512, 2, 2), + ], + "extractor_conv_bias": False, + "encoder_embed_dim": 768, + "encoder_projection_dropout": 0.1, + "encoder_pos_conv_kernel": 128, + "encoder_pos_conv_groups": 16, + "encoder_num_layers": 12, + "encoder_num_heads": 12, + "encoder_attention_dropout": 0.1, + "encoder_ff_interm_features": 3072, + "encoder_ff_interm_dropout": 0.0, + "encoder_dropout": 0.1, + "encoder_layer_norm_first": False, + "encoder_layer_drop": 0.05, + "aux_num_out": None, + }, + _sample_rate=16000, + _normalize_waveform=False, + _model_type="Wav2Vec2", +) +WAV2VEC2_BASE.__doc__ = """Wav2vec 2.0 model ("base" architecture), +pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset :cite:`7178964` +(the combination of "train-clean-100", "train-clean-360", and "train-other-500"), not fine-tuned. + +Originally published by the authors of *wav2vec 2.0* :cite:`baevski2020wav2vec` under MIT License and +redistributed with the same license. +[`License `__, +`Source `__] + +Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage. 
+""" # noqa: E501 + +WAV2VEC2_ASR_BASE_10M = Wav2Vec2ASRBundle( + _path="wav2vec2_fairseq_base_ls960_asr_ll10m.pth", + _params={ + "extractor_mode": "group_norm", + "extractor_conv_layer_config": [ + (512, 10, 5), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 2, 2), + (512, 2, 2), + ], + "extractor_conv_bias": False, + "encoder_embed_dim": 768, + "encoder_projection_dropout": 0.1, + "encoder_pos_conv_kernel": 128, + "encoder_pos_conv_groups": 16, + "encoder_num_layers": 12, + "encoder_num_heads": 12, + "encoder_attention_dropout": 0.1, + "encoder_ff_interm_features": 3072, + "encoder_ff_interm_dropout": 0.0, + "encoder_dropout": 0.1, + "encoder_layer_norm_first": False, + "encoder_layer_drop": 0.05, + "aux_num_out": 29, + }, + _labels=utils._get_en_labels(), + _sample_rate=16000, + _normalize_waveform=False, + _model_type="Wav2Vec2", +) +WAV2VEC2_ASR_BASE_10M.__doc__ = """Wav2vec 2.0 model ("base" architecture with an extra linear module), +pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset :cite:`7178964` +(the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and +fine-tuned for ASR on 10 minutes of transcribed audio from *Libri-Light* dataset +:cite:`librilight` ("train-10min" subset). + +Originally published by the authors of *wav2vec 2.0* :cite:`baevski2020wav2vec` under MIT License and +redistributed with the same license. +[`License `__, +`Source `__] + +Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. 
+""" # noqa: E501 + +WAV2VEC2_ASR_BASE_100H = Wav2Vec2ASRBundle( + "wav2vec2_fairseq_base_ls960_asr_ls100.pth", + { + "extractor_mode": "group_norm", + "extractor_conv_layer_config": [ + (512, 10, 5), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 2, 2), + (512, 2, 2), + ], + "extractor_conv_bias": False, + "encoder_embed_dim": 768, + "encoder_projection_dropout": 0.1, + "encoder_pos_conv_kernel": 128, + "encoder_pos_conv_groups": 16, + "encoder_num_layers": 12, + "encoder_num_heads": 12, + "encoder_attention_dropout": 0.1, + "encoder_ff_interm_features": 3072, + "encoder_ff_interm_dropout": 0.0, + "encoder_dropout": 0.1, + "encoder_layer_norm_first": False, + "encoder_layer_drop": 0.05, + "aux_num_out": 29, + }, + _labels=utils._get_en_labels(), + _sample_rate=16000, + _normalize_waveform=False, + _model_type="Wav2Vec2", +) + +WAV2VEC2_ASR_BASE_100H.__doc__ = """Wav2vec 2.0 model ("base" architecture with an extra linear module), +pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset :cite:`7178964` +(the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and +fine-tuned for ASR on 100 hours of transcribed audio from "train-clean-100" subset. + +Originally published by the authors of *wav2vec 2.0* :cite:`baevski2020wav2vec` under MIT License and +redistributed with the same license. +[`License `__, +`Source `__] + +Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. 
+""" # noqa: E501 + +WAV2VEC2_ASR_BASE_960H = Wav2Vec2ASRBundle( + "wav2vec2_fairseq_base_ls960_asr_ls960.pth", + { + "extractor_mode": "group_norm", + "extractor_conv_layer_config": [ + (512, 10, 5), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 2, 2), + (512, 2, 2), + ], + "extractor_conv_bias": False, + "encoder_embed_dim": 768, + "encoder_projection_dropout": 0.1, + "encoder_pos_conv_kernel": 128, + "encoder_pos_conv_groups": 16, + "encoder_num_layers": 12, + "encoder_num_heads": 12, + "encoder_attention_dropout": 0.1, + "encoder_ff_interm_features": 3072, + "encoder_ff_interm_dropout": 0.0, + "encoder_dropout": 0.1, + "encoder_layer_norm_first": False, + "encoder_layer_drop": 0.05, + "aux_num_out": 29, + }, + _labels=utils._get_en_labels(), + _sample_rate=16000, + _normalize_waveform=False, + _model_type="Wav2Vec2", +) +WAV2VEC2_ASR_BASE_960H.__doc__ = """Wav2vec 2.0 model ("base" architecture with an extra linear module), +pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset :cite:`7178964` +(the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and +fine-tuned for ASR on the same audio with the corresponding transcripts. + +Originally published by the authors of *wav2vec 2.0* :cite:`baevski2020wav2vec` under MIT License and +redistributed with the same license. +[`License `__, +`Source `__] + +Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. 
+""" # noqa: E501 + +WAV2VEC2_LARGE = Wav2Vec2Bundle( + "wav2vec2_fairseq_large_ls960.pth", + { + "extractor_mode": "group_norm", + "extractor_conv_layer_config": [ + (512, 10, 5), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 2, 2), + (512, 2, 2), + ], + "extractor_conv_bias": False, + "encoder_embed_dim": 1024, + "encoder_projection_dropout": 0.1, + "encoder_pos_conv_kernel": 128, + "encoder_pos_conv_groups": 16, + "encoder_num_layers": 24, + "encoder_num_heads": 16, + "encoder_attention_dropout": 0.1, + "encoder_ff_interm_features": 4096, + "encoder_ff_interm_dropout": 0.0, + "encoder_dropout": 0.0, + "encoder_layer_norm_first": False, + "encoder_layer_drop": 0.2, + "aux_num_out": None, + }, + _sample_rate=16000, + _normalize_waveform=False, + _model_type="Wav2Vec2", +) +WAV2VEC2_LARGE.__doc__ = """Wav2vec 2.0 model ("large" architecture), +pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset :cite:`7178964` +(the combination of "train-clean-100", "train-clean-360", and "train-other-500"), not fine-tuned. + +Originally published by the authors of *wav2vec 2.0* :cite:`baevski2020wav2vec` under MIT License and +redistributed with the same license. +[`License `__, +`Source `__] + +Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage. 
+""" # noqa: E501 + +WAV2VEC2_ASR_LARGE_10M = Wav2Vec2ASRBundle( + "wav2vec2_fairseq_large_ls960_asr_ll10m.pth", + { + "extractor_mode": "group_norm", + "extractor_conv_layer_config": [ + (512, 10, 5), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 2, 2), + (512, 2, 2), + ], + "extractor_conv_bias": False, + "encoder_embed_dim": 1024, + "encoder_projection_dropout": 0.1, + "encoder_pos_conv_kernel": 128, + "encoder_pos_conv_groups": 16, + "encoder_num_layers": 24, + "encoder_num_heads": 16, + "encoder_attention_dropout": 0.1, + "encoder_ff_interm_features": 4096, + "encoder_ff_interm_dropout": 0.0, + "encoder_dropout": 0.0, + "encoder_layer_norm_first": False, + "encoder_layer_drop": 0.2, + "aux_num_out": 29, + }, + _labels=utils._get_en_labels(), + _sample_rate=16000, + _normalize_waveform=False, + _model_type="Wav2Vec2", +) +WAV2VEC2_ASR_LARGE_10M.__doc__ = """Wav2vec 2.0 model ("large" architecture with an extra linear module), +pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset :cite:`7178964` +(the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and +fine-tuned for ASR on 10 minutes of transcribed audio from *Libri-Light* dataset +:cite:`librilight` ("train-10min" subset). + +Originally published by the authors of *wav2vec 2.0* :cite:`baevski2020wav2vec` under MIT License and +redistributed with the same license. +[`License `__, +`Source `__] + +Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. 
+""" # noqa: E501 + +WAV2VEC2_ASR_LARGE_100H = Wav2Vec2ASRBundle( + "wav2vec2_fairseq_large_ls960_asr_ls100.pth", + { + "extractor_mode": "group_norm", + "extractor_conv_layer_config": [ + (512, 10, 5), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 2, 2), + (512, 2, 2), + ], + "extractor_conv_bias": False, + "encoder_embed_dim": 1024, + "encoder_projection_dropout": 0.1, + "encoder_pos_conv_kernel": 128, + "encoder_pos_conv_groups": 16, + "encoder_num_layers": 24, + "encoder_num_heads": 16, + "encoder_attention_dropout": 0.1, + "encoder_ff_interm_features": 4096, + "encoder_ff_interm_dropout": 0.0, + "encoder_dropout": 0.0, + "encoder_layer_norm_first": False, + "encoder_layer_drop": 0.2, + "aux_num_out": 29, + }, + _labels=utils._get_en_labels(), + _sample_rate=16000, + _normalize_waveform=False, + _model_type="Wav2Vec2", +) +WAV2VEC2_ASR_LARGE_100H.__doc__ = """Wav2vec 2.0 model ("large" architecture with an extra linear module), +pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset :cite:`7178964` +(the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and +fine-tuned for ASR on 100 hours of transcribed audio from +the same dataset ("train-clean-100" subset). + +Originally published by the authors of *wav2vec 2.0* :cite:`baevski2020wav2vec` under MIT License and +redistributed with the same license. +[`License `__, +`Source `__] + +Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. 
+""" # noqa: E501 + +WAV2VEC2_ASR_LARGE_960H = Wav2Vec2ASRBundle( + "wav2vec2_fairseq_large_ls960_asr_ls960.pth", + { + "extractor_mode": "group_norm", + "extractor_conv_layer_config": [ + (512, 10, 5), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 2, 2), + (512, 2, 2), + ], + "extractor_conv_bias": False, + "encoder_embed_dim": 1024, + "encoder_projection_dropout": 0.1, + "encoder_pos_conv_kernel": 128, + "encoder_pos_conv_groups": 16, + "encoder_num_layers": 24, + "encoder_num_heads": 16, + "encoder_attention_dropout": 0.1, + "encoder_ff_interm_features": 4096, + "encoder_ff_interm_dropout": 0.0, + "encoder_dropout": 0.0, + "encoder_layer_norm_first": False, + "encoder_layer_drop": 0.2, + "aux_num_out": 29, + }, + _labels=utils._get_en_labels(), + _sample_rate=16000, + _normalize_waveform=False, + _model_type="Wav2Vec2", +) +WAV2VEC2_ASR_LARGE_960H.__doc__ = """Wav2vec 2.0 model ("large" architecture with an extra linear module), +pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset :cite:`7178964` +(the combination of "train-clean-100", "train-clean-360", and "train-other-500"), and +fine-tuned for ASR on the same audio with the corresponding transcripts. + +Originally published by the authors of *wav2vec 2.0* :cite:`baevski2020wav2vec` under MIT License and +redistributed with the same license. +[`License `__, +`Source `__] + +Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. 
+""" # noqa: E501 + +WAV2VEC2_LARGE_LV60K = Wav2Vec2Bundle( + "wav2vec2_fairseq_large_lv60k.pth", + { + "extractor_mode": "layer_norm", + "extractor_conv_layer_config": [ + (512, 10, 5), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 2, 2), + (512, 2, 2), + ], + "extractor_conv_bias": True, + "encoder_embed_dim": 1024, + "encoder_projection_dropout": 0.1, + "encoder_pos_conv_kernel": 128, + "encoder_pos_conv_groups": 16, + "encoder_num_layers": 24, + "encoder_num_heads": 16, + "encoder_attention_dropout": 0.1, + "encoder_ff_interm_features": 4096, + "encoder_ff_interm_dropout": 0.0, + "encoder_dropout": 0.0, + "encoder_layer_norm_first": True, + "encoder_layer_drop": 0.0, + "aux_num_out": None, + }, + _sample_rate=16000, + _normalize_waveform=True, + _model_type="Wav2Vec2", +) +WAV2VEC2_LARGE_LV60K.__doc__ = """Wav2vec 2.0 model ("large-lv60k" architecture), +pre-trained on 60,000 hours of unlabeled audio from *Libri-Light* dataset :cite:`librilight`, +not fine-tuned. + +Originally published by the authors of *wav2vec 2.0* :cite:`baevski2020wav2vec` under MIT License and +redistributed with the same license. +[`License `__, +`Source `__] + +Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage. 
+""" # noqa: E501 + +WAV2VEC2_ASR_LARGE_LV60K_10M = Wav2Vec2ASRBundle( + "wav2vec2_fairseq_large_lv60k_asr_ll10m.pth", + { + "extractor_mode": "layer_norm", + "extractor_conv_layer_config": [ + (512, 10, 5), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 2, 2), + (512, 2, 2), + ], + "extractor_conv_bias": True, + "encoder_embed_dim": 1024, + "encoder_projection_dropout": 0.1, + "encoder_pos_conv_kernel": 128, + "encoder_pos_conv_groups": 16, + "encoder_num_layers": 24, + "encoder_num_heads": 16, + "encoder_attention_dropout": 0.1, + "encoder_ff_interm_features": 4096, + "encoder_ff_interm_dropout": 0.0, + "encoder_dropout": 0.0, + "encoder_layer_norm_first": True, + "encoder_layer_drop": 0.0, + "aux_num_out": 29, + }, + _labels=utils._get_en_labels(), + _sample_rate=16000, + _normalize_waveform=True, + _model_type="Wav2Vec2", +) +WAV2VEC2_ASR_LARGE_LV60K_10M.__doc__ = """Wav2vec 2.0 model ("large-lv60k" architecture with an extra linear module), +pre-trained on 60,000 hours of unlabeled audio from *Libri-Light* dataset :cite:`librilight`, and +fine-tuned for ASR on 10 minutes of transcribed audio from the same dataset ("train-10min" subset). + +Originally published by the authors of *wav2vec 2.0* :cite:`baevski2020wav2vec` under MIT License and +redistributed with the same license. +[`License `__, +`Source `__] + +Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. 
+""" # noqa: E501 + +WAV2VEC2_ASR_LARGE_LV60K_100H = Wav2Vec2ASRBundle( + "wav2vec2_fairseq_large_lv60k_asr_ls100.pth", + { + "extractor_mode": "layer_norm", + "extractor_conv_layer_config": [ + (512, 10, 5), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 2, 2), + (512, 2, 2), + ], + "extractor_conv_bias": True, + "encoder_embed_dim": 1024, + "encoder_projection_dropout": 0.1, + "encoder_pos_conv_kernel": 128, + "encoder_pos_conv_groups": 16, + "encoder_num_layers": 24, + "encoder_num_heads": 16, + "encoder_attention_dropout": 0.1, + "encoder_ff_interm_features": 4096, + "encoder_ff_interm_dropout": 0.0, + "encoder_dropout": 0.0, + "encoder_layer_norm_first": True, + "encoder_layer_drop": 0.0, + "aux_num_out": 29, + }, + _labels=utils._get_en_labels(), + _sample_rate=16000, + _normalize_waveform=True, + _model_type="Wav2Vec2", +) +WAV2VEC2_ASR_LARGE_LV60K_100H.__doc__ = """Wav2vec 2.0 model ("large-lv60k" architecture with an extra linear module), +pre-trained on 60,000 hours of unlabeled audio from *Libri-Light* dataset :cite:`librilight`, and +fine-tuned for ASR on 100 hours of transcribed audio from +*LibriSpeech* dataset :cite:`7178964` ("train-clean-100" subset). + +Originally published by the authors of *wav2vec 2.0* :cite:`baevski2020wav2vec` under MIT License and +redistributed with the same license. +[`License `__, +`Source `__] + +Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. 
+""" # noqa: E501 + +WAV2VEC2_ASR_LARGE_LV60K_960H = Wav2Vec2ASRBundle( + "wav2vec2_fairseq_large_lv60k_asr_ls960.pth", + { + "extractor_mode": "layer_norm", + "extractor_conv_layer_config": [ + (512, 10, 5), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 2, 2), + (512, 2, 2), + ], + "extractor_conv_bias": True, + "encoder_embed_dim": 1024, + "encoder_projection_dropout": 0.1, + "encoder_pos_conv_kernel": 128, + "encoder_pos_conv_groups": 16, + "encoder_num_layers": 24, + "encoder_num_heads": 16, + "encoder_attention_dropout": 0.1, + "encoder_ff_interm_features": 4096, + "encoder_ff_interm_dropout": 0.0, + "encoder_dropout": 0.0, + "encoder_layer_norm_first": True, + "encoder_layer_drop": 0.0, + "aux_num_out": 29, + }, + _labels=utils._get_en_labels(), + _sample_rate=16000, + _normalize_waveform=True, + _model_type="Wav2Vec2", +) +WAV2VEC2_ASR_LARGE_LV60K_960H.__doc__ = """Wav2vec 2.0 model ("large-lv60k" architecture with an extra linear module), +pre-trained on 60,000 hours of unlabeled audio from *Libri-Light* :cite:`librilight` dataset, and +fine-tuned for ASR on 960 hours of transcribed audio from *LibriSpeech* dataset :cite:`7178964` +(the combination of "train-clean-100", "train-clean-360", and "train-other-500"). + +Originally published by the authors of *wav2vec 2.0* :cite:`baevski2020wav2vec` under MIT License and +redistributed with the same license. +[`License `__, +`Source `__] + +Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage. 
+""" # noqa: E501 + +WAV2VEC2_XLSR53 = Wav2Vec2Bundle( + "wav2vec2_fairseq_large_xlsr53.pth", + { + "extractor_mode": "layer_norm", + "extractor_conv_layer_config": [ + (512, 10, 5), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 2, 2), + (512, 2, 2), + ], + "extractor_conv_bias": True, + "encoder_embed_dim": 1024, + "encoder_projection_dropout": 0.0, + "encoder_pos_conv_kernel": 128, + "encoder_pos_conv_groups": 16, + "encoder_num_layers": 24, + "encoder_num_heads": 16, + "encoder_attention_dropout": 0.0, + "encoder_ff_interm_features": 4096, + "encoder_ff_interm_dropout": 0.0, + "encoder_dropout": 0.0, + "encoder_layer_norm_first": True, + "encoder_layer_drop": 0.0, + "aux_num_out": None, + }, + _sample_rate=16000, + _normalize_waveform=True, + _model_type="Wav2Vec2", +) +WAV2VEC2_XLSR53.__doc__ = """Wav2vec 2.0 model ("base" architecture), +pre-trained on 56,000 hours of unlabeled audio from multiple datasets ( +*Multilingual LibriSpeech* :cite:`Pratap_2020`, +*CommonVoice* :cite:`ardila2020common` and +*BABEL* :cite:`Gales2014SpeechRA`), +not fine-tuned. + +Originally published by the authors of +*Unsupervised Cross-lingual Representation Learning for Speech Recognition* +:cite:`conneau2020unsupervised` under MIT License and redistributed with the same license. +[`License `__, +`Source `__] + +Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage. 
+""" # noqa: E501 + +HUBERT_BASE = Wav2Vec2Bundle( + "hubert_fairseq_base_ls960.pth", + { + "extractor_mode": "group_norm", + "extractor_conv_layer_config": [ + (512, 10, 5), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 3, 2), + (512, 2, 2), + (512, 2, 2), + ], + "extractor_conv_bias": False, + "encoder_embed_dim": 768, + "encoder_projection_dropout": 0.1, + "encoder_pos_conv_kernel": 128, + "encoder_pos_conv_groups": 16, + "encoder_num_layers": 12, + "encoder_num_heads": 12, + "encoder_attention_dropout": 0.1, + "encoder_ff_interm_features": 3072, + "encoder_ff_interm_dropout": 0.0, + "encoder_dropout": 0.1, + "encoder_layer_norm_first": False, + "encoder_layer_drop": 0.05, + "aux_num_out": None, + }, + _sample_rate=16000, + _normalize_waveform=False, + _model_type="Wav2Vec2", +) +HUBERT_BASE.__doc__ = """HuBERT model ("base" architecture), +pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset :cite:`7178964` +(the combination of "train-clean-100", "train-clean-360", and "train-other-500"), not fine-tuned. + +Originally published by the authors of *HuBERT* :cite:`hsu2021hubert` under MIT License and +redistributed with the same license. +[`License `__, +`Source `__] + +Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage. 
# All HuBERT bundles share the same 7-layer convolutional feature extractor:
# one (dim=512, kernel=10, stride=5) layer, four (512, 3, 2) layers, and
# two (512, 2, 2) layers.
_HUBERT_CONV_LAYER_CONFIG = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2


def _hubert_params(encoder_embed_dim, encoder_num_layers, encoder_ff_interm_features, dropout, aux_num_out):
    """Build the constructor parameters shared by the HuBERT bundles.

    Args:
        encoder_embed_dim (int): Transformer embedding dim (1024 for "large", 1280 for "xlarge").
        encoder_num_layers (int): number of Transformer layers (24 for "large", 48 for "xlarge").
        encoder_ff_interm_features (int): feed-forward hidden size.
        dropout (float): value used for both ``encoder_ff_interm_dropout`` and
            ``encoder_layer_drop`` (0.0 for pre-trained-only bundles, 0.1 for ASR bundles).
        aux_num_out (int or None): size of the aux (CTC) output layer; ``None`` when not fine-tuned.

    Returns:
        dict: keyword arguments for the Wav2Vec2 model factory.
    """
    return {
        "extractor_mode": "layer_norm",
        # fresh list per call so a caller mutating one bundle's config cannot affect another
        "extractor_conv_layer_config": list(_HUBERT_CONV_LAYER_CONFIG),
        "extractor_conv_bias": False,
        "encoder_embed_dim": encoder_embed_dim,
        "encoder_projection_dropout": 0.0,
        "encoder_pos_conv_kernel": 128,
        "encoder_pos_conv_groups": 16,
        "encoder_num_layers": encoder_num_layers,
        "encoder_num_heads": 16,
        "encoder_attention_dropout": 0.0,
        "encoder_ff_interm_features": encoder_ff_interm_features,
        "encoder_ff_interm_dropout": dropout,
        "encoder_dropout": 0.0,
        "encoder_layer_norm_first": True,
        "encoder_layer_drop": dropout,
        "aux_num_out": aux_num_out,
    }


HUBERT_LARGE = Wav2Vec2Bundle(
    "hubert_fairseq_large_ll60k.pth",
    _hubert_params(1024, 24, 4096, 0.0, None),
    _sample_rate=16000,
    _normalize_waveform=True,
    _model_type="Wav2Vec2",
)
HUBERT_LARGE.__doc__ = """HuBERT model ("large" architecture),
pre-trained on 60,000 hours of unlabeled audio from *Libri-Light* dataset :cite:`librilight`,
not fine-tuned.

Originally published by the authors of *HuBERT* :cite:`hsu2021hubert` under MIT License and
redistributed with the same license.
[`License `__,
`Source `__]

Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage.
"""  # noqa: E501

HUBERT_XLARGE = Wav2Vec2Bundle(
    "hubert_fairseq_xlarge_ll60k.pth",
    _hubert_params(1280, 48, 5120, 0.0, None),
    _sample_rate=16000,
    _normalize_waveform=True,
    _model_type="Wav2Vec2",
)
HUBERT_XLARGE.__doc__ = """HuBERT model ("extra large" architecture),
pre-trained on 60,000 hours of unlabeled audio from *Libri-Light* dataset :cite:`librilight`,
not fine-tuned.

Originally published by the authors of *HuBERT* :cite:`hsu2021hubert` under MIT License and
redistributed with the same license.
[`License `__,
`Source `__]

Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage.
"""  # noqa: E501

HUBERT_ASR_LARGE = Wav2Vec2ASRBundle(
    "hubert_fairseq_large_ll60k_asr_ls960.pth",
    _hubert_params(1024, 24, 4096, 0.1, 29),
    _labels=utils._get_en_labels(),
    _sample_rate=16000,
    _normalize_waveform=True,
    _model_type="Wav2Vec2",
)
HUBERT_ASR_LARGE.__doc__ = """HuBERT model ("large" architecture),
pre-trained on 60,000 hours of unlabeled audio from *Libri-Light* dataset :cite:`librilight`, and
fine-tuned for ASR on 960 hours of transcribed audio from *LibriSpeech* dataset :cite:`7178964`
(the combination of "train-clean-100", "train-clean-360", and "train-other-500").

Originally published by the authors of *HuBERT* :cite:`hsu2021hubert` under MIT License and
redistributed with the same license.
[`License `__,
`Source `__]

Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
"""  # noqa: E501

HUBERT_ASR_XLARGE = Wav2Vec2ASRBundle(
    "hubert_fairseq_xlarge_ll60k_asr_ls960.pth",
    _hubert_params(1280, 48, 5120, 0.1, 29),
    _labels=utils._get_en_labels(),
    _sample_rate=16000,
    _normalize_waveform=True,
    _model_type="Wav2Vec2",
)
HUBERT_ASR_XLARGE.__doc__ = """HuBERT model ("extra large" architecture),
pre-trained on 60,000 hours of unlabeled audio from
*Libri-Light* dataset :cite:`librilight`, and
fine-tuned for ASR on 960 hours of transcribed audio from
*LibriSpeech* dataset :cite:`7178964`
(the combination of "train-clean-100", "train-clean-360", and "train-other-500").

Originally published by the authors of *HuBERT* :cite:`hsu2021hubert` under MIT License and
redistributed with the same license.
[`License `__,
`Source `__]

Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
"""  # noqa: E501
# Shared 7-layer convolutional feature extractor of the VoxPopuli "base" bundles.
_VOXPOPULI_CONV_LAYER_CONFIG = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2


def _voxpopuli_base_10k_params(aux_num_out):
    """Build the constructor parameters shared by the VoxPopuli "base-10k" ASR bundles.

    All five language-specific bundles use the identical "base" architecture;
    only the size of the CTC output layer (``aux_num_out``) differs.

    Args:
        aux_num_out (int): number of output classes of the CTC head
            (number of labels for the language, plus the blank token).

    Returns:
        dict: keyword arguments for the Wav2Vec2 model factory.
    """
    return {
        "extractor_mode": "group_norm",
        # fresh list per call so a caller mutating one bundle's config cannot affect another
        "extractor_conv_layer_config": list(_VOXPOPULI_CONV_LAYER_CONFIG),
        "extractor_conv_bias": False,
        "encoder_embed_dim": 768,
        "encoder_projection_dropout": 0.0,
        "encoder_pos_conv_kernel": 128,
        "encoder_pos_conv_groups": 16,
        "encoder_num_layers": 12,
        "encoder_num_heads": 12,
        "encoder_attention_dropout": 0.0,
        "encoder_ff_interm_features": 3072,
        "encoder_ff_interm_dropout": 0.1,
        "encoder_dropout": 0.0,
        "encoder_layer_norm_first": False,
        "encoder_layer_drop": 0.1,
        "aux_num_out": aux_num_out,
    }


VOXPOPULI_ASR_BASE_10K_DE = Wav2Vec2ASRBundle(
    "wav2vec2_voxpopuli_base_10k_asr_de.pt",
    _voxpopuli_base_10k_params(32),
    _labels=utils._get_de_labels(),
    _sample_rate=16000,
    _normalize_waveform=False,
    # axes 1-3 are unused fairseq Dictionary slots; 35 is a near-unused stray label
    _remove_aux_axis=(1, 2, 3, 35),
    _model_type="Wav2Vec2",
)
VOXPOPULI_ASR_BASE_10K_DE.__doc__ = """wav2vec 2.0 model ("base" architecture),
pre-trained on 10k hours of unlabeled audio from *VoxPopuli* dataset :cite:`voxpopuli`
("10k" subset, consisting of 23 languages), and
fine-tuned for ASR on 282 hours of transcribed audio from "de" subset.

Originally published by the authors of *VoxPopuli* :cite:`voxpopuli` under CC BY-NC 4.0 and
redistributed with the same license.
[`License `__,
`Source `__]

Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
"""  # noqa: E501

VOXPOPULI_ASR_BASE_10K_EN = Wav2Vec2ASRBundle(
    "wav2vec2_voxpopuli_base_10k_asr_en.pt",
    _voxpopuli_base_10k_params(28),
    _labels=utils._get_vp_en_labels(),
    _sample_rate=16000,
    _normalize_waveform=False,
    _remove_aux_axis=(1, 2, 3, 31),
    _model_type="Wav2Vec2",
)
VOXPOPULI_ASR_BASE_10K_EN.__doc__ = """wav2vec 2.0 model ("base" architecture),
pre-trained on 10k hours of unlabeled audio from *VoxPopuli* dataset :cite:`voxpopuli`
("10k" subset, consisting of 23 languages), and
fine-tuned for ASR on 543 hours of transcribed audio from "en" subset.

Originally published by the authors of *VoxPopuli* :cite:`voxpopuli` under CC BY-NC 4.0 and
redistributed with the same license.
[`License `__,
`Source `__]

Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
"""  # noqa: E501

VOXPOPULI_ASR_BASE_10K_ES = Wav2Vec2ASRBundle(
    "wav2vec2_voxpopuli_base_10k_asr_es.pt",
    _voxpopuli_base_10k_params(35),
    _labels=utils._get_es_labels(),
    _sample_rate=16000,
    _normalize_waveform=False,
    _remove_aux_axis=(1, 2, 3, 35),
    _model_type="Wav2Vec2",
)
VOXPOPULI_ASR_BASE_10K_ES.__doc__ = """wav2vec 2.0 model ("base" architecture),
pre-trained on 10k hours of unlabeled audio from *VoxPopuli* dataset :cite:`voxpopuli`
("10k" subset, consisting of 23 languages), and
fine-tuned for ASR on 166 hours of transcribed audio from "es" subset.

Originally published by the authors of *VoxPopuli* :cite:`voxpopuli` under CC BY-NC 4.0 and
redistributed with the same license.
[`License `__,
`Source `__]

Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
"""  # noqa: E501

VOXPOPULI_ASR_BASE_10K_FR = Wav2Vec2ASRBundle(
    "wav2vec2_voxpopuli_base_10k_asr_fr.pt",
    _voxpopuli_base_10k_params(43),
    _labels=utils._get_fr_labels(),
    _sample_rate=16000,
    _normalize_waveform=False,
    _model_type="Wav2Vec2",
)
VOXPOPULI_ASR_BASE_10K_FR.__doc__ = """wav2vec 2.0 model ("base" architecture),
pre-trained on 10k hours of unlabeled audio from *VoxPopuli* dataset :cite:`voxpopuli`
("10k" subset, consisting of 23 languages), and
fine-tuned for ASR on 211 hours of transcribed audio from "fr" subset.

Originally published by the authors of *VoxPopuli* :cite:`voxpopuli` under CC BY-NC 4.0 and
redistributed with the same license.
[`License `__,
`Source `__]

Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
"""  # noqa: E501

VOXPOPULI_ASR_BASE_10K_IT = Wav2Vec2ASRBundle(
    "wav2vec2_voxpopuli_base_10k_asr_it.pt",
    _voxpopuli_base_10k_params(37),
    _labels=utils._get_it_labels(),
    _sample_rate=16000,
    _normalize_waveform=False,
    _remove_aux_axis=(1, 2, 3),
    _model_type="Wav2Vec2",
)
VOXPOPULI_ASR_BASE_10K_IT.__doc__ = """wav2vec 2.0 model ("base" architecture),
pre-trained on 10k hours of unlabeled audio from *VoxPopuli* dataset :cite:`voxpopuli`
("10k" subset, consisting of 23 languages), and
fine-tuned for ASR on 91 hours of transcribed audio from "it" subset.

Originally published by the authors of *VoxPopuli* :cite:`voxpopuli` under CC BY-NC 4.0 and
redistributed with the same license.
[`License `__,
`Source `__]

Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2ASRBundle` for the usage.
"""  # noqa: E501
# WavLM and XLS-R use the same 7-layer convolutional feature extractor geometry.
_WAVLM_XLSR_CONV_LAYER_CONFIG = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2


def _wavlm_params(extractor_mode, embed_dim, num_layers, num_heads, ff_features, layer_norm_first):
    """Build the constructor parameters shared by the WavLM bundles.

    Args:
        extractor_mode (str): ``"group_norm"`` for "base", ``"layer_norm"`` for "large".
        embed_dim (int): Transformer embedding dim (768 base, 1024 large).
        num_layers (int): number of Transformer layers (12 base, 24 large).
        num_heads (int): attention heads per layer (12 base, 16 large).
        ff_features (int): feed-forward hidden size (3072 base, 4096 large).
        layer_norm_first (bool): pre-LN (large) vs post-LN (base) Transformer.

    Returns:
        dict: keyword arguments for the WavLM model factory.
    """
    return {
        "extractor_mode": extractor_mode,
        "extractor_conv_layer_config": list(_WAVLM_XLSR_CONV_LAYER_CONFIG),
        "extractor_conv_bias": False,
        "encoder_embed_dim": embed_dim,
        "encoder_projection_dropout": 0.1,
        "encoder_pos_conv_kernel": 128,
        "encoder_pos_conv_groups": 16,
        "encoder_num_layers": num_layers,
        "encoder_num_heads": num_heads,
        # WavLM-specific gated relative position bias parameters
        "encoder_max_distance": 800,
        "encoder_num_buckets": 320,
        "encoder_attention_dropout": 0.1,
        "encoder_ff_interm_features": ff_features,
        "encoder_ff_interm_dropout": 0.0,
        "encoder_dropout": 0.1,
        "encoder_layer_norm_first": layer_norm_first,
        "encoder_layer_drop": 0.05,
        "aux_num_out": None,
    }


def _xlsr_params(embed_dim, num_layers, ff_features, projection_dropout):
    """Build the constructor parameters shared by the XLS-R bundles.

    Args:
        embed_dim (int): Transformer embedding dim (1024 / 1280 / 1920).
        num_layers (int): number of Transformer layers (24 / 48 / 48).
        ff_features (int): feed-forward hidden size (4096 / 5120 / 7680).
        projection_dropout (float): dropout after the feature projection
            (0.0 for the 300M model, 0.1 for 1B and 2B).

    Returns:
        dict: keyword arguments for the Wav2Vec2 model factory.
    """
    return {
        "extractor_mode": "layer_norm",
        "extractor_conv_layer_config": list(_WAVLM_XLSR_CONV_LAYER_CONFIG),
        "extractor_conv_bias": True,
        "encoder_embed_dim": embed_dim,
        "encoder_projection_dropout": projection_dropout,
        "encoder_pos_conv_kernel": 128,
        "encoder_pos_conv_groups": 16,
        "encoder_num_layers": num_layers,
        "encoder_num_heads": 16,
        "encoder_attention_dropout": 0.0,
        "encoder_ff_interm_features": ff_features,
        "encoder_ff_interm_dropout": 0.0,
        "encoder_dropout": 0.0,
        "encoder_layer_norm_first": True,
        "encoder_layer_drop": 0.0,
        "aux_num_out": None,
    }


WAVLM_BASE = Wav2Vec2Bundle(
    "wavlm_base.pth",
    _wavlm_params("group_norm", 768, 12, 12, 3072, False),
    _model_type="WavLM",
    _sample_rate=16000,
    _normalize_waveform=False,
)
WAVLM_BASE.__doc__ = """WavLM Base model ("base" architecture),
pre-trained on 960 hours of unlabeled audio from *LibriSpeech* dataset :cite:`7178964`, not fine-tuned.

Originally published by the authors of *WavLM* :cite:`chen2022wavlm` under MIT License and
redistributed with the same license.
[`License `__,
`Source `__]

Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage.
"""  # noqa: E501

WAVLM_BASE_PLUS = Wav2Vec2Bundle(
    "wavlm_base_plus.pth",
    _wavlm_params("group_norm", 768, 12, 12, 3072, False),
    _model_type="WavLM",
    _sample_rate=16000,
    _normalize_waveform=False,
)
WAVLM_BASE_PLUS.__doc__ = """WavLM Base+ model ("base" architecture),
pre-trained on 60,000 hours of Libri-Light dataset :cite:`librilight`, 10,000 hours of GigaSpeech :cite:`GigaSpeech2021`,
and 24,000 hours of *VoxPopuli* :cite:`voxpopuli`, not fine-tuned.

Originally published by the authors of *WavLM* :cite:`chen2022wavlm` under MIT License and
redistributed with the same license.
[`License `__,
`Source `__]

Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage.
"""  # noqa: E501

WAVLM_LARGE = Wav2Vec2Bundle(
    "wavlm_large.pth",
    _wavlm_params("layer_norm", 1024, 24, 16, 4096, True),
    _model_type="WavLM",
    _sample_rate=16000,
    _normalize_waveform=True,
)
WAVLM_LARGE.__doc__ = """WavLM Large model ("large" architecture),
pre-trained on 60,000 hours of Libri-Light dataset :cite:`librilight`, 10,000 hours of GigaSpeech :cite:`GigaSpeech2021`,
and 24,000 hours of *VoxPopuli* :cite:`voxpopuli`, not fine-tuned.

Originally published by the authors of *WavLM* :cite:`chen2022wavlm` under MIT License and
redistributed with the same license.
[`License `__,
`Source `__]

Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2Bundle` for the usage.
"""  # noqa: E501

WAV2VEC2_XLSR_300M = Wav2Vec2Bundle(
    "wav2vec2_xlsr_300m.pth",
    _xlsr_params(1024, 24, 4096, 0.0),
    _model_type="Wav2Vec2",
    _sample_rate=16000,
    _normalize_waveform=True,
)
WAV2VEC2_XLSR_300M.__doc__ = """XLS-R model with 300 million parameters,
pre-trained on 436,000 hours of unlabeled audio from multiple datasets (
*Multilingual LibriSpeech* :cite:`Pratap_2020`,
*CommonVoice* :cite:`ardila2020common`,
*VoxLingua107* :cite:`valk2021voxlingua107`,
*BABEL* :cite:`Gales2014SpeechRA`, and
*VoxPopuli* :cite:`voxpopuli`) in 128 languages,
not fine-tuned.

Originally published by the authors of *XLS-R* :cite:`babu2021xls` under MIT License and
redistributed with the same license.
[`License `__,
`Source `__]

Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2Bundle` for usage details.
"""  # noqa: E501

WAV2VEC2_XLSR_1B = Wav2Vec2Bundle(
    "wav2vec2_xlsr_1b.pth",
    _xlsr_params(1280, 48, 5120, 0.1),
    _model_type="Wav2Vec2",
    _sample_rate=16000,
    _normalize_waveform=True,
)
WAV2VEC2_XLSR_1B.__doc__ = """XLS-R model with 1 billion parameters,
pre-trained on 436,000 hours of unlabeled audio from multiple datasets (
*Multilingual LibriSpeech* :cite:`Pratap_2020`,
*CommonVoice* :cite:`ardila2020common`,
*VoxLingua107* :cite:`valk2021voxlingua107`,
*BABEL* :cite:`Gales2014SpeechRA`, and
*VoxPopuli* :cite:`voxpopuli`) in 128 languages,
not fine-tuned.

Originally published by the authors of *XLS-R* :cite:`babu2021xls` under MIT License and
redistributed with the same license.
[`License `__,
`Source `__]

Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2Bundle` for usage details.
"""  # noqa: E501

WAV2VEC2_XLSR_2B = Wav2Vec2Bundle(
    "wav2vec2_xlsr_2b.pth",
    _xlsr_params(1920, 48, 7680, 0.1),
    _model_type="Wav2Vec2",
    _sample_rate=16000,
    _normalize_waveform=True,
)
WAV2VEC2_XLSR_2B.__doc__ = """XLS-R model with 2 billion parameters,
pre-trained on 436,000 hours of unlabeled audio from multiple datasets (
*Multilingual LibriSpeech* :cite:`Pratap_2020`,
*CommonVoice* :cite:`ardila2020common`,
*VoxLingua107* :cite:`valk2021voxlingua107`,
*BABEL* :cite:`Gales2014SpeechRA`, and
*VoxPopuli* :cite:`voxpopuli`) in 128 languages,
not fine-tuned.

Originally published by the authors of *XLS-R* :cite:`babu2021xls` under MIT License and
redistributed with the same license.
[`License `__,
`Source `__]

Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2Bundle` for usage details.
"""  # noqa: E501
@dataclass
class Wav2Vec2FABundle(Wav2Vec2ASRBundle):
    """Data class that bundles associated information to use pretrained :py:class:`~torchaudio.models.Wav2Vec2Model` for forced alignment.

    This class provides interfaces for instantiating the pretrained model along with
    the information necessary to retrieve pretrained weights and additional data
    to be used with the model.

    Torchaudio library instantiates objects of this class, each of which represents
    a different pretrained model. Client code should access pretrained models via these
    instances.

    Please see below for the usage and the available values.

    Example - Feature Extraction
        >>> import torchaudio
        >>>
        >>> bundle = torchaudio.pipelines.MMS_FA
        >>>
        >>> # Build the model and load pretrained weight.
        >>> model = bundle.get_model()
        Downloading:
        100%|███████████████████████████████| 1.18G/1.18G [00:05<00:00, 216MB/s]
        >>>
        >>> # Resample audio to the expected sampling rate
        >>> waveform = torchaudio.functional.resample(waveform, sample_rate, bundle.sample_rate)
        >>>
        >>> # Estimate the probability of token distribution
        >>> emission, _ = model(waveform)
        >>>
        >>> # Generate frame-wise alignment
        >>> alignment, scores = torchaudio.functional.forced_align(
        >>>     emission, targets, input_lengths, target_lengths, blank=0)
        >>>
    """  # noqa: E501

    class Tokenizer(aligner.ITokenizer):
        """Interface of the tokenizer"""

    class Aligner(aligner.IAligner):
        """Interface of the aligner"""

    def get_labels(self, star: Optional[str] = "*", blank: str = "-") -> Tuple[str, ...]:
        """Get the labels corresponding to the feature dimension of emission.

        The first is blank token, and it is customizable.

        Args:
            star (str or None, optional): Change or disable star token. (default: ``"*"``)
            blank (str, optional): Change the blank token. (default: ``'-'``)

        Returns:
            Tuple[str, ...]:
                For models fine-tuned on ASR, returns the tuple of strings representing
                the output class labels.

        Example
            >>> from torchaudio.pipelines import MMS_FA as bundle
            >>> bundle.get_labels()
            ('-', 'a', 'i', 'e', 'n', 'o', 'u', 't', 's', 'r', 'm', 'k', 'l', 'd', 'g', 'h', 'y', 'b', 'p', 'w', 'c', 'v', 'j', 'z', 'f', "'", 'q', 'x', '*')
            >>> bundle.get_labels(star=None)
            ('-', 'a', 'i', 'e', 'n', 'o', 'u', 't', 's', 'r', 'm', 'k', 'l', 'd', 'g', 'h', 'y', 'b', 'p', 'w', 'c', 'v', 'j', 'z', 'f', "'", 'q', 'x')
        """  # noqa: E501
        labels = super().get_labels(blank=blank)
        # The star token is appended LAST so that it lines up with the extra
        # output dimension added by ``get_model(with_star=True)``.
        return labels if star is None else (*labels, star)

    def get_model(self, with_star: bool = True, *, dl_kwargs=None) -> Module:
        """Construct the model and load the pretrained weight.

        The weight file is downloaded from the internet and cached with
        :func:`torch.hub.load_state_dict_from_url`

        Args:
            with_star (bool, optional): If enabled, the last dimension of output layer is
                extended by one, which corresponds to `star` token.
            dl_kwargs (dictionary of keyword arguments): Passed to :func:`torch.hub.load_state_dict_from_url`.

        Returns:
            Variation of :py:class:`~torchaudio.models.Wav2Vec2Model`.

        .. note::

           The model created with this method returns probability in log-domain,
           (i.e. :py:func:`torch.nn.functional.log_softmax` is applied), whereas
           the other Wav2Vec2 models returns logit.
        """
        # Build the bare architecture, then load weights (dropping unused aux
        # axes if the bundle specifies ``_remove_aux_axis``).
        model = utils._get_model(self._model_type, self._params)
        state_dict = utils._get_state_dict(self._path, dl_kwargs, self._remove_aux_axis)
        model.load_state_dict(state_dict)
        # Unlike the ASR bundles, forced alignment always applies log_softmax,
        # and optionally appends the star-token output column.
        model = utils._extend_model(
            model, normalize_waveform=self._normalize_waveform, apply_log_softmax=True, append_star=with_star
        )
        model.eval()
        return model

    def get_dict(self, star: Optional[str] = "*", blank: str = "-") -> Dict[str, int]:
        """Get the mapping from token to index (in emission feature dim)

        Args:
            star (str or None, optional): Change or disable star token. (default: ``"*"``)
            blank (str, optional): Change the blank token. (default: ``'-'``)

        Returns:
            Dict[str, int]:
                Mapping from each output class label to its index in the
                emission feature dimension.

        Example
            >>> from torchaudio.pipelines import MMS_FA as bundle
            >>> bundle.get_dict()
            {'-': 0, 'a': 1, 'i': 2, 'e': 3, 'n': 4, 'o': 5, 'u': 6, 't': 7, 's': 8, 'r': 9, 'm': 10, 'k': 11, 'l': 12, 'd': 13, 'g': 14, 'h': 15, 'y': 16, 'b': 17, 'p': 18, 'w': 19, 'c': 20, 'v': 21, 'j': 22, 'z': 23, 'f': 24, "'": 25, 'q': 26, 'x': 27, '*': 28}
            >>> bundle.get_dict(star=None)
            {'-': 0, 'a': 1, 'i': 2, 'e': 3, 'n': 4, 'o': 5, 'u': 6, 't': 7, 's': 8, 'r': 9, 'm': 10, 'k': 11, 'l': 12, 'd': 13, 'g': 14, 'h': 15, 'y': 16, 'b': 17, 'p': 18, 'w': 19, 'c': 20, 'v': 21, 'j': 22, 'z': 23, 'f': 24, "'": 25, 'q': 26, 'x': 27}
        """  # noqa: E501
        return {k: i for i, k in enumerate(self.get_labels(star=star, blank=blank))}

    def get_tokenizer(self) -> Tokenizer:
        """Instantiate a Tokenizer.

        Returns:
            Tokenizer
        """
        return aligner.Tokenizer(self.get_dict())

    def get_aligner(self) -> Aligner:
        """Instantiate an Aligner.

        Returns:
            Aligner
        """
        # blank=0: the blank token is always the first label (see get_labels).
        return aligner.Aligner(blank=0)


# Forced-alignment bundle for the MMS (Massively Multilingual Speech) model.
# Note: unlike the other bundles, ``_path`` here is a full URL, not a file name.
MMS_FA = Wav2Vec2FABundle(
    "https://dl.fbaipublicfiles.com/mms/torchaudio/ctc_alignment_mling_uroman/model.pt",
    {
        "extractor_mode": "layer_norm",
        "extractor_conv_layer_config": [
            (512, 10, 5),
            (512, 3, 2),
            (512, 3, 2),
            (512, 3, 2),
            (512, 3, 2),
            (512, 2, 2),
            (512, 2, 2),
        ],
        "extractor_conv_bias": True,
        "encoder_embed_dim": 1024,
        "encoder_projection_dropout": 0.0,
        "encoder_pos_conv_kernel": 128,
        "encoder_pos_conv_groups": 16,
        "encoder_num_layers": 24,
        "encoder_num_heads": 16,
        "encoder_attention_dropout": 0.0,
        "encoder_ff_interm_features": 4096,
        "encoder_ff_interm_dropout": 0.1,
        "encoder_dropout": 0.0,
        "encoder_layer_norm_first": True,
        "encoder_layer_drop": 0.1,
        "aux_num_out": 28,
    },
    _labels=utils._get_mms_labels(),
    _sample_rate=16000,
    _normalize_waveform=True,
    _model_type="Wav2Vec2",
)
MMS_FA.__doc__ = """
Trained on 31K hours of data in 1,130 languages from *Scaling Speech Technology to 1,000+ Languages* :cite:`pratap2023scaling`.

Published by the authors of *Scaling Speech Technology to 1,000+ Languages* :cite:`pratap2023scaling` under [`CC-BY-NC 4.0 License `__].

Please refer to :py:class:`torchaudio.pipelines.Wav2Vec2FABundle` for usage details.

.. note::

   Unlike other Wav2Vec2 bundles, this model does not have a token for word boundary (like `|`). This makes the post-processing of alignments slightly different.
"""  # noqa: E501
+""" # noqa: E501 diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/utils.py b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e690e8103c7a47a01d719e746e6c98a9c7f6c8db --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/pipelines/_wav2vec2/utils.py @@ -0,0 +1,346 @@ +from typing import List, Optional, Tuple + +import torch +from torch import nn, Tensor + +from torchaudio._internal import load_state_dict_from_url +from torchaudio.models import wav2vec2_model, Wav2Vec2Model, wavlm_model + + +def _get_model(type_, params): + factories = { + "Wav2Vec2": wav2vec2_model, + "WavLM": wavlm_model, + } + if type_ not in factories: + raise ValueError(f"Supported model types are {tuple(factories.keys())}. Found: {type_}") + factory = factories[type_] + return factory(**params) + + +class _Wav2Vec2Model(nn.Module): + """Wrapper class for :py:class:`~torchaudio.models.Wav2Vec2Model`. 
+ + This is used for layer normalization at the input + """ + + def __init__(self, model: Wav2Vec2Model, normalize_waveform: bool, apply_log_softmax: bool, append_star: bool): + super().__init__() + self.model = model + self.normalize_waveform = normalize_waveform + self.apply_log_softmax = apply_log_softmax + self.append_star = append_star + + def forward(self, waveforms: Tensor, lengths: Optional[Tensor] = None) -> Tuple[Tensor, Optional[Tensor]]: + if self.normalize_waveform: + waveforms = nn.functional.layer_norm(waveforms, waveforms.shape) + output, output_lengths = self.model(waveforms, lengths) + if self.apply_log_softmax: + output = torch.nn.functional.log_softmax(output, dim=-1) + if self.append_star: + star_dim = torch.zeros((1, output.size(1), 1), dtype=output.dtype, device=output.device) + output = torch.cat((output, star_dim), dim=-1) + return output, output_lengths + + @torch.jit.export + def extract_features( + self, + waveforms: Tensor, + lengths: Optional[Tensor] = None, + num_layers: Optional[int] = None, + ) -> Tuple[List[Tensor], Optional[Tensor]]: + if self.normalize_waveform: + waveforms = nn.functional.layer_norm(waveforms, waveforms.shape) + return self.model.extract_features(waveforms, lengths, num_layers) + + +def _extend_model(module, normalize_waveform, apply_log_softmax=False, append_star=False): + """Add extra transformations to the model""" + return _Wav2Vec2Model(module, normalize_waveform, apply_log_softmax, append_star) + + +def _remove_aux_axes(state_dict, axes): + # Remove the seemingly unnecessary axis + # For ASR task, the pretrained weights originated from fairseq has unrelated dimensions at index 1, 2, 3 + # It's originated from the Dictionary implementation of fairseq, which was intended for NLP tasks, + # but not used during the ASR training. 
+ # https://github.com/pytorch/fairseq/blob/c5ff181125c7e6126b49a85e5ebdd5f5b6a07914/fairseq/data/dictionary.py#L21-L37 + # https://github.com/pytorch/fairseq/blob/c5ff181125c7e6126b49a85e5ebdd5f5b6a07914/fairseq/criterions/ctc.py#L126-L129 + # + # Also, some pretrained weights originated from voxpopuli has an extra dimensions that almost never used and + # that resembles mistake. + # The label `1` shows up in the training dataset of German (1 out of 16M), + # English (1 / 28M), Spanish (1 / 9.4M), Romanian (1 / 4.7M) and Polish (6 / 5.8M) + for key in ["aux.weight", "aux.bias"]: + mat = state_dict[key] + state_dict[key] = torch.stack([mat[i] for i in range(mat.size(0)) if i not in axes]) + + +def _get_state_dict(url, dl_kwargs, remove_axes=None): + if not url.startswith("https"): + url = f"https://download.pytorch.org/torchaudio/models/{url}" + dl_kwargs = {} if dl_kwargs is None else dl_kwargs + state_dict = load_state_dict_from_url(url, **dl_kwargs) + if remove_axes: + _remove_aux_axes(state_dict, remove_axes) + return state_dict + + +def _get_en_labels(): + return ( + "|", + "E", + "T", + "A", + "O", + "N", + "I", + "H", + "S", + "R", + "D", + "L", + "U", + "M", + "W", + "C", + "F", + "G", + "Y", + "P", + "B", + "V", + "K", + "'", + "X", + "J", + "Q", + "Z", + ) + + +def _get_de_labels(): + return ( + "|", + "e", + "n", + "i", + "r", + "s", + "t", + "a", + "d", + "h", + "u", + "l", + "g", + "c", + "m", + "o", + "b", + "w", + "f", + "k", + "z", + "p", + "v", + "ü", + "ä", + "ö", + "j", + "ß", + "y", + "x", + "q", + ) + + +def _get_vp_en_labels(): + return ( + "|", + "e", + "t", + "o", + "i", + "a", + "n", + "s", + "r", + "h", + "l", + "d", + "c", + "u", + "m", + "p", + "f", + "g", + "w", + "y", + "b", + "v", + "k", + "x", + "j", + "q", + "z", + ) + + +def _get_es_labels(): + return ( + "|", + "e", + "a", + "o", + "s", + "n", + "r", + "i", + "l", + "d", + "c", + "t", + "u", + "p", + "m", + "b", + "q", + "y", + "g", + "v", + "h", + "ó", + "f", + "í", + "á", + "j", 
+ "z", + "ñ", + "é", + "x", + "ú", + "k", + "w", + "ü", + ) + + +def _get_fr_labels(): + return ( + "|", + "e", + "s", + "n", + "i", + "t", + "r", + "a", + "o", + "u", + "l", + "d", + "c", + "p", + "m", + "é", + "v", + "q", + "f", + "g", + "b", + "h", + "x", + "à", + "j", + "è", + "y", + "ê", + "z", + "ô", + "k", + "ç", + "œ", + "û", + "ù", + "î", + "â", + "w", + "ï", + "ë", + "ü", + "æ", + ) + + +def _get_it_labels(): + return ( + "|", + "e", + "i", + "a", + "o", + "n", + "t", + "r", + "l", + "s", + "c", + "d", + "u", + "p", + "m", + "g", + "v", + "h", + "z", + "f", + "b", + "q", + "à", + "è", + "ù", + "é", + "ò", + "ì", + "k", + "y", + "x", + "w", + "j", + "ó", + "í", + "ï", + ) + + +def _get_mms_labels(): + return ( + "a", + "i", + "e", + "n", + "o", + "u", + "t", + "s", + "r", + "m", + "k", + "l", + "d", + "g", + "h", + "y", + "b", + "p", + "w", + "c", + "v", + "j", + "z", + "f", + "'", + "q", + "x", + ) diff --git a/venv/lib/python3.10/site-packages/torchaudio/pipelines/rnnt_pipeline.py b/venv/lib/python3.10/site-packages/torchaudio/pipelines/rnnt_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..11b5a479f3785241a00313a85ead1405b2f673cd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/pipelines/rnnt_pipeline.py @@ -0,0 +1,380 @@ +import json +import math +from abc import ABC, abstractmethod +from dataclasses import dataclass +from functools import partial +from typing import Callable, List, Tuple + +import torch +import torchaudio +from torchaudio._internal import module_utils +from torchaudio.models import emformer_rnnt_base, RNNT, RNNTBeamSearch + + +__all__ = [] + +_decibel = 2 * 20 * math.log10(torch.iinfo(torch.int16).max) +_gain = pow(10, 0.05 * _decibel) + + +def _piecewise_linear_log(x): + x[x > math.e] = torch.log(x[x > math.e]) + x[x <= math.e] = x[x <= math.e] / math.e + return x + + +class _FunctionalModule(torch.nn.Module): + def __init__(self, functional): + super().__init__() + self.functional = 
functional + + def forward(self, input): + return self.functional(input) + + +class _GlobalStatsNormalization(torch.nn.Module): + def __init__(self, global_stats_path): + super().__init__() + + with open(global_stats_path) as f: + blob = json.loads(f.read()) + + self.register_buffer("mean", torch.tensor(blob["mean"])) + self.register_buffer("invstddev", torch.tensor(blob["invstddev"])) + + def forward(self, input): + return (input - self.mean) * self.invstddev + + +class _FeatureExtractor(ABC): + @abstractmethod + def __call__(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """Generates features and length output from the given input tensor. + + Args: + input (torch.Tensor): input tensor. + + Returns: + (torch.Tensor, torch.Tensor): + torch.Tensor: + Features, with shape `(length, *)`. + torch.Tensor: + Length, with shape `(1,)`. + """ + + +class _TokenProcessor(ABC): + @abstractmethod + def __call__(self, tokens: List[int], **kwargs) -> str: + """Decodes given list of tokens to text sequence. + + Args: + tokens (List[int]): list of tokens to decode. + + Returns: + str: + Decoded text sequence. + """ + + +class _ModuleFeatureExtractor(torch.nn.Module, _FeatureExtractor): + """``torch.nn.Module``-based feature extraction pipeline. + + Args: + pipeline (torch.nn.Module): module that implements feature extraction logic. + """ + + def __init__(self, pipeline: torch.nn.Module) -> None: + super().__init__() + self.pipeline = pipeline + + def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """Generates features and length output from the given input tensor. + + Args: + input (torch.Tensor): input tensor. + + Returns: + (torch.Tensor, torch.Tensor): + torch.Tensor: + Features, with shape `(length, *)`. + torch.Tensor: + Length, with shape `(1,)`. 
+ """ + features = self.pipeline(input) + length = torch.tensor([features.shape[0]]) + return features, length + + +class _SentencePieceTokenProcessor(_TokenProcessor): + """SentencePiece-model-based token processor. + + Args: + sp_model_path (str): path to SentencePiece model. + """ + + def __init__(self, sp_model_path: str) -> None: + if not module_utils.is_module_available("sentencepiece"): + raise RuntimeError("SentencePiece is not available. Please install it.") + + import sentencepiece as spm + + self.sp_model = spm.SentencePieceProcessor(model_file=sp_model_path) + self.post_process_remove_list = { + self.sp_model.unk_id(), + self.sp_model.eos_id(), + self.sp_model.pad_id(), + } + + def __call__(self, tokens: List[int], lstrip: bool = True) -> str: + """Decodes given list of tokens to text sequence. + + Args: + tokens (List[int]): list of tokens to decode. + lstrip (bool, optional): if ``True``, returns text sequence with leading whitespace + removed. (Default: ``True``). + + Returns: + str: + Decoded text sequence. + """ + filtered_hypo_tokens = [ + token_index for token_index in tokens[1:] if token_index not in self.post_process_remove_list + ] + output_string = "".join(self.sp_model.id_to_piece(filtered_hypo_tokens)).replace("\u2581", " ") + + if lstrip: + return output_string.lstrip() + else: + return output_string + + +@dataclass +class RNNTBundle: + """Dataclass that bundles components for performing automatic speech recognition (ASR, speech-to-text) + inference with an RNN-T model. + + More specifically, the class provides methods that produce the featurization pipeline, + decoder wrapping the specified RNN-T model, and output token post-processor that together + constitute a complete end-to-end ASR inference pipeline that produces a text sequence + given a raw waveform. + + It can support non-streaming (full-context) inference as well as streaming inference. 
+ + Users should not directly instantiate objects of this class; rather, users should use the + instances (representing pre-trained models) that exist within the module, + e.g. :data:`torchaudio.pipelines.EMFORMER_RNNT_BASE_LIBRISPEECH`. + + Example + >>> import torchaudio + >>> from torchaudio.pipelines import EMFORMER_RNNT_BASE_LIBRISPEECH + >>> import torch + >>> + >>> # Non-streaming inference. + >>> # Build feature extractor, decoder with RNN-T model, and token processor. + >>> feature_extractor = EMFORMER_RNNT_BASE_LIBRISPEECH.get_feature_extractor() + 100%|███████████████████████████████| 3.81k/3.81k [00:00<00:00, 4.22MB/s] + >>> decoder = EMFORMER_RNNT_BASE_LIBRISPEECH.get_decoder() + Downloading: "https://download.pytorch.org/torchaudio/models/emformer_rnnt_base_librispeech.pt" + 100%|███████████████████████████████| 293M/293M [00:07<00:00, 42.1MB/s] + >>> token_processor = EMFORMER_RNNT_BASE_LIBRISPEECH.get_token_processor() + 100%|███████████████████████████████| 295k/295k [00:00<00:00, 25.4MB/s] + >>> + >>> # Instantiate LibriSpeech dataset; retrieve waveform for first sample. + >>> dataset = torchaudio.datasets.LIBRISPEECH("/home/librispeech", url="test-clean") + >>> waveform = next(iter(dataset))[0].squeeze() + >>> + >>> with torch.no_grad(): + >>> # Produce mel-scale spectrogram features. + >>> features, length = feature_extractor(waveform) + >>> + >>> # Generate top-10 hypotheses. + >>> hypotheses = decoder(features, length, 10) + >>> + >>> # For top hypothesis, convert predicted tokens to text. + >>> text = token_processor(hypotheses[0][0]) + >>> print(text) + he hoped there would be stew for dinner turnips and carrots and bruised potatoes and fat mutton pieces to [...] + >>> + >>> + >>> # Streaming inference. 
+ >>> hop_length = EMFORMER_RNNT_BASE_LIBRISPEECH.hop_length + >>> num_samples_segment = EMFORMER_RNNT_BASE_LIBRISPEECH.segment_length * hop_length + >>> num_samples_segment_right_context = ( + >>> num_samples_segment + EMFORMER_RNNT_BASE_LIBRISPEECH.right_context_length * hop_length + >>> ) + >>> + >>> # Build streaming inference feature extractor. + >>> streaming_feature_extractor = EMFORMER_RNNT_BASE_LIBRISPEECH.get_streaming_feature_extractor() + >>> + >>> # Process same waveform as before, this time sequentially across overlapping segments + >>> # to simulate streaming inference. Note the usage of ``streaming_feature_extractor`` and ``decoder.infer``. + >>> state, hypothesis = None, None + >>> for idx in range(0, len(waveform), num_samples_segment): + >>> segment = waveform[idx: idx + num_samples_segment_right_context] + >>> segment = torch.nn.functional.pad(segment, (0, num_samples_segment_right_context - len(segment))) + >>> with torch.no_grad(): + >>> features, length = streaming_feature_extractor(segment) + >>> hypotheses, state = decoder.infer(features, length, 10, state=state, hypothesis=hypothesis) + >>> hypothesis = hypotheses[0] + >>> transcript = token_processor(hypothesis[0]) + >>> if transcript: + >>> print(transcript, end=" ", flush=True) + he hoped there would be stew for dinner turn ips and car rots and bru 'd oes and fat mut ton pieces to [...] 
+ """ + + class FeatureExtractor(_FeatureExtractor): + """Interface of the feature extraction part of RNN-T pipeline""" + + class TokenProcessor(_TokenProcessor): + """Interface of the token processor part of RNN-T pipeline""" + + _rnnt_path: str + _rnnt_factory_func: Callable[[], RNNT] + _global_stats_path: str + _sp_model_path: str + _right_padding: int + _blank: int + _sample_rate: int + _n_fft: int + _n_mels: int + _hop_length: int + _segment_length: int + _right_context_length: int + + def _get_model(self) -> RNNT: + model = self._rnnt_factory_func() + path = torchaudio.utils.download_asset(self._rnnt_path) + state_dict = torch.load(path) + model.load_state_dict(state_dict) + model.eval() + return model + + @property + def sample_rate(self) -> int: + """Sample rate (in cycles per second) of input waveforms. + + :type: int + """ + return self._sample_rate + + @property + def n_fft(self) -> int: + """Size of FFT window to use. + + :type: int + """ + return self._n_fft + + @property + def n_mels(self) -> int: + """Number of mel spectrogram features to extract from input waveforms. + + :type: int + """ + return self._n_mels + + @property + def hop_length(self) -> int: + """Number of samples between successive frames in input expected by model. + + :type: int + """ + return self._hop_length + + @property + def segment_length(self) -> int: + """Number of frames in segment in input expected by model. + + :type: int + """ + return self._segment_length + + @property + def right_context_length(self) -> int: + """Number of frames in right contextual block in input expected by model. + + :type: int + """ + return self._right_context_length + + def get_decoder(self) -> RNNTBeamSearch: + """Constructs RNN-T decoder. + + Returns: + RNNTBeamSearch + """ + model = self._get_model() + return RNNTBeamSearch(model, self._blank) + + def get_feature_extractor(self) -> FeatureExtractor: + """Constructs feature extractor for non-streaming (full-context) ASR. 
+ + Returns: + FeatureExtractor + """ + local_path = torchaudio.utils.download_asset(self._global_stats_path) + return _ModuleFeatureExtractor( + torch.nn.Sequential( + torchaudio.transforms.MelSpectrogram( + sample_rate=self.sample_rate, n_fft=self.n_fft, n_mels=self.n_mels, hop_length=self.hop_length + ), + _FunctionalModule(lambda x: x.transpose(1, 0)), + _FunctionalModule(lambda x: _piecewise_linear_log(x * _gain)), + _GlobalStatsNormalization(local_path), + _FunctionalModule(lambda x: torch.nn.functional.pad(x, (0, 0, 0, self._right_padding))), + ) + ) + + def get_streaming_feature_extractor(self) -> FeatureExtractor: + """Constructs feature extractor for streaming (simultaneous) ASR. + + Returns: + FeatureExtractor + """ + local_path = torchaudio.utils.download_asset(self._global_stats_path) + return _ModuleFeatureExtractor( + torch.nn.Sequential( + torchaudio.transforms.MelSpectrogram( + sample_rate=self.sample_rate, n_fft=self.n_fft, n_mels=self.n_mels, hop_length=self.hop_length + ), + _FunctionalModule(lambda x: x.transpose(1, 0)), + _FunctionalModule(lambda x: _piecewise_linear_log(x * _gain)), + _GlobalStatsNormalization(local_path), + ) + ) + + def get_token_processor(self) -> TokenProcessor: + """Constructs token processor. 
+ + Returns: + TokenProcessor + """ + local_path = torchaudio.utils.download_asset(self._sp_model_path) + return _SentencePieceTokenProcessor(local_path) + + +EMFORMER_RNNT_BASE_LIBRISPEECH = RNNTBundle( + _rnnt_path="models/emformer_rnnt_base_librispeech.pt", + _rnnt_factory_func=partial(emformer_rnnt_base, num_symbols=4097), + _global_stats_path="pipeline-assets/global_stats_rnnt_librispeech.json", + _sp_model_path="pipeline-assets/spm_bpe_4096_librispeech.model", + _right_padding=4, + _blank=4096, + _sample_rate=16000, + _n_fft=400, + _n_mels=80, + _hop_length=160, + _segment_length=16, + _right_context_length=4, +) +EMFORMER_RNNT_BASE_LIBRISPEECH.__doc__ = """ASR pipeline based on Emformer-RNNT, +pretrained on *LibriSpeech* dataset :cite:`7178964`, +capable of performing both streaming and non-streaming inference. + +The underlying model is constructed by :py:func:`torchaudio.models.emformer_rnnt_base` +and utilizes weights trained on LibriSpeech using training script ``train.py`` +`here `__ with default arguments. + +Please refer to :py:class:`RNNTBundle` for usage instructions. 
+""" diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/__init__.py b/venv/lib/python3.10/site-packages/torchaudio/prototype/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/prototype/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8f48fc88232b5e4c4c313073ca1f65a5da9b16d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/prototype/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/datasets/__init__.py b/venv/lib/python3.10/site-packages/torchaudio/prototype/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0e4a6194f48027caf10f3dcbbada53719a14d4a2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/prototype/datasets/__init__.py @@ -0,0 +1,4 @@ +from .musan import Musan + + +__all__ = ["Musan"] diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/datasets/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/prototype/datasets/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94c752d4c378bf8955e5e27f35224241d1855c7e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/prototype/datasets/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/datasets/__pycache__/musan.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/prototype/datasets/__pycache__/musan.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a73554c0b204dbbff8cac23b3cb38c0a3d7f2d7 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torchaudio/prototype/datasets/__pycache__/musan.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/datasets/musan.py b/venv/lib/python3.10/site-packages/torchaudio/prototype/datasets/musan.py new file mode 100644 index 0000000000000000000000000000000000000000..c4592bb3e4097f51064bfac01467873ba7263ec8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/prototype/datasets/musan.py @@ -0,0 +1,67 @@ +from pathlib import Path +from typing import Tuple, Union + +import torch +from torch.utils.data import Dataset +from torchaudio.datasets.utils import _load_waveform + + +_SUBSETS = ["music", "noise", "speech"] +_SAMPLE_RATE = 16_000 + + +class Musan(Dataset): + r"""*MUSAN* :cite:`musan2015` dataset. + + Args: + root (str or Path): Root directory where the dataset's top-level directory exists. + subset (str): Subset of the dataset to use. Options: [``"music"``, ``"noise"``, ``"speech"``]. + """ + + def __init__(self, root: Union[str, Path], subset: str): + if subset not in _SUBSETS: + raise ValueError(f"Invalid subset '{subset}' given. Please provide one of {_SUBSETS}") + + subset_path = Path(root) / subset + self._walker = [str(p) for p in subset_path.glob("*/*.*")] + + def get_metadata(self, n: int) -> Tuple[str, int, str]: + r"""Get metadata for the n-th sample in the dataset. Returns filepath instead of waveform, + but otherwise returns the same fields as :py:func:`__getitem__`. + + Args: + n (int): Index of sample to be loaded. + + Returns: + (str, int, str): + str + Path to audio. + int + Sample rate. + str + File name. + """ + audio_path = self._walker[n] + return audio_path, _SAMPLE_RATE, Path(audio_path).name + + def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, str]: + r"""Return the n-th sample in the dataset. + + Args: + n (int): Index of sample to be loaded. + + Returns: + (torch.Tensor, int, str): + torch.Tensor + Waveform. + int + Sample rate. + str + File name. 
+ """ + audio_path, sample_rate, filename = self.get_metadata(n) + path = Path(audio_path) + return _load_waveform(path.parent, path.name, sample_rate), sample_rate, filename + + def __len__(self) -> int: + return len(self._walker) diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/__init__.py b/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..20bc181731eba87faeb77a36e7e1cdce4101f496 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/__init__.py @@ -0,0 +1,26 @@ +from ._dsp import ( + adsr_envelope, + exp_sigmoid, + extend_pitch, + filter_waveform, + frequency_impulse_response, + oscillator_bank, + sinc_impulse_response, +) +from ._rir import ray_tracing, simulate_rir_ism +from .functional import barkscale_fbanks, chroma_filterbank + + +__all__ = [ + "adsr_envelope", + "exp_sigmoid", + "barkscale_fbanks", + "chroma_filterbank", + "extend_pitch", + "filter_waveform", + "frequency_impulse_response", + "oscillator_bank", + "ray_tracing", + "sinc_impulse_response", + "simulate_rir_ism", +] diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63eed704132c866d86d1dc79dac624249bb9e476 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/__pycache__/_dsp.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/__pycache__/_dsp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36f0caec9087157ba346fb8782cb89f8ec3b883b Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/__pycache__/_dsp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/__pycache__/_rir.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/__pycache__/_rir.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b067f1eeaab1e2441d2b424721fc25d1fdada577 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/__pycache__/_rir.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/__pycache__/functional.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/__pycache__/functional.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef097ed4c4cc5b020602a85302ec0b9f0c5bff65 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/__pycache__/functional.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/_dsp.py b/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/_dsp.py new file mode 100644 index 0000000000000000000000000000000000000000..72b1a153f57eaec1b464ad42199cf6f6e331ae26 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/_dsp.py @@ -0,0 +1,433 @@ +import warnings +from typing import List, Optional, Union + +import torch + +from torchaudio.functional import fftconvolve + + +def oscillator_bank( + frequencies: torch.Tensor, + amplitudes: torch.Tensor, + sample_rate: float, + reduction: str = "sum", + dtype: Optional[torch.dtype] = torch.float64, +) -> torch.Tensor: + """Synthesize waveform from the given instantaneous frequencies and amplitudes. + + .. devices:: CPU CUDA + + .. 
properties:: Autograd TorchScript + + Note: + The phase information of the output waveform is found by taking the cumulative sum + of the given instantaneous frequencies (``frequencies``). + This incurs roundoff error when the data type does not have enough precision. + Using ``torch.float64`` can work around this. + + The following figure shows the difference between ``torch.float32`` and + ``torch.float64`` when generating a sin wave of constant frequency and amplitude + with sample rate 8000 [Hz]. + Notice that ``torch.float32`` version shows artifacts that are not seen in + ``torch.float64`` version. + + .. image:: https://download.pytorch.org/torchaudio/doc-assets/oscillator_precision.png + + Args: + frequencies (Tensor): Sample-wise oscillator frequencies (Hz). Shape `(..., time, N)`. + amplitudes (Tensor): Sample-wise oscillator amplitude. Shape: `(..., time, N)`. + sample_rate (float): Sample rate + reduction (str): Reduction to perform. + Valid values are ``"sum"``, ``"mean"`` or ``"none"``. Default: ``"sum"`` + dtype (torch.dtype or None, optional): The data type on which cumulative sum operation is performed. + Default: ``torch.float64``. Pass ``None`` to disable the casting. + + Returns: + Tensor: + The resulting waveform. + + If ``reduction`` is ``"none"``, then the shape is + `(..., time, N)`, otherwise the shape is `(..., time)`. + """ + if frequencies.shape != amplitudes.shape: + raise ValueError( + "The shapes of `frequencies` and `amplitudes` must match. " + f"Found: {frequencies.shape} and {amplitudes.shape} respectively." + ) + reductions = ["sum", "mean", "none"] + if reduction not in reductions: + raise ValueError(f"The value of reduction must be either {reductions}. Found: {reduction}") + + invalid = torch.abs(frequencies) >= sample_rate / 2 + if torch.any(invalid): + warnings.warn( + "Some frequencies are above nyquist frequency. " + "Setting the corresponding amplitude to zero. " + "This might cause numerically unstable gradient." 
+ ) + amplitudes = torch.where(invalid, 0.0, amplitudes) + + pi2 = 2.0 * torch.pi + freqs = frequencies * pi2 / sample_rate % pi2 + phases = torch.cumsum(freqs, dim=-2, dtype=dtype) + if dtype is not None and freqs.dtype != dtype: + phases = phases.to(freqs.dtype) + + waveform = amplitudes * torch.sin(phases) + if reduction == "sum": + return waveform.sum(-1) + if reduction == "mean": + return waveform.mean(-1) + return waveform + + +def adsr_envelope( + num_frames: int, + *, + attack: float = 0.0, + hold: float = 0.0, + decay: float = 0.0, + sustain: float = 1.0, + release: float = 0.0, + n_decay: int = 2, + dtype: Optional[torch.dtype] = None, + device: Optional[torch.device] = None, +): + """Generate ADSR Envelope + + .. devices:: CPU CUDA + + Args: + num_frames (int): The number of output frames. + attack (float, optional): + The relative *time* it takes to reach the maximum level from + the start. (Default: ``0.0``) + hold (float, optional): + The relative *time* the maximum level is held before + it starts to decay. (Default: ``0.0``) + decay (float, optional): + The relative *time* it takes to sustain from + the maximum level. (Default: ``0.0``) + sustain (float, optional): The relative *level* at which + the sound should sustain. (Default: ``1.0``) + + .. Note:: + The duration of sustain is derived as `1.0 - (The sum of attack, hold, decay and release)`. + + release (float, optional): The relative *time* it takes for the sound level to + reach zero after the sustain. (Default: ``0.0``) + n_decay (int, optional): The degree of polynomial decay. Default: ``2``. + dtype (torch.dtype, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default + (see :py:func:`torch.set_default_tensor_type`). + device (torch.device, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :py:func:`torch.set_default_tensor_type`). 
+ device will be the CPU for CPU tensor types and the current CUDA + device for CUDA tensor types. + + Returns: + Tensor: ADSR Envelope. Shape: `(num_frames, )` + + Example + .. image:: https://download.pytorch.org/torchaudio/doc-assets/adsr_examples.png + + """ + if not 0 <= attack <= 1: + raise ValueError(f"The value of `attack` must be within [0, 1]. Found: {attack}") + if not 0 <= decay <= 1: + raise ValueError(f"The value of `decay` must be within [0, 1]. Found: {decay}") + if not 0 <= sustain <= 1: + raise ValueError(f"The value of `sustain` must be within [0, 1]. Found: {sustain}") + if not 0 <= hold <= 1: + raise ValueError(f"The value of `hold` must be within [0, 1]. Found: {hold}") + if not 0 <= release <= 1: + raise ValueError(f"The value of `release` must be within [0, 1]. Found: {release}") + if attack + decay + release + hold > 1: + raise ValueError("The sum of `attack`, `hold`, `decay` and `release` must not exceed 1.") + + nframes = num_frames - 1 + num_a = int(nframes * attack) + num_h = int(nframes * hold) + num_d = int(nframes * decay) + num_r = int(nframes * release) + + # Initialize with sustain + out = torch.full((num_frames,), float(sustain), device=device, dtype=dtype) + + # attack + if num_a > 0: + torch.linspace(0.0, 1.0, num_a + 1, out=out[: num_a + 1]) + + # hold + if num_h > 0: + out[num_a : num_a + num_h + 1] = 1.0 + + # decay + if num_d > 0: + # Compute: sustain + (1.0 - sustain) * (linspace[1, 0] ** n_decay) + i = num_a + num_h + decay = out[i : i + num_d + 1] + torch.linspace(1.0, 0.0, num_d + 1, out=decay) + decay **= n_decay + decay *= 1.0 - sustain + decay += sustain + + # sustain is handled by initialization + + # release + if num_r > 0: + torch.linspace(sustain, 0, num_r + 1, out=out[-num_r - 1 :]) + + return out + + +def extend_pitch( + base: torch.Tensor, + pattern: Union[int, List[float], torch.Tensor], +): + """Extend the given time series values with multipliers of them. + + .. devices:: CPU CUDA + + .. 
properties:: Autograd TorchScript + + Given a series of fundamental frequencies (pitch), this function appends + its harmonic overtones or inharmonic partials. + + Args: + base (torch.Tensor): + Base time series, like fundamental frequencies (Hz). Shape: `(..., time, 1)`. + pattern (int, list of floats or torch.Tensor): + If ``int``, the number of pitch series after the operation. + `pattern - 1` tones are added, so that the resulting Tensor contains + up to `pattern`-th overtones of the given series. + + If list of float or ``torch.Tensor``, it must be one dimensional, + representing the custom multiplier of the fundamental frequency. + + Returns: + Tensor: Oscillator frequencies (Hz). Shape: `(..., time, num_tones)`. + + Example + >>> # fundamental frequency + >>> f0 = torch.linspace(1, 5, 5).unsqueeze(-1) + >>> f0 + tensor([[1.], + [2.], + [3.], + [4.], + [5.]]) + >>> # Add harmonic overtones, up to 3rd. + >>> f = extend_pitch(f0, 3) + >>> f.shape + torch.Size([5, 3]) + >>> f + tensor([[ 1., 2., 3.], + [ 2., 4., 6.], + [ 3., 6., 9.], + [ 4., 8., 12.], + [ 5., 10., 15.]]) + >>> # Add custom (inharmonic) partials. + >>> f = extend_pitch(f0, torch.tensor([1, 2.1, 3.3, 4.5])) + >>> f.shape + torch.Size([5, 4]) + >>> f + tensor([[ 1.0000, 2.1000, 3.3000, 4.5000], + [ 2.0000, 4.2000, 6.6000, 9.0000], + [ 3.0000, 6.3000, 9.9000, 13.5000], + [ 4.0000, 8.4000, 13.2000, 18.0000], + [ 5.0000, 10.5000, 16.5000, 22.5000]]) + """ + if isinstance(pattern, torch.Tensor): + mult = pattern + elif isinstance(pattern, int): + mult = torch.linspace(1.0, float(pattern), pattern, device=base.device, dtype=base.dtype) + else: + mult = torch.tensor(pattern, dtype=base.dtype, device=base.device) + h_freq = base @ mult.unsqueeze(0) + return h_freq + + +def sinc_impulse_response(cutoff: torch.Tensor, window_size: int = 513, high_pass: bool = False): + """Create windowed-sinc impulse response for given cutoff frequencies. + + .. devices:: CPU CUDA + + .. 
properties:: Autograd TorchScript + + Args: + cutoff (Tensor): Cutoff frequencies for low-pass sinc filter. + + window_size (int, optional): Size of the Hamming window to apply. Must be odd. + (Default: 513) + + high_pass (bool, optional): + If ``True``, convert the resulting filter to high-pass. + Otherwise low-pass filter is returned. Default: ``False``. + + Returns: + Tensor: A series of impulse responses. Shape: `(..., window_size)`. + """ + if window_size % 2 == 0: + raise ValueError(f"`window_size` must be odd. Given: {window_size}") + + half = window_size // 2 + device, dtype = cutoff.device, cutoff.dtype + idx = torch.linspace(-half, half, window_size, device=device, dtype=dtype) + + filt = torch.special.sinc(cutoff.unsqueeze(-1) * idx.unsqueeze(0)) + filt = filt * torch.hamming_window(window_size, device=device, dtype=dtype, periodic=False).unsqueeze(0) + filt = filt / filt.sum(dim=-1, keepdim=True).abs() + + # High pass IR is obtained by subtracting low_pass IR from delta function. + # https://courses.engr.illinois.edu/ece401/fa2020/slides/lec10.pdf + if high_pass: + filt = -filt + filt[..., half] = 1.0 + filt[..., half] + return filt + + +def frequency_impulse_response(magnitudes): + """Create filter from desired frequency response + + Args: + magnitudes: The desired frequency responses. Shape: `(..., num_fft_bins)` + + Returns: + Tensor: Impulse response. Shape `(..., 2 * (num_fft_bins - 1))` + """ + if magnitudes.min() < 0.0: + # Negative magnitude does not make sense but allowing so that autograd works + # around 0. + # Should we raise error? 
+ warnings.warn("The input frequency response should not contain negative values.") + ir = torch.fft.fftshift(torch.fft.irfft(magnitudes), dim=-1) + device, dtype = magnitudes.device, magnitudes.dtype + window = torch.hann_window(ir.size(-1), periodic=False, device=device, dtype=dtype).expand_as(ir) + return ir * window + + +def _overlap_and_add(waveform, stride): + num_frames, frame_size = waveform.shape[-2:] + numel = (num_frames - 1) * stride + frame_size + buffer = torch.zeros(waveform.shape[:-2] + (numel,), device=waveform.device, dtype=waveform.dtype) + for i in range(num_frames): + start = i * stride + end = start + frame_size + buffer[..., start:end] += waveform[..., i, :] + return buffer + + +def filter_waveform(waveform: torch.Tensor, kernels: torch.Tensor, delay_compensation: int = -1): + """Applies filters along time axis of the given waveform. + + This function applies the given filters along time axis in the following manner: + + 1. Split the given waveform into chunks. The number of chunks is equal to the number of given filters. + 2. Filter each chunk with corresponding filter. + 3. Place the filtered chunks at the original indices while adding up the overlapping parts. + 4. Crop the resulting waveform so that delay introduced by the filter is removed and its length + matches that of the input waveform. + + The following figure illustrates this. + + .. image:: https://download.pytorch.org/torchaudio/doc-assets/filter_waveform.png + + .. note:: + + If the number of filters is one, then the operation becomes stationary. + i.e. the same filtering is applied across the time axis. + + Args: + waveform (Tensor): Shape `(..., time)`. + kernels (Tensor): Impulse responses. + Valid inputs are 2D tensor with shape `(num_filters, filter_length)` or + `(N+1)`-D tensor with shape `(..., num_filters, filter_length)`, where `N` is + the dimension of waveform. + + In case of 2D input, the same set of filters is used across channels and batches. 
+ Otherwise, different sets of filters are applied. In this case, the shape of + the first `N-1` dimensions of filters must match (or be broadcastable to) that of waveform. + + delay_compensation (int): Control how the waveform is cropped after full convolution. + If the value is zero or positive, it is interpreted as the length of crop at the + beginning of the waveform. The value cannot be larger than the size of filter kernel. + Otherwise the initial crop is ``filter_size // 2``. + When cropping happens, the waveform is also cropped from the end so that the + length of the resulting waveform matches the input waveform. + + Returns: + Tensor: `(..., time)`. + """ + if kernels.ndim not in [2, waveform.ndim + 1]: + raise ValueError( + "`kernels` must be 2 or N+1 dimension where " + f"N is the dimension of waveform. Found: {kernels.ndim} (N={waveform.ndim})" + ) + + num_filters, filter_size = kernels.shape[-2:] + num_frames = waveform.size(-1) + + if delay_compensation > filter_size: + raise ValueError( + "When `delay_compenstation` is provided, it cannot be larger than the size of filters." 
+ f"Found: delay_compensation={delay_compensation}, filter_size={filter_size}" + ) + + # Transform waveform's time axis into (num_filters x chunk_length) with optional padding + chunk_length = num_frames // num_filters + if num_frames % num_filters > 0: + chunk_length += 1 + num_pad = chunk_length * num_filters - num_frames + waveform = torch.nn.functional.pad(waveform, [0, num_pad], "constant", 0) + chunked = waveform.unfold(-1, chunk_length, chunk_length) + assert chunked.numel() >= waveform.numel() + + # Broadcast kernels + if waveform.ndim + 1 > kernels.ndim: + expand_shape = waveform.shape[:-1] + kernels.shape + kernels = kernels.expand(expand_shape) + + convolved = fftconvolve(chunked, kernels) + restored = _overlap_and_add(convolved, chunk_length) + + # Trim in a way that the number of samples are same as input, + # and the filter delay is compensated + if delay_compensation >= 0: + start = delay_compensation + else: + start = filter_size // 2 + num_crops = restored.size(-1) - num_frames + end = num_crops - start + result = restored[..., start:-end] + return result + + +def exp_sigmoid( + input: torch.Tensor, exponent: float = 10.0, max_value: float = 2.0, threshold: float = 1e-7 +) -> torch.Tensor: + """Exponential Sigmoid pointwise nonlinearity. + Implements the equation: + ``max_value`` * sigmoid(``input``) ** (log(``exponent``)) + ``threshold`` + + The output has a range of [``threshold``, ``max_value``]. + ``exponent`` controls the slope of the output. + + .. devices:: CPU CUDA + + Args: + input (Tensor): Input Tensor + exponent (float, optional): Exponent. Controls the slope of the output + max_value (float, optional): Maximum value of the output + threshold (float, optional): Minimum value of the output + + Returns: + Tensor: Exponential Sigmoid output. 
Shape: same as input + + """ + + return max_value * torch.pow( + torch.nn.functional.sigmoid(input), + torch.log(torch.tensor(exponent, device=input.device, dtype=input.dtype)), + ) + torch.tensor(threshold, device=input.device, dtype=input.dtype) diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/_rir.py b/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/_rir.py new file mode 100644 index 0000000000000000000000000000000000000000..0e67a5494d204182d83cc09166064ea9d4355176 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/_rir.py @@ -0,0 +1,379 @@ +import math +from typing import Optional, Tuple, Union + +import torch +import torchaudio +from torch import Tensor + + +def _compute_image_sources( + room: torch.Tensor, + source: torch.Tensor, + max_order: int, + absorption: torch.Tensor, + scatter: Optional[torch.Tensor] = None, +) -> Tuple[Tensor, Tensor]: + """Compute image sources in a shoebox-like room. + + Args: + room (torch.Tensor): The 1D Tensor to determine the room size. The shape is + `(D,)`, where ``D`` is 2 if room is a 2D room, or 3 if room is a 3D room. + source (torch.Tensor): The coordinate of the sound source. Tensor with dimensions + `(D)`. + max_order (int): The maximum number of reflections of the source. + absorption (torch.Tensor): The absorption coefficients of wall materials. + ``absorption`` is a Tensor with dimensions `(num_band, num_wall)`. + The shape options are ``[(1, 4), (1, 6), (7, 4), (7, 6)]``. + ``num_band`` is `1` if the coefficients is the same for all frequencies, or is `7` + if the coefficients are different to different frequencies. `7` refers to the default number + of octave bands. (See note in `simulate_rir_ism` method). + ``num_wall`` is `4` if the room is a 2D room, representing absorption coefficients + of ``"west"``, ``"east"``, ``"south"``, and ``"north"`` walls, respectively. 
+ Or it is `6` if the room is a 3D room, representing absorption coefficients + of ``"west"``, ``"east"``, ``"south"``, ``"north"``, ``"floor"``, and ``"ceiling"``, respectively. + scatter (torch.Tensor): The scattering coefficients of wall materials. + The shape of ``scatter`` must match that of ``absorption``. If ``None``, it is not + used in image source computation. (Default: ``None``) + + Returns: + (torch.Tensor): The coordinates of all image sources within ``max_order`` number of reflections. + Tensor with dimensions `(num_image_source, D)`. + (torch.Tensor): The attenuation of corresponding image sources. Tensor with dimensions + `(num_band, num_image_source)`. + """ + if scatter is None: + tr = torch.sqrt(1 - absorption) + else: + tr = torch.sqrt(1 - absorption) * torch.sqrt(1 - scatter) + + ind = torch.arange(-max_order, max_order + 1, device=source.device) + if room.shape[0] == 2: + XYZ = torch.meshgrid(ind, ind, indexing="ij") + else: + XYZ = torch.meshgrid(ind, ind, ind, indexing="ij") + XYZ = torch.stack([c.reshape((-1,)) for c in XYZ], dim=-1) + XYZ = XYZ[XYZ.abs().sum(dim=-1) <= max_order] + + # compute locations of image sources + d = room[None, :] + s = source[None, :] + img_loc = torch.where(XYZ % 2 == 1, d * (XYZ + 1) - s, d * XYZ + s) + + # attenuation + exp_lo = abs(torch.floor((XYZ / 2))) + exp_hi = abs(torch.floor((XYZ + 1) / 2)) + t_lo = tr[:, ::2].unsqueeze(1).repeat(1, XYZ.shape[0], 1) # (num_band, left walls) + t_hi = tr[:, 1::2].unsqueeze(1).repeat(1, XYZ.shape[0], 1) # (num_band, right walls) + att = torch.prod((t_lo**exp_lo) * (t_hi**exp_hi), dim=-1) # (num_band, num_image_source) + return img_loc, att + + +def _hann(x: torch.Tensor, T: int): + """Compute the Hann window where the values are truncated based on window length. + torch.hann_window can only sample window function at integer points, the method is to sample + continuous window function at non-integer points. 
+ + Args: + x (torch.Tensor): The fractional component of time delay Tensor. + T (torch.Tensor): The window length of sinc function. + + Returns: + (torch.Tensor): The hann window Tensor where values outside + the sinc window (`T`) is set to zero. + """ + y = torch.where( + torch.abs(x) <= T / 2, + 0.5 * (1 + torch.cos(2 * math.pi * x / T)), + x.new_zeros(1), + ) + return y + + +def _frac_delay(delay: torch.Tensor, delay_i: torch.Tensor, delay_filter_length: int): + """Compute fractional delay of impulse response signal. + + Args: + delay (torch.Tensor): The time delay Tensor in samples. + delay_i (torch.Tensor): The integer part of delay. + delay_filter_length (int): The window length for sinc function. + + Returns: + (torch.Tensor): The impulse response Tensor for all image sources. + """ + if delay_filter_length % 2 != 1: + raise ValueError("The filter length must be odd") + + pad = delay_filter_length // 2 + n = torch.arange(-pad, pad + 1, device=delay.device) + delay_i[..., None] + delay = delay[..., None] + + return torch.special.sinc(n - delay) * _hann(n - delay, 2 * pad) + + +def _adjust_coeff(coeffs: Union[float, torch.Tensor], name: str) -> torch.Tensor: + """Validates and converts absorption or scattering parameters to a tensor with appropriate shape + + Args: + coeff (float or torch.Tensor): The absorption coefficients of wall materials. + + If the dtype is ``float``, the absorption coefficient is identical for all walls and + all frequencies. + + If ``absorption`` is a 1D Tensor, the shape must be `(2*dim,)`, + where the values represent absorption coefficients of ``"west"``, ``"east"``, + ``"south"``, ``"north"``, ``"floor"``, and ``"ceiling"``, respectively. + + If ``absorption`` is a 2D Tensor, the shape must be `(7, 2*dim)`, + where 7 represents the number of octave bands. + + Returns: + (torch.Tensor): The expanded coefficient. + The shape is `(1, 6)` for single octave band case, and + `(7, 6)` for multi octave band case. 
+ """ + num_walls = 6 + if isinstance(coeffs, float): + if coeffs < 0: + raise ValueError(f"`{name}` must be non-negative. Found: {coeffs}") + return torch.full((1, num_walls), coeffs) + if isinstance(coeffs, Tensor): + if torch.any(coeffs < 0): + raise ValueError(f"`{name}` must be non-negative. Found: {coeffs}") + if coeffs.ndim == 1: + if coeffs.numel() != num_walls: + raise ValueError( + f"The shape of `{name}` must be ({num_walls},) when it is a 1D Tensor. " + f"Found the shape {coeffs.shape}." + ) + return coeffs.unsqueeze(0) + if coeffs.ndim == 2: + if coeffs.shape[1] != num_walls: + raise ValueError( + f"The shape of `{name}` must be (NUM_BANDS, {num_walls}) when it " + f"is a 2D Tensor. Found: {coeffs.shape}." + ) + return coeffs + raise TypeError(f"`{name}` must be float or Tensor.") + + +def _validate_inputs( + room: torch.Tensor, + source: torch.Tensor, + mic_array: torch.Tensor, +): + """Validate dimensions of input arguments, and normalize different kinds of absorption into the same dimension. + + Args: + room (torch.Tensor): The size of the room. width, length (and height) + source (torch.Tensor): Sound source coordinates. Tensor with dimensions `(dim,)`. + mic_array (torch.Tensor): Microphone coordinates. Tensor with dimensions `(channel, dim)`. + """ + if not (room.ndim == 1 and room.numel() == 3): + raise ValueError(f"`room` must be a 1D Tensor with 3 elements. Found {room.shape}.") + if not (source.ndim == 1 and source.numel() == 3): + raise ValueError(f"`source` must be 1D Tensor with 3 elements. Found {source.shape}.") + if not (mic_array.ndim == 2 and mic_array.shape[1] == 3): + raise ValueError(f"`mic_array` must be a 2D Tensor with shape (num_channels, 3). 
Found {mic_array.shape}.") + + +def simulate_rir_ism( + room: torch.Tensor, + source: torch.Tensor, + mic_array: torch.Tensor, + max_order: int, + absorption: Union[float, torch.Tensor], + output_length: Optional[int] = None, + delay_filter_length: int = 81, + center_frequency: Optional[torch.Tensor] = None, + sound_speed: float = 343.0, + sample_rate: float = 16000.0, +) -> Tensor: + r"""Compute Room Impulse Response (RIR) based on the *image source method* :cite:`allen1979image`. + The implementation is based on *pyroomacoustics* :cite:`scheibler2018pyroomacoustics`. + + .. devices:: CPU + + .. properties:: TorchScript + + Args: + room (torch.Tensor): Room coordinates. The shape of `room` must be `(3,)` which represents + three dimensions of the room. + source (torch.Tensor): Sound source coordinates. Tensor with dimensions `(3,)`. + mic_array (torch.Tensor): Microphone coordinates. Tensor with dimensions `(channel, 3)`. + max_order (int): The maximum number of reflections of the source. + absorption (float or torch.Tensor): The *absorption* :cite:`wiki:Absorption_(acoustics)` + coefficients of wall materials for sound energy. + If the dtype is ``float``, the absorption coefficient is identical for all walls and + all frequencies. + If ``absorption`` is a 1D Tensor, the shape must be `(6,)`, where the values represent + absorption coefficients of ``"west"``, ``"east"``, ``"south"``, ``"north"``, ``"floor"``, + and ``"ceiling"``, respectively. + If ``absorption`` is a 2D Tensor, the shape must be `(7, 6)`, where 7 represents the number of octave bands. + output_length (int or None, optional): The output length of simulated RIR signal. If ``None``, + the length is defined as + + .. math:: + \frac{\text{max\_d} \cdot \text{sample\_rate}}{\text{sound\_speed}} + \text{delay\_filter\_length} + + where ``max_d`` is the maximum distance between image sources and microphones. + delay_filter_length (int, optional): The filter length for computing sinc function. 
(Default: ``81``) + center_frequency (torch.Tensor, optional): The center frequencies of octave bands for multi-band walls. + Only used when ``absorption`` is a 2D Tensor. + sound_speed (float, optional): The speed of sound. (Default: ``343.0``) + sample_rate (float, optional): The sample rate of the generated room impulse response signal. + (Default: ``16000.0``) + + Returns: + (torch.Tensor): The simulated room impulse response waveform. Tensor with dimensions + `(channel, rir_length)`. + + Note: + If ``absorption`` is a 2D Tensor and ``center_frequency`` is set to ``None``, the center frequencies + of octave bands are fixed to ``[125.0, 250.0, 500.0, 1000.0, 2000.0, 4000.0, 8000.0]``. + Users need to tune the values of ``absorption`` to the corresponding frequencies. + """ + _validate_inputs(room, source, mic_array) + absorption = _adjust_coeff(absorption, "absorption") + img_location, att = _compute_image_sources(room, source, max_order, absorption) + + # compute distances between image sources and microphones + vec = img_location[:, None, :] - mic_array[None, :, :] + dist = torch.linalg.norm(vec, dim=-1) # (image_source, channel) + + img_src_att = att[..., None] / dist[None, ...] # (band, image_source, channel) + + # separate delays in integer / frac part + delay = dist * sample_rate / sound_speed # distance to delay in samples + delay_i = torch.ceil(delay) # integer part + + # compute the shorts IRs corresponding to each image source + irs = img_src_att[..., None] * _frac_delay(delay, delay_i, delay_filter_length)[None, ...] + + rir_length = int(delay_i.max() + irs.shape[-1]) + rir = torch.ops.torchaudio._simulate_rir(irs, delay_i.type(torch.int32), rir_length) + + # multi-band processing + if absorption.shape[0] > 1: + if center_frequency is None: + center = torch.tensor( + [125.0, 250.0, 500.0, 1000.0, 2000.0, 4000.0, 8000.0], dtype=room.dtype, device=room.device + ) + else: + center = center_frequency + # n_fft is set to 512 by default. 
+ filters = torch.ops.torchaudio._make_rir_filter(center, sample_rate, n_fft=512) + rir = torchaudio.functional.fftconvolve(rir, filters.unsqueeze(1).repeat(1, rir.shape[1], 1), mode="same") + + # sum up rir signals of all image sources into one waveform. + rir = rir.sum(0) + + if output_length is not None: + if output_length > rir.shape[-1]: + rir = torch.nn.functional.pad(rir, (0, output_length - rir.shape[-1]), "constant", 0.0) + else: + rir = rir[..., :output_length] + + return rir + + +def ray_tracing( + room: torch.Tensor, + source: torch.Tensor, + mic_array: torch.Tensor, + num_rays: int, + absorption: Union[float, torch.Tensor] = 0.0, + scattering: Union[float, torch.Tensor] = 0.0, + mic_radius: float = 0.5, + sound_speed: float = 343.0, + energy_thres: float = 1e-7, + time_thres: float = 10.0, + hist_bin_size: float = 0.004, +) -> torch.Tensor: + r"""Compute energy histogram via ray tracing. + + The implementation is based on *pyroomacoustics* :cite:`scheibler2018pyroomacoustics`. + + ``num_rays`` rays are casted uniformly in all directions from the source; + when a ray intersects a wall, it is reflected and part of its energy is absorbed. + It is also scattered (sent directly to the microphone(s)) according to the ``scattering`` + coefficient. + When a ray is close to the microphone, its current energy is recorded in the output + histogram for that given time slot. + + .. devices:: CPU + + .. properties:: TorchScript + + Args: + room (torch.Tensor): Room coordinates. The shape of `room` must be `(3,)` which represents + three dimensions of the room. + source (torch.Tensor): Sound source coordinates. Tensor with dimensions `(3,)`. + mic_array (torch.Tensor): Microphone coordinates. Tensor with dimensions `(channel, 3)`. + absorption (float or torch.Tensor, optional): The absorption coefficients of wall materials. + (Default: ``0.0``). + If the type is ``float``, the absorption coefficient is identical to all walls and + all frequencies. 
+ If ``absorption`` is a 1D Tensor, the shape must be `(6,)`, representing absorption + coefficients of ``"west"``, ``"east"``, ``"south"``, ``"north"``, ``"floor"``, and + ``"ceiling"``, respectively. + If ``absorption`` is a 2D Tensor, the shape must be `(num_bands, 6)`. + ``num_bands`` is the number of frequency bands (usually 7). + scattering(float or torch.Tensor, optional): The scattering coefficients of wall materials. (Default: ``0.0``) + The shape and type of this parameter is the same as for ``absorption``. + mic_radius(float, optional): The radius of the microphone in meters. (Default: 0.5) + sound_speed (float, optional): The speed of sound in meters per second. (Default: ``343.0``) + energy_thres (float, optional): The energy level below which we stop tracing a ray. (Default: ``1e-7``) + The initial energy of each ray is ``2 / num_rays``. + time_thres (float, optional): The maximal duration for which rays are traced. (Unit: seconds) (Default: 10.0) + hist_bin_size (float, optional): The size of each bin in the output histogram. (Unit: seconds) (Default: 0.004) + + Returns: + (torch.Tensor): The 3D histogram(s) where the energy of the traced ray is recorded. + Each bin corresponds to a given time slot. + The shape is `(channel, num_bands, num_bins)`, where + ``num_bins = ceil(time_thres / hist_bin_size)``. + If both ``absorption`` and ``scattering`` are floats, then ``num_bands == 1``. + """ + if time_thres < hist_bin_size: + raise ValueError( + "`time_thres` must be greater than `hist_bin_size`. " + f"Found: hist_bin_size={hist_bin_size}, time_thres={time_thres}." + ) + + if room.dtype != source.dtype or source.dtype != mic_array.dtype: + raise ValueError( + "dtype of `room`, `source` and `mic_array` must match. 
" + f"Found: `room` ({room.dtype}), `source` ({source.dtype}) and " + f"`mic_array` ({mic_array.dtype})" + ) + + _validate_inputs(room, source, mic_array) + absorption = _adjust_coeff(absorption, "absorption").to(room.dtype) + scattering = _adjust_coeff(scattering, "scattering").to(room.dtype) + + # Bring absorption and scattering to the same shape + if absorption.shape[0] == 1 and scattering.shape[0] > 1: + absorption = absorption.expand(scattering.shape) + if scattering.shape[0] == 1 and absorption.shape[0] > 1: + scattering = scattering.expand(absorption.shape) + if absorption.shape != scattering.shape: + raise ValueError( + "`absorption` and `scattering` must be broadcastable to the same number of bands and walls. " + f"Inferred shapes absorption={absorption.shape} and scattering={scattering.shape}" + ) + + histograms = torch.ops.torchaudio.ray_tracing( + room, + source, + mic_array, + num_rays, + absorption, + scattering, + mic_radius, + sound_speed, + energy_thres, + time_thres, + hist_bin_size, + ) + + return histograms diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/functional.py b/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/functional.py new file mode 100644 index 0000000000000000000000000000000000000000..0805a252af4ef6946606a32a532188cd937321b8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/prototype/functional/functional.py @@ -0,0 +1,190 @@ +import math +import warnings +from typing import Optional + +import torch +from torchaudio.functional.functional import _create_triangular_filterbank + + +def _hz_to_bark(freqs: float, bark_scale: str = "traunmuller") -> float: + r"""Convert Hz to Barks. + + Args: + freqs (float): Frequencies in Hz + bark_scale (str, optional): Scale to use: ``traunmuller``, ``schroeder`` or ``wang``. 
(Default: ``traunmuller``) + + Returns: + barks (float): Frequency in Barks + """ + + if bark_scale not in ["schroeder", "traunmuller", "wang"]: + raise ValueError('bark_scale should be one of "schroeder", "traunmuller" or "wang".') + + if bark_scale == "wang": + return 6.0 * math.asinh(freqs / 600.0) + elif bark_scale == "schroeder": + return 7.0 * math.asinh(freqs / 650.0) + # Traunmuller Bark scale + barks = ((26.81 * freqs) / (1960.0 + freqs)) - 0.53 + # Bark value correction + if barks < 2: + barks += 0.15 * (2 - barks) + elif barks > 20.1: + barks += 0.22 * (barks - 20.1) + + return barks + + +def _bark_to_hz(barks: torch.Tensor, bark_scale: str = "traunmuller") -> torch.Tensor: + """Convert bark bin numbers to frequencies. + + Args: + barks (torch.Tensor): Bark frequencies + bark_scale (str, optional): Scale to use: ``traunmuller``,``schroeder`` or ``wang``. (Default: ``traunmuller``) + + Returns: + freqs (torch.Tensor): Barks converted in Hz + """ + + if bark_scale not in ["schroeder", "traunmuller", "wang"]: + raise ValueError('bark_scale should be one of "traunmuller", "schroeder" or "wang".') + + if bark_scale == "wang": + return 600.0 * torch.sinh(barks / 6.0) + elif bark_scale == "schroeder": + return 650.0 * torch.sinh(barks / 7.0) + # Bark value correction + if any(barks < 2): + idx = barks < 2 + barks[idx] = (barks[idx] - 0.3) / 0.85 + elif any(barks > 20.1): + idx = barks > 20.1 + barks[idx] = (barks[idx] + 4.422) / 1.22 + + # Traunmuller Bark scale + freqs = 1960 * ((barks + 0.53) / (26.28 - barks)) + + return freqs + + +def _hz_to_octs(freqs, tuning=0.0, bins_per_octave=12): + a440 = 440.0 * 2.0 ** (tuning / bins_per_octave) + return torch.log2(freqs / (a440 / 16)) + + +def barkscale_fbanks( + n_freqs: int, + f_min: float, + f_max: float, + n_barks: int, + sample_rate: int, + bark_scale: str = "traunmuller", +) -> torch.Tensor: + r"""Create a frequency bin conversion matrix. + + .. devices:: CPU + + .. properties:: TorchScript + + .. 
image:: https://download.pytorch.org/torchaudio/doc-assets/bark_fbanks.png + :alt: Visualization of generated filter bank + + Args: + n_freqs (int): Number of frequencies to highlight/apply + f_min (float): Minimum frequency (Hz) + f_max (float): Maximum frequency (Hz) + n_barks (int): Number of mel filterbanks + sample_rate (int): Sample rate of the audio waveform + bark_scale (str, optional): Scale to use: ``traunmuller``,``schroeder`` or ``wang``. (Default: ``traunmuller``) + + Returns: + torch.Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_barks``) + meaning number of frequencies to highlight/apply to x the number of filterbanks. + Each column is a filterbank so that assuming there is a matrix A of + size (..., ``n_freqs``), the applied result would be + ``A * barkscale_fbanks(A.size(-1), ...)``. + + """ + + # freq bins + all_freqs = torch.linspace(0, sample_rate // 2, n_freqs) + + # calculate bark freq bins + m_min = _hz_to_bark(f_min, bark_scale=bark_scale) + m_max = _hz_to_bark(f_max, bark_scale=bark_scale) + + m_pts = torch.linspace(m_min, m_max, n_barks + 2) + f_pts = _bark_to_hz(m_pts, bark_scale=bark_scale) + + # create filterbank + fb = _create_triangular_filterbank(all_freqs, f_pts) + + if (fb.max(dim=0).values == 0.0).any(): + warnings.warn( + "At least one bark filterbank has all zero values. " + f"The value for `n_barks` ({n_barks}) may be set too high. " + f"Or, the value for `n_freqs` ({n_freqs}) may be set too low." + ) + + return fb + + +def chroma_filterbank( + sample_rate: int, + n_freqs: int, + n_chroma: int, + *, + tuning: float = 0.0, + ctroct: float = 5.0, + octwidth: Optional[float] = 2.0, + norm: int = 2, + base_c: bool = True, +): + """Create a frequency-to-chroma conversion matrix. Implementation adapted from librosa. + + Args: + sample_rate (int): Sample rate. + n_freqs (int): Number of input frequencies. + n_chroma (int): Number of output chroma. 
+ tuning (float, optional): Tuning deviation from A440 in fractions of a chroma bin. (Default: 0.0) + ctroct (float, optional): Center of Gaussian dominance window to weight filters by, in octaves. (Default: 5.0) + octwidth (float or None, optional): Width of Gaussian dominance window to weight filters by, in octaves. + If ``None``, then disable weighting altogether. (Default: 2.0) + norm (int, optional): order of norm to normalize filter bank by. (Default: 2) + base_c (bool, optional): If True, then start filter bank at C. Otherwise, start at A. (Default: True) + + Returns: + torch.Tensor: Chroma filter bank, with shape `(n_freqs, n_chroma)`. + """ + # Skip redundant upper half of frequency range. + freqs = torch.linspace(0, sample_rate // 2, n_freqs)[1:] + freq_bins = n_chroma * _hz_to_octs(freqs, bins_per_octave=n_chroma, tuning=tuning) + freq_bins = torch.cat((torch.tensor([freq_bins[0] - 1.5 * n_chroma]), freq_bins)) + freq_bin_widths = torch.cat( + ( + torch.maximum(freq_bins[1:] - freq_bins[:-1], torch.tensor(1.0)), + torch.tensor([1]), + ) + ) + + # (n_freqs, n_chroma) + D = freq_bins.unsqueeze(1) - torch.arange(0, n_chroma) + + n_chroma2 = round(n_chroma / 2) + + # Project to range [-n_chroma/2, n_chroma/2 - 1] + D = torch.remainder(D + n_chroma2, n_chroma) - n_chroma2 + + fb = torch.exp(-0.5 * (2 * D / torch.tile(freq_bin_widths.unsqueeze(1), (1, n_chroma))) ** 2) + fb = torch.nn.functional.normalize(fb, p=norm, dim=1) + + if octwidth is not None: + fb *= torch.tile( + torch.exp(-0.5 * (((freq_bins.unsqueeze(1) / n_chroma - ctroct) / octwidth) ** 2)), + (1, n_chroma), + ) + + if base_c: + fb = torch.roll(fb, -3 * (n_chroma // 12), dims=1) + + return fb diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__init__.py b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cc1b62974644a672bce2916c0f6d04e80e55a2e8 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__init__.py @@ -0,0 +1,36 @@ +from ._conformer_wav2vec2 import ( + conformer_wav2vec2_base, + conformer_wav2vec2_model, + conformer_wav2vec2_pretrain_base, + conformer_wav2vec2_pretrain_large, + conformer_wav2vec2_pretrain_model, + ConformerWav2Vec2PretrainModel, +) +from ._emformer_hubert import emformer_hubert_base, emformer_hubert_model +from .conv_emformer import ConvEmformer +from .hifi_gan import hifigan_vocoder, hifigan_vocoder_v1, hifigan_vocoder_v2, hifigan_vocoder_v3, HiFiGANVocoder +from .rnnt import conformer_rnnt_base, conformer_rnnt_biasing, conformer_rnnt_biasing_base, conformer_rnnt_model +from .rnnt_decoder import Hypothesis, RNNTBeamSearchBiasing + +__all__ = [ + "conformer_rnnt_base", + "conformer_rnnt_model", + "conformer_rnnt_biasing", + "conformer_rnnt_biasing_base", + "ConvEmformer", + "conformer_wav2vec2_model", + "conformer_wav2vec2_base", + "conformer_wav2vec2_pretrain_model", + "conformer_wav2vec2_pretrain_base", + "conformer_wav2vec2_pretrain_large", + "ConformerWav2Vec2PretrainModel", + "emformer_hubert_base", + "emformer_hubert_model", + "Hypothesis", + "RNNTBeamSearchBiasing", + "HiFiGANVocoder", + "hifigan_vocoder_v1", + "hifigan_vocoder_v2", + "hifigan_vocoder_v3", + "hifigan_vocoder", +] diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af1375065f6f1a38f042c365fbf09984e04e4f06 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__pycache__/_conformer_wav2vec2.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__pycache__/_conformer_wav2vec2.cpython-310.pyc 
new file mode 100644 index 0000000000000000000000000000000000000000..4d5021c2336491172f41d81893919319f97af7fc Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__pycache__/_conformer_wav2vec2.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__pycache__/_emformer_hubert.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__pycache__/_emformer_hubert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8828ecd1ddadda6be14fb3ffc090f58953801525 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__pycache__/_emformer_hubert.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__pycache__/conv_emformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__pycache__/conv_emformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7b62495a05b17dafb480722d4fb10d5c4e75597 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__pycache__/conv_emformer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__pycache__/hifi_gan.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__pycache__/hifi_gan.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbe9cbb38e55049b5b2563509f6b6c253b58a6cc Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__pycache__/hifi_gan.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__pycache__/rnnt.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__pycache__/rnnt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b99e0aa008ff7f9d3bdb7217cc3fddf10d911ba Binary files 
/dev/null and b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__pycache__/rnnt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__pycache__/rnnt_decoder.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__pycache__/rnnt_decoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d40fac492effd0b9011db71054f9e6a0bfb0c0d6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/__pycache__/rnnt_decoder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/models/_conformer_wav2vec2.py b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/_conformer_wav2vec2.py new file mode 100644 index 0000000000000000000000000000000000000000..b1ea86a81c831a8f346dd1290e221ece67be4734 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/_conformer_wav2vec2.py @@ -0,0 +1,794 @@ +from typing import List, Optional, Tuple, Union + +import torch +from torch import nn, Tensor +from torch.nn import Module, ModuleList +from torchaudio.models import Wav2Vec2Model +from torchaudio.models.conformer import ConformerLayer +from torchaudio.models.rnnt import _TimeReduction +from torchaudio.models.wav2vec2 import components + + +def _buffered_arange(max) -> Tensor: + """Compute arange using a buffered tensor across function calls. + Produces same result as torch.arange(end=max). + + Args: + max (int): Ending value for arange. + """ + if not hasattr(_buffered_arange, "buf"): + _buffered_arange.buf = torch.LongTensor() + if max > _buffered_arange.buf.numel(): + _buffered_arange.buf.resize_(max) + torch.arange(max, out=_buffered_arange.buf) + return _buffered_arange.buf[:max] + + +def _sample_negatives(input: Tensor, num_negatives: int, cross_sample_negatives: int) -> Tuple[Tensor, Tensor]: + """Sample negative examples from masked input. 
+ + Args: + input (Tensor): Tensor of dimension `(batch, frame, dim)`. + num_negatives (int): Number of negative examples to sample. + cross_sample_negatives (int): Number of negative examples to cross sample. + + Returns: + (Tensor, Tensor): + Tensor + The negative samples. + Tensor + The indices of the negative samples. + """ + if num_negatives == 0 and cross_sample_negatives == 0: + return ( + torch.zeros(0).to(input.device, input.dtype), + torch.zeros(0).to(input.device, input.dtype), + ) + + B, T, D = input.shape + input = input.view(-1, D) + + cross_high = T * B + high = T + + assert high > 1 + + if num_negatives > 0: + tszs = _buffered_arange(T).unsqueeze(-1).expand(-1, num_negatives).flatten() + + neg_idxs = torch.randint(low=0, high=high - 1, size=(B, num_negatives * T)) + neg_idxs[neg_idxs >= tszs] += 1 + + if cross_sample_negatives > 0: + tszs = _buffered_arange(T).unsqueeze(-1).expand(-1, cross_sample_negatives).flatten() + + cross_neg_idxs = torch.randint(low=0, high=cross_high - 1, size=(B, cross_sample_negatives * T)) + cross_neg_idxs[cross_neg_idxs >= tszs] += 1 + + if num_negatives > 0: + neg_idxs = neg_idxs + (torch.arange(B).unsqueeze(1) * high) + else: + neg_idxs = cross_neg_idxs + + if cross_sample_negatives > 0 and num_negatives > 0: + neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1) + + negs = input[neg_idxs.view(-1)] + negs = negs.view(B, T, num_negatives + cross_sample_negatives, D).permute(2, 0, 1, 3) # NxBxCxT + + return negs, neg_idxs + + +class NegativeSampler(Module): + r"""Applies preprocessing to input and then computes negative sampling. + + Args: + preprocessor (nn.Module): Transforms input tensor prior to negative sampling. + num_negatives (int): Number of negative examples to sample. + cross_sample_negatives (int): Number of negative examples to cross sample. 
+ """ + + def __init__( + self, + preprocessor: Module, + num_negatives: int, + cross_sample_negatives: int, + ): + super().__init__() + self.preprocessor = preprocessor + self.num_negatives = num_negatives + self.cross_sample_negatives = cross_sample_negatives + + def forward(self, input: Tensor) -> Tuple[Tensor, Tensor, Optional[Tensor]]: + """ + Args: + input (Tensor): Tensor of dimension `(B, T, D)`. + + Returns: + (Tensor, Tensor, Optional[Tensor]): + Tensor + The input tensor after preprocessing, prior to being sampled. + Tensor + The negative samples. + Tensor + The indices of the negative samples. + """ + preprocessed = self.preprocessor(input) + negs, neg_idxs = _sample_negatives(preprocessed, self.num_negatives, self.cross_sample_negatives) + return preprocessed, negs, neg_idxs + + +class FeatureEncoder(Module): + """Feature Encoder class, consisting of time reduction and linear layer. + + Args: + stride (int): Number of frames to merge for the output frame. + input_dim (int): Input dimension of the tensor. + output_dim (int): Output dimension of the tensor. + """ + + def __init__(self, input_dim: int, output_dim: int, stride: int): + super().__init__() + self.time_reduction_layer = _TimeReduction(stride=stride) + self.linear_layer = nn.Linear(input_dim * stride, output_dim) + + def forward( + self, + x: Tensor, + lengths: Optional[Tensor], + ) -> Tuple[Tensor, Optional[Tensor]]: + """ + Args: + x (Tensor): Feature Tensor representing log Mel Spectrogram output. shape ``(B, T, D)``. + lengths (Tensor or None): + Valid length of each input sample. shape: ``(B, )``. + + Returns: + (Tensor, Optional[Tensor]): + Tensor: output sequence after undergoing time reduction and linear projection. + Shape ``(B, T // stride, D * stride). + Optional[Tensor]: output lengths of shape ``(B,)`` if lengths parameter is provided, + otherwise `None`. 
+ """ + if lengths is None: + B, T, D = x.shape + dummy_lengths = torch.full((B,), T) + x, _ = self.time_reduction_layer(x, dummy_lengths) + x = self.linear_layer(x) + return x, None + + x, lengths = self.time_reduction_layer(x, lengths) + x = self.linear_layer(x) + return x, lengths + + +class ConformerEncoder(Module): + """Conformer Encoder class, consisting of feature projection and conformer modules. + + Args: + feature_projection (nn.Module): + Projects feature to encoder dimension. + conformer (nn.ModuleList) + List of Conformer layers. + """ + + def __init__( + self, + feature_projection: Module, + conformer: ModuleList, + ): + super().__init__() + self.feature_projection = feature_projection + self.conformer = conformer + + def _preprocess( + self, + features: Tensor, + lengths: Optional[Tensor] = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + x = self.feature_projection(features) + if lengths is not None: + mask = components._get_padding_mask(x, lengths) + else: + mask = None + return x, mask + + def _get_intermediate_outputs( + self, + x: Tensor, + mask: Optional[Tensor] = None, + num_layers: Optional[int] = None, + ) -> List[Tensor]: + if num_layers is not None: + if not 0 < num_layers <= len(self.conformer): + raise ValueError(f"`num_layers` must be between [1, {len(self.conformer)}]") + + ret: List[Tensor] = [] + + x = x.transpose(0, 1) + for layer in self.conformer: + x = layer(x, mask) + ret.append(x.transpose(0, 1)) + if num_layers is not None and len(ret) >= num_layers: + return ret + return ret + + def forward( + self, + features: Tensor, + lengths: Optional[Tensor] = None, + ) -> Tensor: + """ + Args: + features (Tensor): Tensor of features of shape ``(B, T, D)``. + lengths (Tensor or None, optional): Valid length of each input sample. shape: ``(B, )``. + + Returns: + Tensor: result after applying conformer encoder to features. 
+ """ + x, mask = self._preprocess(features, lengths) + x = x.transpose(0, 1) + for layer in self.conformer: + x = layer(x, mask) + return x.transpose(0, 1) + + def extract_features( + self, + features: Tensor, + lengths: Optional[Tensor] = None, + num_layers: Optional[int] = None, + ) -> List[Tensor]: + """Returns the list of outputs from the intermediate layers of conformer block in the encoder. + + Args: + features (Tensor): Tensor of features of shape ``(B, T, D)``. + lengths (Tensor or None, optional): Valid length of each input sample. shape: ``(B, )``. + + Returns: + List[Tensor]: + Features from requested layers. Each Tensor is of shape: `(batch, time frame, feature dimension)`. + """ + x, masks = self._preprocess(features, lengths) + return self._get_intermediate_outputs(x, mask=masks, num_layers=num_layers) + + +class ConformerWav2Vec2PretrainModel(Module): + """Conformer Wav2Vec2 pre-train model for training from scratch. + + Note: + To build the model, please use one of the factory functions, + :py:func:`conformer_wav2vec2_base` or :py:func:`conformer_wav2vec2_large` + + Args: + wav2vec2 (nn.Module): + Conformer based Wav2Vec2 model, including feature extractor and conformer encoder components. + mask_generator (nn.Module): + Mask generator that generates the mask for masked prediction during training. + negative_sampler (nn.Module): + Negative sampler to apply after masking. + + """ + + def __init__( + self, + wav2vec2: Wav2Vec2Model, + mask_generator: Module, + negative_sampler: Module, + ): + super().__init__() + self.wav2vec2 = wav2vec2 + self.mask_generator = mask_generator + self.negative_sampler = negative_sampler + + def forward( + self, + features: Tensor, + audio_lengths: Optional[Tensor] = None, + ) -> Tuple[Tensor, Optional[Tensor], Tensor, Tensor]: + """ + Args: + features (Tensor): + Tensor of audio features of shape `(batch, frame, dim)`. 
+ audio_lengths (Tensor or None, optional): + Tensor of valid length of each valid auidio in the batch. + shape: `(batch, )` (Default: ``None``) + + Returns: + (Tensor, Optional[Tensor], Tensor, Tensor, Tensor, Tensor): + Tensor + The masked sequences of probability distribution of shape `(batch, frame dim)`. + Tensor or None + If ``lengths`` argument was provided, a Tensor of shape `(batch, )` representing + valid length in time axis is returns. + Tensor + The mask indices. + Tensor + The targets, prior to negative sampling. + Tensor + The negative samples. + Tensor + The indices of the negative samples. + """ + x, lengths = self.wav2vec2.feature_extractor(features, audio_lengths) + + if lengths is not None: + padding_mask = components._get_padding_mask(x, lengths) + else: + padding_mask = None + + x = self.wav2vec2.encoder.feature_projection.layer_norm(x) + x = self.wav2vec2.encoder.feature_projection.dropout(x) + + # Unmasked feature is used to generate positive and negative samples. + unmasked_x = x.clone() + # Apply masking to x before passing it to Conformer layers. + x, mask_idxs = self.mask_generator(x, padding_mask) + # Select the frames from masked indices for negative sampling. + unmasked_x = unmasked_x[mask_idxs].view(x.shape[0], -1, x.shape[-1]) + targets, negs, neg_idxs = self.negative_sampler(unmasked_x) + + x = self.wav2vec2.encoder.feature_projection.projection(x) + x = x.transpose(0, 1) + for conformer_layer in self.wav2vec2.encoder.conformer: + x = conformer_layer(x, padding_mask) + x = x.transpose(0, 1) + + return x, lengths, mask_idxs, targets, negs, neg_idxs + + +################################################################################ +def _get_conformer_feature_extractor( + input_dim: int, + output_dim: int, + stride: int, +) -> FeatureEncoder: + """Construct Feature Extractor + + Args: + input_dim (int): Input dimension of features. + output_dim (int): Output dimension after feature extraction. 
def _get_conformer_feature_extractor(
    input_dim: int,
    output_dim: int,
    stride: int,
) -> FeatureEncoder:
    """Construct Feature Extractor

    Args:
        input_dim (int): Input dimension of features.
        output_dim (int): Output dimension after feature extraction.
        stride (int): Stride used in Time Reduction layer of feature extractor.

    Returns:
        FeatureEncoder: The resulting feature extraction.
    """
    return FeatureEncoder(input_dim, output_dim, stride)


def _get_conformer_encoder(
    in_features: int,
    embed_dim: int,
    dropout_input: float,
    num_layers: int,
    num_heads: int,
    ff_interm_features: int,
    dropout: float,
    depthwise_conv_kernel_size: Union[int, List[int]],
    convolution_first: bool,
    use_group_norm: bool,
) -> ConformerEncoder:
    """Construct Conformer Encoder

    Args:
        in_features (int): The number of input features.
        embed_dim (int): The dimension of the embedding in the feature projection.
        dropout_input (float): The dropout probability applied after the input feature
            is projected to ``embed_dim``.
        num_layers (int): Number of Conformer layers in the encoder.
        num_heads (int): Number of heads in each Conformer layer.
        ff_interm_features (int): Hidden layer dimension of the feedforward network in
            each Conformer layer.
        dropout (float): Dropout probability in each Conformer layer.
        depthwise_conv_kernel_size (int or List[int]): List of kernel sizes corresponding
            to each of the Conformer layers. If int is provided, all layers will have the
            same kernel size.
        convolution_first (bool): Whether to apply the convolution module ahead of the
            attention module in each Conformer layer.
        use_group_norm (bool): Whether to use ``GroupNorm`` rather than ``BatchNorm1d`` in
            the convolution module in each Conformer layer.

    Returns:
        ConformerEncoder:
            The resulting conformer encoder module.

    Raises:
        ValueError: If ``depthwise_conv_kernel_size`` is a list whose length does not
            equal ``num_layers``.
    """
    feature_projection = components.FeatureProjection(in_features, embed_dim, dropout_input)

    # Broadcast a scalar kernel size to every layer.
    if isinstance(depthwise_conv_kernel_size, int):
        depthwise_conv_kernel_size = [depthwise_conv_kernel_size] * num_layers

    # Validate explicitly rather than with `assert`, which is stripped under -O.
    if len(depthwise_conv_kernel_size) != num_layers:
        raise ValueError(
            f"`depthwise_conv_kernel_size` must have {num_layers} entries, "
            f"got {len(depthwise_conv_kernel_size)}"
        )

    conformer_layers = []
    for layer_idx in range(num_layers):
        layer = ConformerLayer(
            input_dim=embed_dim,
            ffn_dim=ff_interm_features,
            num_attention_heads=num_heads,
            depthwise_conv_kernel_size=depthwise_conv_kernel_size[layer_idx],
            dropout=dropout,
            use_group_norm=use_group_norm,
            convolution_first=convolution_first,
        )
        conformer_layers.append(layer)

    return ConformerEncoder(feature_projection, ModuleList(conformer_layers))


def _get_conformer_negativer_sampler(
    input_dim: int,
    output_dim: int,
    num_negatives: int,
    cross_sample_negatives: int,
) -> NegativeSampler:
    """Build custom NegativeSampler module, including linear layer and negative sampling.

    Args:
        input_dim (int): Dimension of input after feature extraction.
        output_dim (int): Dimension of embedding for use in negative sampling. Same as the
            embedding in the feature projection.
        num_negatives (int): Number of negatives to sample.
        cross_sample_negatives (int): Number of cross sampled negatives.

    Returns:
        NegativeSampler:
            The resulting negative sampler module.
    """
    preprocessor = nn.Linear(input_dim, output_dim)
    return NegativeSampler(preprocessor, num_negatives, cross_sample_negatives)
+ """ + preprocessor = nn.Linear(input_dim, output_dim) + return NegativeSampler(preprocessor, num_negatives, cross_sample_negatives) + + +def conformer_wav2vec2_model( + extractor_input_dim: int, + extractor_output_dim: int, + extractor_stride: int, + encoder_embed_dim: int, + encoder_projection_dropout: float, + encoder_num_layers: int, + encoder_num_heads: int, + encoder_ff_interm_features: int, + encoder_depthwise_conv_kernel_size: Union[int, List[int]], + encoder_dropout: float, + encoder_convolution_first: bool, + encoder_use_group_norm: bool, +) -> Wav2Vec2Model: + """Build a custom Conformer Wav2Vec2Model + + Args: + extractor_input_dim (int): Input dimension of the features. + extractor_output_dim (int): Output dimension after feature extraction. + extractor_stride (int): Stride used in time reduction layer of feature extraction. + encoder_embed_dim (int): The dimension of the embedding in the feature projection. + encoder_projection_dropout (float): + The dropout probability applied after the input feature is projected to ``embed_dim`` + encoder_num_layers (int): Number of Conformer layers in the encoder. + encoder_num_heads (int): Number of heads in each Conformer layer. + encoder_ff_interm_features (int): + Hidden layer dimension of the feedforward network in each Conformer layer. + encoder_depthwise_conv_kernel_size (int or List[int]): + List of kernel sizes corresponding to each of the Conformer layers. + If int is provided, all layers will have the same kernel size. + encoder_dropout (float): Dropout probability in each Conformer layer. + encoder_convolution_first (bool): + Whether to apply the convolution module ahead of the attention module + in each Conformer layer. + encoder_use_group_norm (bool): + Whether to use ``GroupNorm`` rather than ``BatchNorm1d`` in the convolution + module in each Conformer layer. + + Returns: + Wav2Vec2Model: + The resulting wav2vec2 model with a conformer encoder. 
+ """ + feature_extractor = _get_conformer_feature_extractor( + extractor_input_dim, + extractor_output_dim, + extractor_stride, + ) + + encoder = _get_conformer_encoder( + in_features=extractor_output_dim, + embed_dim=encoder_embed_dim, + dropout_input=encoder_projection_dropout, + num_layers=encoder_num_layers, + num_heads=encoder_num_heads, + ff_interm_features=encoder_ff_interm_features, + depthwise_conv_kernel_size=encoder_depthwise_conv_kernel_size, + dropout=encoder_dropout, + convolution_first=encoder_convolution_first, + use_group_norm=encoder_use_group_norm, + ) + + return Wav2Vec2Model(feature_extractor, encoder) + + +def conformer_wav2vec2_base( + extractor_input_dim: int = 64, + extractor_output_dim: int = 256, + encoder_projection_dropout: float = 0.0, +) -> Wav2Vec2Model: + """ + Build Conformer Wav2Vec2 Model with "small" architecture from + *Conformer-Based Slef-Supervised Learning for Non-Speech Audio Tasks* :cite:`9746490` + + Args: + extractor_input_dim (int, optional): Input dimension of feature extractor. (Default: 64) + extractor_output_dim (int, optional): Output dimension of feature extractor. (Default: 256) + encoder_projection_dropout (float, optional): + Dropout probability applied after feature projection. (Default: 0.0) + + Returns: + Wav2Vec2Model: + The resulting wav2vec2 model with a conformer encoder and ``base`` configuration. 
+ """ + return conformer_wav2vec2_model( + extractor_input_dim=extractor_input_dim, + extractor_output_dim=extractor_output_dim, + extractor_stride=4, + encoder_embed_dim=256, + encoder_projection_dropout=encoder_projection_dropout, + encoder_num_layers=12, + encoder_num_heads=8, + encoder_ff_interm_features=1024, + encoder_depthwise_conv_kernel_size=[31] + [15] * 11, + encoder_dropout=0.1, + encoder_convolution_first=True, + encoder_use_group_norm=True, + ) + + +def conformer_wav2vec2_pretrain_model( + extractor_input_dim: int, + extractor_output_dim: int, + extractor_stride: int, + encoder_embed_dim: int, + encoder_projection_dropout: float, + encoder_num_layers: int, + encoder_num_heads: int, + encoder_ff_interm_features: int, + encoder_depthwise_conv_kernel_size: int, + encoder_dropout: float, + encoder_convolution_first: bool, + encoder_use_group_norm: bool, + mask_prob: float, + mask_selection: str, + mask_other: float, + mask_length: int, + no_mask_overlap: bool, + mask_min_space: int, + mask_channel_prob: float, + mask_channel_selection: str, + mask_channel_other: float, + mask_channel_length: int, + no_mask_channel_overlap: bool, + mask_channel_min_space: int, + num_negatives: int, + cross_sample_negatives: int, +) -> ConformerWav2Vec2PretrainModel: + """Build a custom Conformer Wav2Vec2 Model for pre-training + + Args: + extractor_input_dim (int): Input dimension of the features. + extractor_output_dim (int): Output dimension after feature extraction. + extractor_stride (int): + Stride used in time reduction layer of feature extraction. + encoder_embed_dim (int): + The dimension of the embedding in the feature projection. + encoder_projection_dropout (float): + The dropout probability applied after the input feature is projected to + ``embed_dim`` + encoder_num_layers (int): + Number of Conformer layers in the encoder. + encoder_num_heads (int): + Number of heads in each Conformer layer. 
+ encoder_ff_interm_features (int): + Hidden layer dimension of the feedforward network in each Conformer layer. + encoder_depthwise_conv_kernel_size (int or List[int]): + List of kernel sizes corresponding to each of the Conformer layers. + If int is provided, all layers will have the same kernel size. + encoder_dropout (float): + Dropout probability in each Conformer layer. + encoder_convolution_first (bool): + Whether to apply the convolution module ahead of the attention module + in each Conformer layer. + encoder_use_group_norm (bool): + Whether to use ``GroupNorm`` rather than ``BatchNorm1d`` in the convolution + module in each Conformer layer. + mask_prob (float): + Probability for each token to be chosen as start of the span to be masked. + mask_selection (str) + How to choose the mask length. Options: [``static``, ``uniform``, ``normal``, ``poisson``]. + mask_other (float): + Secondary mask argument (used for more complex distributions). + mask_length (int): + The lengths of the mask. + no_mask_overlap (bool): + Whether to allow masks to overlap. + mask_min_space (int): + Minimum space between spans (if no overlap is enabled). + mask_channel_prob: (float): + The probability of replacing a feature with 0. + mask_channel_selection (str): + How to choose the mask length for channel masking. + Options: [``static``, ``uniform``, ``normal``, ``poisson``]. + mask_channel_other (float): + Secondary mask argument for channel masking (used for more complex distributions). + mask_channel_length (int): + Minimum space between spans (if no overlap is enabled) for channel masking. + no_mask_channel_overlap (bool): + Whether to allow channel masks to overlap. + mask_channel_min_space (int): + Minimum space between spans for channel masking (if no overlap is enabled). + num_negatives (int): + Number of negatives to sample. + cross_sample_negatives (int): + Number of cross sampled negatives. + + Returns: + ConformerWav2Vec2PretrainModel: + The resulting model. 
+ """ + wav2vec2 = conformer_wav2vec2_model( + extractor_input_dim, + extractor_output_dim, + extractor_stride, + encoder_embed_dim, + encoder_projection_dropout, + encoder_num_layers, + encoder_num_heads, + encoder_ff_interm_features, + encoder_depthwise_conv_kernel_size, + encoder_dropout, + encoder_convolution_first, + encoder_use_group_norm, + ) + + mask_generator = components.MaskGenerator( + extractor_output_dim, + mask_prob, + mask_selection, + mask_other, + mask_length, + no_mask_overlap, + mask_min_space, + mask_channel_prob, + mask_channel_selection, + mask_channel_other, + mask_channel_length, + no_mask_channel_overlap, + mask_channel_min_space, + ) + + negative_sampler = _get_conformer_negativer_sampler( + extractor_output_dim, + encoder_embed_dim, + num_negatives, + cross_sample_negatives, + ) + + return ConformerWav2Vec2PretrainModel( + wav2vec2=wav2vec2, + mask_generator=mask_generator, + negative_sampler=negative_sampler, + ) + + +def conformer_wav2vec2_pretrain_base( + extractor_input_dim: int = 64, + extractor_output_dim: int = 256, + encoder_projection_dropout: float = 0.0, + mask_prob: float = 0.3, + mask_length: int = 3, + num_negatives: int = 100, + cross_sample_negatives: int = 0, +) -> ConformerWav2Vec2PretrainModel: + """Build Conformer Wav2Vec2 Model for pre-training with "small" architecture from + *Conformer-Based Self-Supervised Learning for Non-Speech Audio Tasks* :cite:`9746490` + + Args: + extractor_input_dim (int, optional): Input dimension of the features. (Default: 64) + extractor_output_dim (int, optional): Output dimension after feature extraction. (Default: 256) + encoder_projection_dropout (float, optional): + The dropout probability applied after the input feature is projected to + ``embed_dim``. (Default: 0.0) + mask_prob (float, optional): + Probability for each token to be chosen as start of the span to be masked. (Default: 0.3) + mask_length (int, optional): + The lengths of the mask. 
def conformer_wav2vec2_pretrain_large(
    extractor_input_dim: int = 64,
    extractor_output_dim: int = 256,
    encoder_projection_dropout: float = 0.0,
    mask_prob: float = 0.3,
    mask_length: int = 3,
    num_negatives: int = 100,
    cross_sample_negatives: int = 0,
) -> ConformerWav2Vec2PretrainModel:
    """Build Conformer Wav2Vec2 Model for pre-training with "large" architecture from
    *Conformer-Based Self-Supervised Learning for Non-Speech Audio Tasks* :cite:`9746490`

    Args:
        extractor_input_dim (int, optional): Input dimension of the features. (Default: 64)
        extractor_output_dim (int, optional): Output dimension after feature extraction. (Default: 256)
        encoder_projection_dropout (float, optional):
            The dropout probability applied after the input feature is projected to
            ``embed_dim``. (Default: 0.0)
        mask_prob (float, optional):
            Probability for each token to be chosen as start of the span to be masked. (Default: 0.3)
        mask_length (int, optional):
            The lengths of the mask. (Default: 3)
        num_negatives (int, optional):
            Number of sampled negatives. (Default: 100)
        cross_sample_negatives (int, optional):
            Number of cross sampled negatives. (Default: 0)

    Returns:
        ConformerWav2Vec2PretrainModel:
            The resulting model.
    """
    # "large" differs from "base" in encoder width (768) and head count (12).
    return conformer_wav2vec2_pretrain_model(
        extractor_input_dim=extractor_input_dim,
        extractor_output_dim=extractor_output_dim,
        extractor_stride=4,
        encoder_embed_dim=768,
        encoder_projection_dropout=encoder_projection_dropout,
        encoder_num_layers=12,
        encoder_num_heads=12,
        encoder_ff_interm_features=1024,
        encoder_depthwise_conv_kernel_size=[31] + [15] * 11,
        encoder_dropout=0.1,
        encoder_convolution_first=True,
        encoder_use_group_norm=True,
        mask_prob=mask_prob,
        mask_selection="static",
        mask_other=0.0,
        mask_length=mask_length,
        no_mask_overlap=False,
        mask_min_space=0,
        mask_channel_prob=0,
        mask_channel_selection="static",
        mask_channel_other=0,
        mask_channel_length=10,
        no_mask_channel_overlap=False,
        mask_channel_min_space=1,
        num_negatives=num_negatives,
        cross_sample_negatives=cross_sample_negatives,
    )
class FeatureEncoder(torch.nn.Module):
    """Extract features from log-mel spectrogram input. Consists of linear layer and time reduction layer.

    Args:
        input_dim (int): The feature dimension of log-mel spectrogram feature.
        output_dim (int): The feature dimension after linear layer.
        use_bias (bool): If ``True``, enable bias parameter in the linear layer.
        stride (int): Number of frames to merge for the output frame.
    """

    def __init__(self, input_dim: int, output_dim: int, use_bias: bool, stride: int):
        super().__init__()
        self.linear = torch.nn.Linear(input_dim, output_dim, bias=use_bias)
        self.time_reduction = _TimeReduction(stride)

    def forward(
        self, input: torch.Tensor, lengths: Optional[torch.Tensor]
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """
        Args:
            input (torch.Tensor): The log-mel spectrogram input.
                Tensor with dimensions `(batch, time, input_dim)`.
            lengths (torch.Tensor or None): Valid length of each input sample.
                Tensor with dimension `(batch, )`.

        Returns:
            (torch.Tensor, torch.Tensor or None):
                torch.Tensor
                    Returned feature Tensor after linear layer and time reduction layer.
                    Tensor with dimensions `(batch, time // stride, output_dim * stride)`.
                    (Time reduction concatenates ``stride`` frames — e.g. the base config
                    feeds 128-dim features at stride 4 into a 512-dim encoder.)
                torch.Tensor or None
                    The reduced lengths Tensor, or ``None`` if ``lengths`` was ``None``.
        """
        output = self.linear(input)
        if lengths is None:
            # No valid lengths given: run time reduction with full-length
            # dummies and keep `lengths` as None on return.
            B, T, _ = input.shape
            dummy_lengths = torch.full((B,), T)
            output, _ = self.time_reduction(output, dummy_lengths)
        else:
            output, lengths = self.time_reduction(output, lengths)
        return output, lengths


class EmformerEncoder(torch.nn.Module):
    """Emformer Encoder class for HuBERT pre-training. Consists of emformer module,
    linear layer and layer normalization layer.

    Args:
        emformer (torch.nn.Module):
            :py:class:`torchaudio.models.Emformer` module that consists of a list of emformer layers.
        output_linear (torch.nn.Module):
            Linear layer after emformer module.
        layer_norm (torch.nn.Module):
            Apply layer normalization to the output.
    """

    def __init__(
        self,
        emformer: torch.nn.Module,
        output_linear: torch.nn.Module,
        layer_norm: torch.nn.Module,
    ):
        super().__init__()
        self.emformer = emformer
        self.output_linear = output_linear
        self.layer_norm = layer_norm

    def forward(
        self,
        input: torch.Tensor,
        lengths: Optional[torch.Tensor],
    ) -> torch.Tensor:
        """
        Args:
            input (torch.Tensor): The input feature for emformer encoder.
                Tensor with dimensions `(batch, time, feature_dim)`.
            lengths (torch.Tensor or None): Valid length of each input sample.
                Tensor with dimension `(batch, )`.

        Returns:
            torch.Tensor: The feature Tensor after emformer encoder.
        """
        if lengths is None:
            B, T, _ = input.shape
            dummy_lengths = torch.full((B,), T)
            output, _ = self.emformer(input, dummy_lengths)
        else:
            output, lengths = self.emformer(input, lengths)
        output = self.output_linear(output)
        output = self.layer_norm(output)
        return output

    def extract_features(
        self,
        input: torch.Tensor,
        lengths: Optional[torch.Tensor],
        num_layers: Optional[int] = None,
    ) -> List[torch.Tensor]:
        """Extract output Tensors of the emformer layers.

        Args:
            input (torch.Tensor): The input feature for emformer encoder.
                Tensor with dimensions `(batch, time, feature_dim)`.
            lengths (torch.Tensor or None): Valid length of each input sample.
                Tensor with dimension `(batch, )`.
            num_layers (int or None, optional): If not ``None``, returns the first
                `num_layers` layers of Tensors as the output, otherwise returns the
                Tensors from all emformer layers.

        Returns:
            List[torch.Tensor]:
                Output Tensors of selected emformer layers.
        """
        if num_layers is not None:
            if not 0 < num_layers <= len(self.emformer.emformer_layers):
                raise ValueError(f"`num_layers` must be between [1, {len(self.emformer.emformer_layers)}]")

        ret: List[torch.Tensor] = []

        # Emformer internals operate on (time, batch, dim).
        input = input.permute(1, 0, 2)
        right_context = self.emformer._gen_right_context(input)
        utterance = input[: input.size(0) - self.emformer.right_context_length]
        attention_mask = self.emformer._gen_attention_mask(utterance)
        mems = (
            self.emformer.memory_op(utterance.permute(1, 2, 0)).permute(2, 0, 1)[:-1]
            if self.emformer.use_mem
            else torch.empty(0).to(dtype=input.dtype, device=input.device)
        )
        output = utterance
        if lengths is None:
            # BUGFIX: `input` was permuted to (time, batch, dim) above, so the
            # batch size is dim 1, not dim 0. The previous `B, T, _ = input.shape`
            # swapped them, building a wrong-shaped dummy lengths Tensor.
            T, B, _ = input.shape
            lengths = torch.full((B,), T)
        for layer in self.emformer.emformer_layers:
            output, right_context, mems = layer(output, lengths, right_context, mems, attention_mask)
            ret.append(output.permute(1, 0, 2))
            if num_layers is not None and len(ret) >= num_layers:
                return ret
        return ret


def _get_emformer_feature_extractor(input_dim: int, output_dim: int, use_bias: bool, stride: int) -> FeatureEncoder:
    """Construct FeatureEncoder for emformer model.

    Args:
        input_dim (int): The feature dimension of log-mel spectrogram feature.
        output_dim (int): The feature dimension after linear layer.
        use_bias (bool): If ``True``, enable bias parameter in the linear layer.
        stride (int): Number of frames to merge for the output frame.

    Returns:
        FeatureEncoder: The resulting FeatureEncoder module.
    """
    return FeatureEncoder(input_dim, output_dim, use_bias, stride)
def _get_emformer_encoder(
    input_dim: int,
    output_dim: int,
    num_heads: int,
    ffn_dim: int,
    num_layers: int,
    segment_length: int,
    left_context_length: int,
    right_context_length: int,
    dropout: float,
    activation: str,
    max_memory_size: int,
    weight_init_scale_strategy: Optional[str],
    tanh_on_mem: bool,
) -> EmformerEncoder:
    """Construct EmformerEncoder for emformer model.

    Args:
        input_dim (int): The feature dimension of input Tensor.
        output_dim (int): The feature dimension after EmformerEncoder.
        num_heads (int): Number of attention heads in each Emformer layer.
        ffn_dim: (int): Hidden layer dimension of feedforward network.
        num_layers (int): Number of Emformer layers to instantiate.
        segment_length (int): Length of each input segment.
        left_context_length (int): Length of left context.
        right_context_length (int): Length of right context.
        dropout (float): Dropout probability.
        activation (str): Activation function to use in each Emformer layer's
            feedforward network. Must be one of ("relu", "gelu", "silu").
        max_memory_size (int): Maximum number of memory elements to use.
        weight_init_scale_strategy (str or None): Per-layer weight initialization scaling
            strategy. Must be one of ("depthwise", "constant", ``None``).
        tanh_on_mem (bool): If ``True``, applies tanh to memory elements.

    Returns:
        EmformerEncoder: The resulting EmformerEncoder module.
    """
    # Assemble the three sub-modules the encoder wraps: the Emformer stack,
    # the output projection, and the final layer normalization.
    emformer_module = Emformer(
        input_dim=input_dim,
        num_heads=num_heads,
        ffn_dim=ffn_dim,
        num_layers=num_layers,
        segment_length=segment_length,
        left_context_length=left_context_length,
        right_context_length=right_context_length,
        dropout=dropout,
        activation=activation,
        max_memory_size=max_memory_size,
        weight_init_scale_strategy=weight_init_scale_strategy,
        tanh_on_mem=tanh_on_mem,
    )
    return EmformerEncoder(
        emformer_module,
        torch.nn.Linear(input_dim, output_dim),
        torch.nn.LayerNorm(output_dim),
    )
def emformer_hubert_model(
    extractor_input_dim: int,
    extractor_output_dim: int,
    extractor_use_bias: bool,
    extractor_stride: int,
    encoder_input_dim: int,
    encoder_output_dim: int,
    encoder_num_heads: int,
    encoder_ffn_dim: int,
    encoder_num_layers: int,
    encoder_segment_length: int,
    encoder_left_context_length: int,
    encoder_right_context_length: int,
    encoder_dropout: float,
    encoder_activation: str,
    encoder_max_memory_size: int,
    encoder_weight_init_scale_strategy: Optional[str],
    encoder_tanh_on_mem: bool,
    aux_num_out: Optional[int],
) -> Wav2Vec2Model:
    """Build a custom Emformer HuBERT model.

    Args:
        extractor_input_dim (int): The input dimension for feature extractor.
        extractor_output_dim (int): The output dimension after feature extractor.
        extractor_use_bias (bool): If ``True``, enable bias parameter in the linear layer of feature extractor.
        extractor_stride (int): Number of frames to merge for the output frame in feature extractor.
        encoder_input_dim (int): The input dimension for Emformer layer.
        encoder_output_dim (int): The output dimension after EmformerEncoder.
        encoder_num_heads (int): Number of attention heads in each Emformer layer.
        encoder_ffn_dim (int): Hidden layer dimension of feedforward network in Emformer.
        encoder_num_layers (int): Number of Emformer layers to instantiate.
        encoder_segment_length (int): Length of each input segment.
        encoder_left_context_length (int): Length of left context.
        encoder_right_context_length (int): Length of right context.
        encoder_dropout (float): Dropout probability.
        encoder_activation (str): Activation function to use in each Emformer layer's
            feedforward network. Must be one of ("relu", "gelu", "silu").
        encoder_max_memory_size (int): Maximum number of memory elements to use.
        encoder_weight_init_scale_strategy (str or None): Per-layer weight initialization scaling
            strategy. Must be one of ("depthwise", "constant", ``None``).
        encoder_tanh_on_mem (bool): If ``True``, applies tanh to memory elements.
        aux_num_out (int or None):
            When provided, attach an extra linear layer on top of encoder, which can be
            used for fine-tuning.

    Returns:
        Wav2Vec2Model:
            The resulting :py:class:`torchaudio.models.Wav2Vec2Model` model
            with a :py:class:`torchaudio.models.Emformer` encoder.
    """
    extractor = _get_emformer_feature_extractor(
        extractor_input_dim, extractor_output_dim, extractor_use_bias, extractor_stride
    )
    encoder = _get_emformer_encoder(
        encoder_input_dim,
        encoder_output_dim,
        encoder_num_heads,
        encoder_ffn_dim,
        encoder_num_layers,
        encoder_segment_length,
        encoder_left_context_length,
        encoder_right_context_length,
        encoder_dropout,
        encoder_activation,
        encoder_max_memory_size,
        encoder_weight_init_scale_strategy,
        encoder_tanh_on_mem,
    )
    # Optional fine-tuning head on top of the encoder output.
    aux = (
        torch.nn.Linear(in_features=encoder_output_dim, out_features=aux_num_out)
        if aux_num_out is not None
        else None
    )
    return Wav2Vec2Model(extractor, encoder, aux)


def emformer_hubert_base(
    extractor_input_dim: int = 80,
    extractor_output_dim: int = 128,
    encoder_dropout: float = 0.1,
    aux_num_out: Optional[int] = None,
) -> Wav2Vec2Model:
    """Build Emformer HuBERT Model with 20 Emformer layers.

    Args:
        extractor_input_dim (int, optional): The input dimension for feature extractor. (Default: 80)
        extractor_output_dim (int, optional): The output dimension after feature extractor. (Default: 128)
        encoder_dropout (float, optional): Dropout probability in Emformer. (Default: 0.1)
        aux_num_out (int or None, optional): Output dimension of aux layer for fine-tuning. (Default: ``None``)

    Returns:
        Wav2Vec2Model:
            The resulting :py:class:`torchaudio.models.Wav2Vec2Model` model
            with a :py:class:`torchaudio.models.Emformer` encoder.
    """
    # Note: extractor output (128) is concatenated over stride 4 frames by the
    # time reduction layer, matching the 512-dim encoder input.
    return emformer_hubert_model(
        extractor_input_dim=extractor_input_dim,
        extractor_output_dim=extractor_output_dim,
        extractor_use_bias=False,
        extractor_stride=4,
        encoder_input_dim=512,
        encoder_output_dim=1024,
        encoder_num_heads=8,
        encoder_ffn_dim=2048,
        encoder_num_layers=20,
        encoder_segment_length=4,
        encoder_left_context_length=30,
        encoder_right_context_length=1,
        encoder_dropout=encoder_dropout,
        encoder_activation="gelu",
        encoder_max_memory_size=0,
        encoder_weight_init_scale_strategy="depthwise",
        encoder_tanh_on_mem=True,
        aux_num_out=aux_num_out,
    )
torch.nn.Module, output_weight: int): + super().__init__() + self.module = module + self.output_weight = output_weight + + def forward(self, input: torch.Tensor): + output = self.module(input) + return output * self.output_weight + input + + +class _ConvolutionModule(torch.nn.Module): + def __init__( + self, + input_dim: int, + segment_length: int, + right_context_length: int, + kernel_size: int, + activation: str = "silu", + dropout: float = 0.0, + ): + super().__init__() + self.input_dim = input_dim + self.segment_length = segment_length + self.right_context_length = right_context_length + self.state_size = kernel_size - 1 + + self.pre_conv = torch.nn.Sequential( + torch.nn.LayerNorm(input_dim), torch.nn.Linear(input_dim, 2 * input_dim, bias=True), torch.nn.GLU() + ) + self.conv = torch.nn.Conv1d( + in_channels=input_dim, + out_channels=input_dim, + kernel_size=kernel_size, + stride=1, + padding=0, + groups=input_dim, + ) + self.post_conv = torch.nn.Sequential( + torch.nn.LayerNorm(input_dim), + _get_activation_module(activation), + torch.nn.Linear(input_dim, input_dim, bias=True), + torch.nn.Dropout(p=dropout), + ) + + def _split_right_context(self, utterance: torch.Tensor, right_context: torch.Tensor) -> torch.Tensor: + T, B, D = right_context.size() + if T % self.right_context_length != 0: + raise ValueError("Tensor length should be divisible by its right context length") + num_segments = T // self.right_context_length + # (num_segments, right context length, B, D) + right_context_segments = right_context.reshape(num_segments, self.right_context_length, B, D) + right_context_segments = right_context_segments.permute(0, 2, 1, 3).reshape( + num_segments * B, self.right_context_length, D + ) + + pad_segments = [] # [(kernel_size - 1, B, D), ...] 
+ for seg_idx in range(num_segments): + end_idx = min(self.state_size + (seg_idx + 1) * self.segment_length, utterance.size(0)) + start_idx = end_idx - self.state_size + pad_segments.append(utterance[start_idx:end_idx, :, :]) + + pad_segments = torch.cat(pad_segments, dim=1).permute(1, 0, 2) # (num_segments * B, kernel_size - 1, D) + return torch.cat([pad_segments, right_context_segments], dim=1).permute(0, 2, 1) + + def _merge_right_context(self, right_context: torch.Tensor, B: int) -> torch.Tensor: + # (num_segments * B, D, right_context_length) + right_context = right_context.reshape(-1, B, self.input_dim, self.right_context_length) + right_context = right_context.permute(0, 3, 1, 2) + return right_context.reshape(-1, B, self.input_dim) # (right_context_length * num_segments, B, D) + + def forward( + self, utterance: torch.Tensor, right_context: torch.Tensor, state: Optional[torch.Tensor] + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + input = torch.cat((right_context, utterance)) # input: (T, B, D) + x = self.pre_conv(input) + x_right_context, x_utterance = x[: right_context.size(0), :, :], x[right_context.size(0) :, :, :] + x_utterance = x_utterance.permute(1, 2, 0) # (B, D, T_utterance) + + if state is None: + state = torch.zeros( + input.size(1), + input.size(2), + self.state_size, + device=input.device, + dtype=input.dtype, + ) # (B, D, T) + state_x_utterance = torch.cat([state, x_utterance], dim=2) + + conv_utterance = self.conv(state_x_utterance) # (B, D, T_utterance) + conv_utterance = conv_utterance.permute(2, 0, 1) + + if self.right_context_length > 0: + # (B * num_segments, D, right_context_length + kernel_size - 1) + right_context_block = self._split_right_context(state_x_utterance.permute(2, 0, 1), x_right_context) + conv_right_context_block = self.conv(right_context_block) # (B * num_segments, D, right_context_length) + # (T_right_context, B, D) + conv_right_context = self._merge_right_context(conv_right_context_block, input.size(1)) + y 
= torch.cat([conv_right_context, conv_utterance], dim=0) + else: + y = conv_utterance + + output = self.post_conv(y) + input + new_state = state_x_utterance[:, :, -self.state_size :] + return output[right_context.size(0) :], output[: right_context.size(0)], new_state + + def infer( + self, utterance: torch.Tensor, right_context: torch.Tensor, state: Optional[torch.Tensor] + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + input = torch.cat((utterance, right_context)) + x = self.pre_conv(input) # (T, B, D) + x = x.permute(1, 2, 0) # (B, D, T) + + if state is None: + state = torch.zeros( + input.size(1), + input.size(2), + self.state_size, + device=input.device, + dtype=input.dtype, + ) # (B, D, T) + state_x = torch.cat([state, x], dim=2) + conv_out = self.conv(state_x) + conv_out = conv_out.permute(2, 0, 1) # T, B, D + output = self.post_conv(conv_out) + input + new_state = state_x[:, :, -self.state_size - right_context.size(0) : -right_context.size(0)] + return output[: utterance.size(0)], output[utterance.size(0) :], new_state + + +class _ConvEmformerLayer(torch.nn.Module): + r"""Convolution-augmented Emformer layer that constitutes ConvEmformer. + + Args: + input_dim (int): input dimension. + num_heads (int): number of attention heads. + ffn_dim: (int): hidden layer dimension of feedforward network. + segment_length (int): length of each input segment. + kernel_size (int): size of kernel to use in convolution module. + dropout (float, optional): dropout probability. (Default: 0.0) + ffn_activation (str, optional): activation function to use in feedforward network. + Must be one of ("relu", "gelu", "silu"). (Default: "relu") + left_context_length (int, optional): length of left context. (Default: 0) + right_context_length (int, optional): length of right context. (Default: 0) + max_memory_size (int, optional): maximum number of memory elements to use. 
(Default: 0) + weight_init_gain (float or None, optional): scale factor to apply when initializing + attention module parameters. (Default: ``None``) + tanh_on_mem (bool, optional): if ``True``, applies tanh to memory elements. (Default: ``False``) + negative_inf (float, optional): value to use for negative infinity in attention weights. (Default: -1e8) + conv_activation (str, optional): activation function to use in convolution module. + Must be one of ("relu", "gelu", "silu"). (Default: "silu") + """ + + def __init__( + self, + input_dim: int, + num_heads: int, + ffn_dim: int, + segment_length: int, + kernel_size: int, + dropout: float = 0.0, + ffn_activation: str = "relu", + left_context_length: int = 0, + right_context_length: int = 0, + max_memory_size: int = 0, + weight_init_gain: Optional[float] = None, + tanh_on_mem: bool = False, + negative_inf: float = -1e8, + conv_activation: str = "silu", + ): + super().__init__() + # TODO: implement talking heads attention. + self.attention = _EmformerAttention( + input_dim=input_dim, + num_heads=num_heads, + dropout=dropout, + weight_init_gain=weight_init_gain, + tanh_on_mem=tanh_on_mem, + negative_inf=negative_inf, + ) + self.dropout = torch.nn.Dropout(dropout) + self.memory_op = torch.nn.AvgPool1d(kernel_size=segment_length, stride=segment_length, ceil_mode=True) + + activation_module = _get_activation_module(ffn_activation) + self.ffn0 = _ResidualContainer( + torch.nn.Sequential( + torch.nn.LayerNorm(input_dim), + torch.nn.Linear(input_dim, ffn_dim), + activation_module, + torch.nn.Dropout(dropout), + torch.nn.Linear(ffn_dim, input_dim), + torch.nn.Dropout(dropout), + ), + 0.5, + ) + self.ffn1 = _ResidualContainer( + torch.nn.Sequential( + torch.nn.LayerNorm(input_dim), + torch.nn.Linear(input_dim, ffn_dim), + activation_module, + torch.nn.Dropout(dropout), + torch.nn.Linear(ffn_dim, input_dim), + torch.nn.Dropout(dropout), + ), + 0.5, + ) + self.layer_norm_input = torch.nn.LayerNorm(input_dim) + 
self.layer_norm_output = torch.nn.LayerNorm(input_dim) + + self.conv = _ConvolutionModule( + input_dim=input_dim, + kernel_size=kernel_size, + activation=conv_activation, + dropout=dropout, + segment_length=segment_length, + right_context_length=right_context_length, + ) + + self.left_context_length = left_context_length + self.segment_length = segment_length + self.max_memory_size = max_memory_size + self.input_dim = input_dim + self.kernel_size = kernel_size + self.use_mem = max_memory_size > 0 + + def _init_state(self, batch_size: int, device: Optional[torch.device]) -> List[torch.Tensor]: + empty_memory = torch.zeros(self.max_memory_size, batch_size, self.input_dim, device=device) + left_context_key = torch.zeros(self.left_context_length, batch_size, self.input_dim, device=device) + left_context_val = torch.zeros(self.left_context_length, batch_size, self.input_dim, device=device) + past_length = torch.zeros(1, batch_size, dtype=torch.int32, device=device) + conv_cache = torch.zeros( + batch_size, + self.input_dim, + self.kernel_size - 1, + device=device, + ) + return [empty_memory, left_context_key, left_context_val, past_length, conv_cache] + + def _unpack_state(self, state: List[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + past_length = state[3][0][0].item() + past_left_context_length = min(self.left_context_length, past_length) + past_mem_length = min(self.max_memory_size, math.ceil(past_length / self.segment_length)) + pre_mems = state[0][self.max_memory_size - past_mem_length :] + lc_key = state[1][self.left_context_length - past_left_context_length :] + lc_val = state[2][self.left_context_length - past_left_context_length :] + conv_cache = state[4] + return pre_mems, lc_key, lc_val, conv_cache + + def _pack_state( + self, + next_k: torch.Tensor, + next_v: torch.Tensor, + update_length: int, + mems: torch.Tensor, + conv_cache: torch.Tensor, + state: List[torch.Tensor], + ) -> List[torch.Tensor]: + new_k = 
torch.cat([state[1], next_k]) + new_v = torch.cat([state[2], next_v]) + state[0] = torch.cat([state[0], mems])[-self.max_memory_size :] + state[1] = new_k[new_k.shape[0] - self.left_context_length :] + state[2] = new_v[new_v.shape[0] - self.left_context_length :] + state[3] = state[3] + update_length + state[4] = conv_cache + return state + + def _apply_pre_attention( + self, utterance: torch.Tensor, right_context: torch.Tensor, summary: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + x = torch.cat([right_context, utterance, summary]) + ffn0_out = self.ffn0(x) + layer_norm_input_out = self.layer_norm_input(ffn0_out) + layer_norm_input_right_context, layer_norm_input_utterance, layer_norm_input_summary = ( + layer_norm_input_out[: right_context.size(0)], + layer_norm_input_out[right_context.size(0) : right_context.size(0) + utterance.size(0)], + layer_norm_input_out[right_context.size(0) + utterance.size(0) :], + ) + return ffn0_out, layer_norm_input_right_context, layer_norm_input_utterance, layer_norm_input_summary + + def _apply_post_attention( + self, + rc_output: torch.Tensor, + ffn0_out: torch.Tensor, + conv_cache: Optional[torch.Tensor], + rc_length: int, + utterance_length: int, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + result = self.dropout(rc_output) + ffn0_out[: rc_length + utterance_length] + conv_utterance, conv_right_context, conv_cache = self.conv(result[rc_length:], result[:rc_length], conv_cache) + result = torch.cat([conv_right_context, conv_utterance]) + result = self.ffn1(result) + result = self.layer_norm_output(result) + output_utterance, output_right_context = result[rc_length:], result[:rc_length] + return output_utterance, output_right_context, conv_cache + + def forward( + self, + utterance: torch.Tensor, + lengths: torch.Tensor, + right_context: torch.Tensor, + mems: torch.Tensor, + attention_mask: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + r"""Forward pass 
for training. + + B: batch size; + D: feature dimension of each frame; + T: number of utterance frames; + R: number of right context frames; + M: number of memory elements. + + Args: + utterance (torch.Tensor): utterance frames, with shape `(T, B, D)`. + lengths (torch.Tensor): with shape `(B,)` and i-th element representing + number of valid frames for i-th batch element in ``utterance``. + right_context (torch.Tensor): right context frames, with shape `(R, B, D)`. + mems (torch.Tensor): memory elements, with shape `(M, B, D)`. + attention_mask (torch.Tensor): attention mask for underlying attention module. + + Returns: + (Tensor, Tensor, Tensor): + Tensor + encoded utterance frames, with shape `(T, B, D)`. + Tensor + updated right context frames, with shape `(R, B, D)`. + Tensor + updated memory elements, with shape `(M, B, D)`. + """ + if self.use_mem: + summary = self.memory_op(utterance.permute(1, 2, 0)).permute(2, 0, 1) + else: + summary = torch.empty(0).to(dtype=utterance.dtype, device=utterance.device) + + ( + ffn0_out, + layer_norm_input_right_context, + layer_norm_input_utterance, + layer_norm_input_summary, + ) = self._apply_pre_attention(utterance, right_context, summary) + + rc_output, output_mems = self.attention( + utterance=layer_norm_input_utterance, + lengths=lengths, + right_context=layer_norm_input_right_context, + summary=layer_norm_input_summary, + mems=mems, + attention_mask=attention_mask, + ) + + output_utterance, output_right_context, _ = self._apply_post_attention( + rc_output, ffn0_out, None, right_context.size(0), utterance.size(0) + ) + + return output_utterance, output_right_context, output_mems + + @torch.jit.export + def infer( + self, + utterance: torch.Tensor, + lengths: torch.Tensor, + right_context: torch.Tensor, + state: Optional[List[torch.Tensor]], + mems: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor, List[torch.Tensor], torch.Tensor]: + r"""Forward pass for inference. 
+ + B: batch size; + D: feature dimension of each frame; + T: number of utterance frames; + R: number of right context frames; + M: number of memory elements. + + Args: + utterance (torch.Tensor): utterance frames, with shape `(T, B, D)`. + lengths (torch.Tensor): with shape `(B,)` and i-th element representing + number of valid frames for i-th batch element in ``utterance``. + right_context (torch.Tensor): right context frames, with shape `(R, B, D)`. + state (List[torch.Tensor] or None): list of tensors representing layer internal state + generated in preceding invocation of ``infer``. + mems (torch.Tensor): memory elements, with shape `(M, B, D)`. + + Returns: + (Tensor, Tensor, List[torch.Tensor], Tensor): + Tensor + encoded utterance frames, with shape `(T, B, D)`. + Tensor + updated right context frames, with shape `(R, B, D)`. + List[Tensor] + list of tensors representing layer internal state + generated in current invocation of ``infer``. + Tensor + updated memory elements, with shape `(M, B, D)`. 
+ """ + if self.use_mem: + summary = self.memory_op(utterance.permute(1, 2, 0)).permute(2, 0, 1)[:1] + else: + summary = torch.empty(0).to(dtype=utterance.dtype, device=utterance.device) + + ( + ffn0_out, + layer_norm_input_right_context, + layer_norm_input_utterance, + layer_norm_input_summary, + ) = self._apply_pre_attention(utterance, right_context, summary) + + if state is None: + state = self._init_state(layer_norm_input_utterance.size(1), device=layer_norm_input_utterance.device) + pre_mems, lc_key, lc_val, conv_cache = self._unpack_state(state) + + rc_output, next_m, next_k, next_v = self.attention.infer( + utterance=layer_norm_input_utterance, + lengths=lengths, + right_context=layer_norm_input_right_context, + summary=layer_norm_input_summary, + mems=pre_mems, + left_context_key=lc_key, + left_context_val=lc_val, + ) + + output_utterance, output_right_context, conv_cache = self._apply_post_attention( + rc_output, ffn0_out, conv_cache, right_context.size(0), utterance.size(0) + ) + output_state = self._pack_state(next_k, next_v, utterance.size(0), mems, conv_cache, state) + return output_utterance, output_right_context, output_state, next_m + + +class ConvEmformer(_EmformerImpl): + r"""Implements the convolution-augmented streaming transformer architecture introduced in + *Streaming Transformer Transducer based Speech Recognition Using Non-Causal Convolution* + :cite:`9747706`. + + Args: + input_dim (int): input dimension. + num_heads (int): number of attention heads in each ConvEmformer layer. + ffn_dim (int): hidden layer dimension of each ConvEmformer layer's feedforward network. + num_layers (int): number of ConvEmformer layers to instantiate. + segment_length (int): length of each input segment. + kernel_size (int): size of kernel to use in convolution modules. + dropout (float, optional): dropout probability. (Default: 0.0) + ffn_activation (str, optional): activation function to use in feedforward networks. + Must be one of ("relu", "gelu", "silu"). 
(Default: "relu") + left_context_length (int, optional): length of left context. (Default: 0) + right_context_length (int, optional): length of right context. (Default: 0) + max_memory_size (int, optional): maximum number of memory elements to use. (Default: 0) + weight_init_scale_strategy (str or None, optional): per-layer weight initialization scaling + strategy. Must be one of ("depthwise", "constant", ``None``). (Default: "depthwise") + tanh_on_mem (bool, optional): if ``True``, applies tanh to memory elements. (Default: ``False``) + negative_inf (float, optional): value to use for negative infinity in attention weights. (Default: -1e8) + conv_activation (str, optional): activation function to use in convolution modules. + Must be one of ("relu", "gelu", "silu"). (Default: "silu") + + Examples: + >>> conv_emformer = ConvEmformer(80, 4, 1024, 12, 16, 8, right_context_length=4) + >>> input = torch.rand(10, 200, 80) + >>> lengths = torch.randint(1, 200, (10,)) + >>> output, lengths = conv_emformer(input, lengths) + >>> input = torch.rand(4, 20, 80) + >>> lengths = torch.ones(4) * 20 + >>> output, lengths, states = conv_emformer.infer(input, lengths, None) + """ + + def __init__( + self, + input_dim: int, + num_heads: int, + ffn_dim: int, + num_layers: int, + segment_length: int, + kernel_size: int, + dropout: float = 0.0, + ffn_activation: str = "relu", + left_context_length: int = 0, + right_context_length: int = 0, + max_memory_size: int = 0, + weight_init_scale_strategy: Optional[str] = "depthwise", + tanh_on_mem: bool = False, + negative_inf: float = -1e8, + conv_activation: str = "silu", + ): + weight_init_gains = _get_weight_init_gains(weight_init_scale_strategy, num_layers) + emformer_layers = torch.nn.ModuleList( + [ + _ConvEmformerLayer( + input_dim, + num_heads, + ffn_dim, + segment_length, + kernel_size, + dropout=dropout, + ffn_activation=ffn_activation, + left_context_length=left_context_length, + right_context_length=right_context_length, + 
max_memory_size=max_memory_size, + weight_init_gain=weight_init_gains[layer_idx], + tanh_on_mem=tanh_on_mem, + negative_inf=negative_inf, + conv_activation=conv_activation, + ) + for layer_idx in range(num_layers) + ] + ) + super().__init__( + emformer_layers, + segment_length, + left_context_length=left_context_length, + right_context_length=right_context_length, + max_memory_size=max_memory_size, + ) diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/models/hifi_gan.py b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/hifi_gan.py new file mode 100644 index 0000000000000000000000000000000000000000..93d92e1854651358367c6388fa3d916941961367 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/hifi_gan.py @@ -0,0 +1,336 @@ +""" +MIT License + +Copyright (c) 2020 Jungil Kong + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+""" + +from typing import Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn import Conv1d, ConvTranspose1d + + +class HiFiGANVocoder(torch.nn.Module): + """Generator part of *HiFi GAN* :cite:`NEURIPS2020_c5d73680`. + Source: https://github.com/jik876/hifi-gan/blob/4769534d45265d52a904b850da5a622601885777/models.py#L75 + + Note: + To build the model, please use one of the factory functions: :py:func:`hifigan_vocoder`, + :py:func:`hifigan_vocoder_v1`, :py:func:`hifigan_vocoder_v2`, :py:func:`hifigan_vocoder_v3`. + + Args: + in_channels (int): Number of channels in the input features. + upsample_rates (tuple of ``int``): Factors by which each upsampling layer increases the time dimension. + upsample_initial_channel (int): Number of channels in the input feature tensor. + upsample_kernel_sizes (tuple of ``int``): Kernel size for each upsampling layer. + resblock_kernel_sizes (tuple of ``int``): Kernel size for each residual block. + resblock_dilation_sizes (tuple of tuples of ``int``): Dilation sizes for each 1D convolutional layer in each + residual block. For resblock type 1 inner tuples should have length 3, because there are 3 + convolutions in each layer. For resblock type 2 they should have length 2. + resblock_type (int, 1 or 2): Determines whether ``ResBlock1`` or ``ResBlock2`` will be used. + lrelu_slope (float): Slope of leaky ReLUs in activations. 
+ """ + + def __init__( + self, + in_channels: int, + upsample_rates: Tuple[int, ...], + upsample_initial_channel: int, + upsample_kernel_sizes: Tuple[int, ...], + resblock_kernel_sizes: Tuple[int, ...], + resblock_dilation_sizes: Tuple[Tuple[int, ...], ...], + resblock_type: int, + lrelu_slope: float, + ): + super(HiFiGANVocoder, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.conv_pre = Conv1d(in_channels, upsample_initial_channel, 7, 1, padding=3) + resblock = ResBlock1 if resblock_type == 1 else ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + self.ups.append( + ConvTranspose1d( + upsample_initial_channel // (2**i), + upsample_initial_channel // (2 ** (i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel // (2 ** (i + 1)) + for (k, d) in zip(resblock_kernel_sizes, resblock_dilation_sizes): + self.resblocks.append(resblock(ch, k, d, lrelu_slope)) + + self.conv_post = Conv1d(ch, 1, 7, 1, padding=3) + self.lrelu_slope = lrelu_slope + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Args: + x (Tensor): Feature input tensor of shape `(batch_size, num_channels, time_length)`. + + Returns: + Tensor of shape `(batch_size, 1, time_length * upsample_rate)`, where `upsample_rate` is the product + of upsample rates for all layers. 
+ """ + x = self.conv_pre(x) + for i, upsampling_layer in enumerate(self.ups): + x = F.leaky_relu(x, self.lrelu_slope) + x = upsampling_layer(x) + xs = torch.zeros_like(x) + for j in range(self.num_kernels): + res_block: ResBlockInterface = self.resblocks[i * self.num_kernels + j] + xs += res_block.forward(x) + x = xs / self.num_kernels + + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + +@torch.jit.interface +class ResBlockInterface(torch.nn.Module): + """Interface for ResBlock - necessary to make type annotations in ``HiFiGANVocoder.forward`` compatible + with TorchScript + """ + + def forward(self, x: torch.Tensor) -> torch.Tensor: + pass + + +class ResBlock1(torch.nn.Module): + """Residual block of type 1 for HiFiGAN Vocoder :cite:`NEURIPS2020_c5d73680`. + Args: + channels (int): Number of channels in the input features. + kernel_size (int, optional): Kernel size for 1D convolutions. (Default: ``3``) + dilation (tuple of 3 ``int``, optional): Dilations for each 1D convolution. (Default: ``(1, 3, 5)``) + lrelu_slope (float): Slope of leaky ReLUs in activations. 
+ """ + + def __init__( + self, channels: int, kernel_size: int = 3, dilation: Tuple[int, int, int] = (1, 3, 5), lrelu_slope: float = 0.1 + ): + super(ResBlock1, self).__init__() + self.convs1 = nn.ModuleList( + [ + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]), + ), + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]), + ), + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[2], + padding=get_padding(kernel_size, dilation[2]), + ), + ] + ) + + self.convs2 = nn.ModuleList( + [ + Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1)), + Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1)), + Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1)), + ] + ) + self.lrelu_slope = lrelu_slope + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Args: + x (Tensor): input of shape ``(batch_size, channels, time_length)``. + Returns: + Tensor of the same shape as input. + """ + for conv1, conv2 in zip(self.convs1, self.convs2): + xt = F.leaky_relu(x, self.lrelu_slope) + xt = conv1(xt) + xt = F.leaky_relu(xt, self.lrelu_slope) + xt = conv2(xt) + x = xt + x + return x + + +class ResBlock2(torch.nn.Module): + """Residual block of type 2 for HiFiGAN Vocoder :cite:`NEURIPS2020_c5d73680`. + Args: + channels (int): Number of channels in the input features. + kernel_size (int, optional): Kernel size for 1D convolutions. (Default: ``3``) + dilation (tuple of 2 ``int``, optional): Dilations for each 1D convolution. (Default: ``(1, 3)``) + lrelu_slope (float): Slope of leaky ReLUs in activations. 
+ """ + + def __init__( + self, channels: int, kernel_size: int = 3, dilation: Tuple[int, int] = (1, 3), lrelu_slope: float = 0.1 + ): + super(ResBlock2, self).__init__() + self.convs = nn.ModuleList( + [ + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]), + ), + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]), + ), + ] + ) + self.lrelu_slope = lrelu_slope + + def forward(self, x: torch.Tensor): + """ + Args: + x (Tensor): input of shape ``(batch_size, channels, time_length)``. + Returns: + Tensor of the same shape as input. + """ + for c in self.convs: + xt = F.leaky_relu(x, self.lrelu_slope) + xt = c(xt) + x = xt + x + return x + + +def get_padding(kernel_size, dilation=1): + """Find padding for which 1D convolution preserves the input shape.""" + return int((kernel_size * dilation - dilation) / 2) + + +def hifigan_vocoder( + in_channels: int, + upsample_rates: Tuple[int, ...], + upsample_initial_channel: int, + upsample_kernel_sizes: Tuple[int, ...], + resblock_kernel_sizes: Tuple[int, ...], + resblock_dilation_sizes: Tuple[Tuple[int, ...], ...], + resblock_type: int, + lrelu_slope: float, +) -> HiFiGANVocoder: + r"""Builds HiFi GAN Vocoder :cite:`NEURIPS2020_c5d73680`. + + Args: + in_channels (int): See :py:class:`HiFiGANVocoder`. + upsample_rates (tuple of ``int``): See :py:class:`HiFiGANVocoder`. + upsample_initial_channel (int): See :py:class:`HiFiGANVocoder`. + upsample_kernel_sizes (tuple of ``int``): See :py:class:`HiFiGANVocoder`. + resblock_kernel_sizes (tuple of ``int``): See :py:class:`HiFiGANVocoder`. + resblock_dilation_sizes (tuple of tuples of ``int``): See :py:class:`HiFiGANVocoder`. + resblock_type (int, 1 or 2): See :py:class:`HiFiGANVocoder`. + Returns: + HiFiGANVocoder: generated model. 
+ """ + + return HiFiGANVocoder( + upsample_rates=upsample_rates, + resblock_kernel_sizes=resblock_kernel_sizes, + resblock_dilation_sizes=resblock_dilation_sizes, + resblock_type=resblock_type, + upsample_initial_channel=upsample_initial_channel, + upsample_kernel_sizes=upsample_kernel_sizes, + in_channels=in_channels, + lrelu_slope=lrelu_slope, + ) + + +def hifigan_vocoder_v1() -> HiFiGANVocoder: + r"""Builds HiFiGAN Vocoder with V1 architecture :cite:`NEURIPS2020_c5d73680`. + + Returns: + HiFiGANVocoder: generated model. + """ + return hifigan_vocoder( + upsample_rates=(8, 8, 2, 2), + upsample_kernel_sizes=(16, 16, 4, 4), + upsample_initial_channel=512, + resblock_kernel_sizes=(3, 7, 11), + resblock_dilation_sizes=((1, 3, 5), (1, 3, 5), (1, 3, 5)), + resblock_type=1, + in_channels=80, + lrelu_slope=0.1, + ) + + +def hifigan_vocoder_v2() -> HiFiGANVocoder: + r"""Builds HiFiGAN Vocoder with V2 architecture :cite:`NEURIPS2020_c5d73680`. + + Returns: + HiFiGANVocoder: generated model. + """ + return hifigan_vocoder( + upsample_rates=(8, 8, 2, 2), + upsample_kernel_sizes=(16, 16, 4, 4), + upsample_initial_channel=128, + resblock_kernel_sizes=(3, 7, 11), + resblock_dilation_sizes=((1, 3, 5), (1, 3, 5), (1, 3, 5)), + resblock_type=1, + in_channels=80, + lrelu_slope=0.1, + ) + + +def hifigan_vocoder_v3() -> HiFiGANVocoder: + r"""Builds HiFiGAN Vocoder with V3 architecture :cite:`NEURIPS2020_c5d73680`. + + Returns: + HiFiGANVocoder: generated model. 
+ """ + return hifigan_vocoder( + upsample_rates=(8, 8, 4), + upsample_kernel_sizes=(16, 16, 8), + upsample_initial_channel=256, + resblock_kernel_sizes=(3, 5, 7), + resblock_dilation_sizes=((1, 2), (2, 6), (3, 12)), + resblock_type=2, + in_channels=80, + lrelu_slope=0.1, + ) diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/models/rnnt.py b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/rnnt.py new file mode 100644 index 0000000000000000000000000000000000000000..aa7e32d5b961a80b2637995a3e3be07eaa5d7165 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/rnnt.py @@ -0,0 +1,711 @@ +import math +from typing import Dict, List, Optional, Tuple + +import torch +from torchaudio.models import Conformer, RNNT +from torchaudio.models.rnnt import _Joiner, _Predictor, _TimeReduction, _Transcriber + + +TrieNode = Tuple[Dict[int, "TrieNode"], int, Optional[Tuple[int, int]]] + + +class _ConformerEncoder(torch.nn.Module, _Transcriber): + def __init__( + self, + *, + input_dim: int, + output_dim: int, + time_reduction_stride: int, + conformer_input_dim: int, + conformer_ffn_dim: int, + conformer_num_layers: int, + conformer_num_heads: int, + conformer_depthwise_conv_kernel_size: int, + conformer_dropout: float, + ) -> None: + super().__init__() + self.time_reduction = _TimeReduction(time_reduction_stride) + self.input_linear = torch.nn.Linear(input_dim * time_reduction_stride, conformer_input_dim) + self.conformer = Conformer( + num_layers=conformer_num_layers, + input_dim=conformer_input_dim, + ffn_dim=conformer_ffn_dim, + num_heads=conformer_num_heads, + depthwise_conv_kernel_size=conformer_depthwise_conv_kernel_size, + dropout=conformer_dropout, + use_group_norm=True, + convolution_first=True, + ) + self.output_linear = torch.nn.Linear(conformer_input_dim, output_dim) + self.layer_norm = torch.nn.LayerNorm(output_dim) + + def forward(self, input: torch.Tensor, lengths: torch.Tensor) -> Tuple[torch.Tensor, 
torch.Tensor]: + time_reduction_out, time_reduction_lengths = self.time_reduction(input, lengths) + input_linear_out = self.input_linear(time_reduction_out) + x, lengths = self.conformer(input_linear_out, time_reduction_lengths) + output_linear_out = self.output_linear(x) + layer_norm_out = self.layer_norm(output_linear_out) + return layer_norm_out, lengths + + def infer( + self, + input: torch.Tensor, + lengths: torch.Tensor, + states: Optional[List[List[torch.Tensor]]], + ) -> Tuple[torch.Tensor, torch.Tensor, List[List[torch.Tensor]]]: + raise RuntimeError("Conformer does not support streaming inference.") + + +class _JoinerBiasing(torch.nn.Module): + r"""Recurrent neural network transducer (RNN-T) joint network. + + Args: + input_dim (int): source and target input dimension. + output_dim (int): output dimension. + activation (str, optional): activation function to use in the joiner. + Must be one of ("relu", "tanh"). (Default: "relu") + biasing (bool): perform biasing + deepbiasing (bool): perform deep biasing + attndim (int): dimension of the biasing vector hptr + + """ + + def __init__( + self, + input_dim: int, + output_dim: int, + activation: str = "relu", + biasing: bool = False, + deepbiasing: bool = False, + attndim: int = 1, + ) -> None: + super().__init__() + self.linear = torch.nn.Linear(input_dim, output_dim, bias=True) + self.biasing = biasing + self.deepbiasing = deepbiasing + if self.biasing and self.deepbiasing: + self.biasinglinear = torch.nn.Linear(attndim, input_dim, bias=True) + self.attndim = attndim + if activation == "relu": + self.activation = torch.nn.ReLU() + elif activation == "tanh": + self.activation = torch.nn.Tanh() + else: + raise ValueError(f"Unsupported activation {activation}") + + def forward( + self, + source_encodings: torch.Tensor, + source_lengths: torch.Tensor, + target_encodings: torch.Tensor, + target_lengths: torch.Tensor, + hptr: torch.Tensor = None, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, 
torch.Tensor]: + r"""Forward pass for training. + + B: batch size; + T: maximum source sequence length in batch; + U: maximum target sequence length in batch; + D: dimension of each source and target sequence encoding. + + Args: + source_encodings (torch.Tensor): source encoding sequences, with + shape `(B, T, D)`. + source_lengths (torch.Tensor): with shape `(B,)` and i-th element representing + valid sequence length of i-th batch element in ``source_encodings``. + target_encodings (torch.Tensor): target encoding sequences, with shape `(B, U, D)`. + target_lengths (torch.Tensor): with shape `(B,)` and i-th element representing + valid sequence length of i-th batch element in ``target_encodings``. + hptr (torch.Tensor): deep biasing vector with shape `(B, T, U, A)`. + + Returns: + (torch.Tensor, torch.Tensor, torch.Tensor): + torch.Tensor + joint network output, with shape `(B, T, U, output_dim)`. + torch.Tensor + output source lengths, with shape `(B,)` and i-th element representing + number of valid elements along dim 1 for i-th batch element in joint network output. + torch.Tensor + output target lengths, with shape `(B,)` and i-th element representing + number of valid elements along dim 2 for i-th batch element in joint network output. + torch.Tensor + joint network second last layer output (i.e. before self.linear), with shape `(B, T, U, D)`. 
+ """ + joint_encodings = source_encodings.unsqueeze(2).contiguous() + target_encodings.unsqueeze(1).contiguous() + if self.biasing and self.deepbiasing and hptr is not None: + hptr = self.biasinglinear(hptr) + joint_encodings += hptr + elif self.biasing and self.deepbiasing: + # Hack here for unused parameters + joint_encodings += self.biasinglinear(joint_encodings.new_zeros(1, self.attndim)).mean() * 0 + activation_out = self.activation(joint_encodings) + output = self.linear(activation_out) + return output, source_lengths, target_lengths, activation_out + + +class RNNTBiasing(RNNT): + r"""torchaudio.models.RNNT() + + Recurrent neural network transducer (RNN-T) model. + + Note: + To build the model, please use one of the factory functions. + + Args: + transcriber (torch.nn.Module): transcription network. + predictor (torch.nn.Module): prediction network. + joiner (torch.nn.Module): joint network. + attndim (int): TCPGen attention dimension + biasing (bool): If true, use biasing, otherwise use standard RNN-T + deepbiasing (bool): If true, use deep biasing by extracting the biasing vector + embdim (int): dimension of symbol embeddings + jointdim (int): dimension of the joint network joint dimension + charlist (list): The list of word piece tokens in the same order as the output layer + encoutdim (int): dimension of the encoder output vectors + dropout_tcpgen (float): dropout rate for TCPGen + tcpsche (int): The epoch at which TCPGen starts to train + DBaverage (bool): If true, instead of TCPGen, use DBRNNT for biasing + """ + + def __init__( + self, + transcriber: _Transcriber, + predictor: _Predictor, + joiner: _Joiner, + attndim: int, + biasing: bool, + deepbiasing: bool, + embdim: int, + jointdim: int, + charlist: List[str], + encoutdim: int, + dropout_tcpgen: float, + tcpsche: int, + DBaverage: bool, + ) -> None: + super().__init__(transcriber, predictor, joiner) + self.attndim = attndim + self.deepbiasing = deepbiasing + self.jointdim = jointdim + self.embdim 
= embdim + self.encoutdim = encoutdim + self.char_list = charlist or [] + self.blank_idx = self.char_list.index("") + self.nchars = len(self.char_list) + self.DBaverage = DBaverage + self.biasing = biasing + if self.biasing: + if self.deepbiasing and self.DBaverage: + # Deep biasing without TCPGen + self.biasingemb = torch.nn.Linear(self.nchars, self.attndim, bias=False) + else: + # TCPGen parameters + self.ooKBemb = torch.nn.Embedding(1, self.embdim) + self.Qproj_char = torch.nn.Linear(self.embdim, self.attndim) + self.Qproj_acoustic = torch.nn.Linear(self.encoutdim, self.attndim) + self.Kproj = torch.nn.Linear(self.embdim, self.attndim) + self.pointer_gate = torch.nn.Linear(self.attndim + self.jointdim, 1) + self.dropout_tcpgen = torch.nn.Dropout(dropout_tcpgen) + self.tcpsche = tcpsche + + def forward( + self, + sources: torch.Tensor, + source_lengths: torch.Tensor, + targets: torch.Tensor, + target_lengths: torch.Tensor, + tries: TrieNode, + current_epoch: int, + predictor_state: Optional[List[List[torch.Tensor]]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, List[List[torch.Tensor]], torch.Tensor, torch.Tensor]: + r"""Forward pass for training. + + B: batch size; + T: maximum source sequence length in batch; + U: maximum target sequence length in batch; + D: feature dimension of each source sequence element. + + Args: + sources (torch.Tensor): source frame sequences right-padded with right context, with + shape `(B, T, D)`. + source_lengths (torch.Tensor): with shape `(B,)` and i-th element representing + number of valid frames for i-th batch element in ``sources``. + targets (torch.Tensor): target sequences, with shape `(B, U)` and each element + mapping to a target symbol. + target_lengths (torch.Tensor): with shape `(B,)` and i-th element representing + number of valid frames for i-th batch element in ``targets``. 
+ tries (TrieNode): wordpiece prefix trees representing the biasing list to be searched + current_epoch (Int): the current epoch number to determine if TCPGen should be trained + at this epoch + predictor_state (List[List[torch.Tensor]] or None, optional): list of lists of tensors + representing prediction network internal state generated in preceding invocation + of ``forward``. (Default: ``None``) + + Returns: + (torch.Tensor, torch.Tensor, torch.Tensor, List[List[torch.Tensor]]): + torch.Tensor + joint network output, with shape + `(B, max output source length, max output target length, output_dim (number of target symbols))`. + torch.Tensor + output source lengths, with shape `(B,)` and i-th element representing + number of valid elements along dim 1 for i-th batch element in joint network output. + torch.Tensor + output target lengths, with shape `(B,)` and i-th element representing + number of valid elements along dim 2 for i-th batch element in joint network output. + List[List[torch.Tensor]] + output states; list of lists of tensors + representing prediction network internal state generated in current invocation + of ``forward``. + torch.Tensor + TCPGen distribution, with shape + `(B, max output source length, max output target length, output_dim (number of target symbols))`. + torch.Tensor + Generation probability (or copy probability), with shape + `(B, max output source length, max output target length, 1)`. 
+ """ + source_encodings, source_lengths = self.transcriber( + input=sources, + lengths=source_lengths, + ) + target_encodings, target_lengths, predictor_state = self.predictor( + input=targets, + lengths=target_lengths, + state=predictor_state, + ) + # Forward TCPGen + hptr = None + tcpgen_dist, p_gen = None, None + if self.biasing and current_epoch >= self.tcpsche and tries != []: + ptrdist_mask, p_gen_mask = self.get_tcpgen_step_masks(targets, tries) + hptr, tcpgen_dist = self.forward_tcpgen(targets, ptrdist_mask, source_encodings) + hptr = self.dropout_tcpgen(hptr) + elif self.biasing: + # Hack here to bypass unused parameters + if self.DBaverage and self.deepbiasing: + dummy = self.biasingemb(source_encodings.new_zeros(1, len(self.char_list))).mean() + else: + dummy = source_encodings.new_zeros(1, self.embdim) + dummy = self.Qproj_char(dummy).mean() + dummy += self.Qproj_acoustic(source_encodings.new_zeros(1, source_encodings.size(-1))).mean() + dummy += self.Kproj(source_encodings.new_zeros(1, self.embdim)).mean() + dummy += self.pointer_gate(source_encodings.new_zeros(1, self.attndim + self.jointdim)).mean() + dummy += self.ooKBemb.weight.mean() + dummy = dummy * 0 + source_encodings += dummy + + output, source_lengths, target_lengths, jointer_activation = self.joiner( + source_encodings=source_encodings, + source_lengths=source_lengths, + target_encodings=target_encodings, + target_lengths=target_lengths, + hptr=hptr, + ) + + # Calculate Generation Probability + if self.biasing and hptr is not None and tcpgen_dist is not None: + p_gen = torch.sigmoid(self.pointer_gate(torch.cat((jointer_activation, hptr), dim=-1))) + # avoid collapsing to ooKB token in the first few updates + # if current_epoch == self.tcpsche: + # p_gen = p_gen * 0.1 + p_gen = p_gen.masked_fill(p_gen_mask.bool().unsqueeze(1).unsqueeze(-1), 0) + + return (output, source_lengths, target_lengths, predictor_state, tcpgen_dist, p_gen) + + def get_tcpgen_distribution(self, query, ptrdist_mask): 
+ # Make use of the predictor embedding matrix + keyvalues = torch.cat([self.predictor.embedding.weight.data, self.ooKBemb.weight], dim=0) + keyvalues = self.dropout_tcpgen(self.Kproj(keyvalues)) + # B * T * U * attndim, nbpe * attndim -> B * T * U * nbpe + tcpgendist = torch.einsum("ntuj,ij->ntui", query, keyvalues) + tcpgendist = tcpgendist / math.sqrt(query.size(-1)) + ptrdist_mask = ptrdist_mask.unsqueeze(1).repeat(1, tcpgendist.size(1), 1, 1) + tcpgendist.masked_fill_(ptrdist_mask.bool(), -1e9) + tcpgendist = torch.nn.functional.softmax(tcpgendist, dim=-1) + # B * T * U * nbpe, nbpe * attndim -> B * T * U * attndim + hptr = torch.einsum("ntui,ij->ntuj", tcpgendist[:, :, :, :-1], keyvalues[:-1, :]) + return hptr, tcpgendist + + def forward_tcpgen(self, targets, ptrdist_mask, source_encodings): + tcpgen_dist = None + if self.DBaverage and self.deepbiasing: + hptr = self.biasingemb(1 - ptrdist_mask[:, :, :-1].float()).unsqueeze(1) + else: + query_char = self.predictor.embedding(targets) + query_char = self.Qproj_char(query_char).unsqueeze(1) # B * 1 * U * attndim + query_acoustic = self.Qproj_acoustic(source_encodings).unsqueeze(2) # B * T * 1 * attndim + query = query_char + query_acoustic # B * T * U * attndim + hptr, tcpgen_dist = self.get_tcpgen_distribution(query, ptrdist_mask) + return hptr, tcpgen_dist + + def get_tcpgen_step_masks(self, yseqs, resettrie): + seqlen = len(yseqs[0]) + batch_masks = yseqs.new_ones(len(yseqs), seqlen, len(self.char_list) + 1) + p_gen_masks = [] + for i, yseq in enumerate(yseqs): + new_tree = resettrie + p_gen_mask = [] + for j, vy in enumerate(yseq): + vy = vy.item() + new_tree = new_tree[0] + if vy in [self.blank_idx]: + new_tree = resettrie + p_gen_mask.append(0) + elif self.char_list[vy].endswith("▁"): + if vy in new_tree and new_tree[vy][0] != {}: + new_tree = new_tree[vy] + else: + new_tree = resettrie + p_gen_mask.append(0) + elif vy not in new_tree: + new_tree = [{}] + p_gen_mask.append(1) + else: + new_tree = 
new_tree[vy] + p_gen_mask.append(0) + batch_masks[i, j, list(new_tree[0].keys())] = 0 + # In the original paper, ooKB node was not masked + # In this implementation, if not masking ooKB, ooKB probability + # would quickly collapse to 1.0 in the first few updates. + # Haven't found out why this happened. + # batch_masks[i, j, -1] = 0 + p_gen_masks.append(p_gen_mask + [1] * (seqlen - len(p_gen_mask))) + p_gen_masks = torch.Tensor(p_gen_masks).to(yseqs.device).byte() + return batch_masks, p_gen_masks + + def get_tcpgen_step_masks_prefix(self, yseqs, resettrie): + # Implemented for prefix-based wordpieces, not tested yet + seqlen = len(yseqs[0]) + batch_masks = yseqs.new_ones(len(yseqs), seqlen, len(self.char_list) + 1) + p_gen_masks = [] + for i, yseq in enumerate(yseqs): + p_gen_mask = [] + new_tree = resettrie + for j, vy in enumerate(yseq): + vy = vy.item() + new_tree = new_tree[0] + if vy in [self.blank_idx]: + new_tree = resettrie + batch_masks[i, j, list(new_tree[0].keys())] = 0 + elif self.char_list[vy].startswith("▁"): + new_tree = resettrie + if vy not in new_tree[0]: + batch_masks[i, j, list(new_tree[0].keys())] = 0 + else: + new_tree = new_tree[0][vy] + batch_masks[i, j, list(new_tree[0].keys())] = 0 + if new_tree[1] != -1: + batch_masks[i, j, list(resettrie[0].keys())] = 0 + else: + if vy not in new_tree: + new_tree = resettrie + batch_masks[i, j, list(new_tree[0].keys())] = 0 + else: + new_tree = new_tree[vy] + batch_masks[i, j, list(new_tree[0].keys())] = 0 + if new_tree[1] != -1: + batch_masks[i, j, list(resettrie[0].keys())] = 0 + p_gen_mask.append(0) + # batch_masks[i, j, -1] = 0 + p_gen_masks.append(p_gen_mask + [1] * (seqlen - len(p_gen_mask))) + p_gen_masks = torch.Tensor(p_gen_masks).to(yseqs.device).byte() + + return batch_masks, p_gen_masks + + def get_tcpgen_step(self, vy, trie, resettrie): + new_tree = trie[0] + if vy in [self.blank_idx]: + new_tree = resettrie + elif self.char_list[vy].endswith("▁"): + if vy in new_tree and new_tree[vy][0] != 
{}: + new_tree = new_tree[vy] + else: + new_tree = resettrie + elif vy not in new_tree: + new_tree = [{}] + else: + new_tree = new_tree[vy] + return new_tree + + def join( + self, + source_encodings: torch.Tensor, + source_lengths: torch.Tensor, + target_encodings: torch.Tensor, + target_lengths: torch.Tensor, + hptr: torch.Tensor = None, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + r"""Applies joint network to source and target encodings. + + B: batch size; + T: maximum source sequence length in batch; + U: maximum target sequence length in batch; + D: dimension of each source and target sequence encoding. + A: TCPGen attention dimension + + Args: + source_encodings (torch.Tensor): source encoding sequences, with + shape `(B, T, D)`. + source_lengths (torch.Tensor): with shape `(B,)` and i-th element representing + valid sequence length of i-th batch element in ``source_encodings``. + target_encodings (torch.Tensor): target encoding sequences, with shape `(B, U, D)`. + target_lengths (torch.Tensor): with shape `(B,)` and i-th element representing + valid sequence length of i-th batch element in ``target_encodings``. + hptr (torch.Tensor): deep biasing vector with shape `(B, T, U, A)`. + + Returns: + (torch.Tensor, torch.Tensor, torch.Tensor): + torch.Tensor + joint network output, with shape `(B, T, U, output_dim)`. + torch.Tensor + output source lengths, with shape `(B,)` and i-th element representing + number of valid elements along dim 1 for i-th batch element in joint network output. + torch.Tensor + joint network second last layer output, with shape `(B, T, U, D)`. 
+ """ + output, source_lengths, target_lengths, jointer_activation = self.joiner( + source_encodings=source_encodings, + source_lengths=source_lengths, + target_encodings=target_encodings, + target_lengths=target_lengths, + hptr=hptr, + ) + return output, source_lengths, jointer_activation + + +def conformer_rnnt_model( + *, + input_dim: int, + encoding_dim: int, + time_reduction_stride: int, + conformer_input_dim: int, + conformer_ffn_dim: int, + conformer_num_layers: int, + conformer_num_heads: int, + conformer_depthwise_conv_kernel_size: int, + conformer_dropout: float, + num_symbols: int, + symbol_embedding_dim: int, + num_lstm_layers: int, + lstm_hidden_dim: int, + lstm_layer_norm: int, + lstm_layer_norm_epsilon: int, + lstm_dropout: int, + joiner_activation: str, +) -> RNNT: + r"""Builds Conformer-based recurrent neural network transducer (RNN-T) model. + + Args: + input_dim (int): dimension of input sequence frames passed to transcription network. + encoding_dim (int): dimension of transcription- and prediction-network-generated encodings + passed to joint network. + time_reduction_stride (int): factor by which to reduce length of input sequence. + conformer_input_dim (int): dimension of Conformer input. + conformer_ffn_dim (int): hidden layer dimension of each Conformer layer's feedforward network. + conformer_num_layers (int): number of Conformer layers to instantiate. + conformer_num_heads (int): number of attention heads in each Conformer layer. + conformer_depthwise_conv_kernel_size (int): kernel size of each Conformer layer's depthwise convolution layer. + conformer_dropout (float): Conformer dropout probability. + num_symbols (int): cardinality of set of target tokens. + symbol_embedding_dim (int): dimension of each target token embedding. + num_lstm_layers (int): number of LSTM layers to instantiate. + lstm_hidden_dim (int): output dimension of each LSTM layer. + lstm_layer_norm (bool): if ``True``, enables layer normalization for LSTM layers. 
+ lstm_layer_norm_epsilon (float): value of epsilon to use in LSTM layer normalization layers. + lstm_dropout (float): LSTM dropout probability. + joiner_activation (str): activation function to use in the joiner. + Must be one of ("relu", "tanh"). (Default: "relu") + + Returns: + RNNT: + Conformer RNN-T model. + """ + encoder = _ConformerEncoder( + input_dim=input_dim, + output_dim=encoding_dim, + time_reduction_stride=time_reduction_stride, + conformer_input_dim=conformer_input_dim, + conformer_ffn_dim=conformer_ffn_dim, + conformer_num_layers=conformer_num_layers, + conformer_num_heads=conformer_num_heads, + conformer_depthwise_conv_kernel_size=conformer_depthwise_conv_kernel_size, + conformer_dropout=conformer_dropout, + ) + predictor = _Predictor( + num_symbols=num_symbols, + output_dim=encoding_dim, + symbol_embedding_dim=symbol_embedding_dim, + num_lstm_layers=num_lstm_layers, + lstm_hidden_dim=lstm_hidden_dim, + lstm_layer_norm=lstm_layer_norm, + lstm_layer_norm_epsilon=lstm_layer_norm_epsilon, + lstm_dropout=lstm_dropout, + ) + joiner = _Joiner(encoding_dim, num_symbols, activation=joiner_activation) + return RNNT(encoder, predictor, joiner) + + +def conformer_rnnt_base() -> RNNT: + r"""Builds basic version of Conformer RNN-T model. + + Returns: + RNNT: + Conformer RNN-T model. 
+ """ + return conformer_rnnt_model( + input_dim=80, + encoding_dim=1024, + time_reduction_stride=4, + conformer_input_dim=256, + conformer_ffn_dim=1024, + conformer_num_layers=16, + conformer_num_heads=4, + conformer_depthwise_conv_kernel_size=31, + conformer_dropout=0.1, + num_symbols=1024, + symbol_embedding_dim=256, + num_lstm_layers=2, + lstm_hidden_dim=512, + lstm_layer_norm=True, + lstm_layer_norm_epsilon=1e-5, + lstm_dropout=0.3, + joiner_activation="tanh", + ) + + +def conformer_rnnt_biasing( + *, + input_dim: int, + encoding_dim: int, + time_reduction_stride: int, + conformer_input_dim: int, + conformer_ffn_dim: int, + conformer_num_layers: int, + conformer_num_heads: int, + conformer_depthwise_conv_kernel_size: int, + conformer_dropout: float, + num_symbols: int, + symbol_embedding_dim: int, + num_lstm_layers: int, + lstm_hidden_dim: int, + lstm_layer_norm: int, + lstm_layer_norm_epsilon: int, + lstm_dropout: int, + joiner_activation: str, + attndim: int, + biasing: bool, + charlist: List[str], + deepbiasing: bool, + tcpsche: int, + DBaverage: bool, +) -> RNNTBiasing: + r"""Builds Conformer-based recurrent neural network transducer (RNN-T) model. + + Args: + input_dim (int): dimension of input sequence frames passed to transcription network. + encoding_dim (int): dimension of transcription- and prediction-network-generated encodings + passed to joint network. + time_reduction_stride (int): factor by which to reduce length of input sequence. + conformer_input_dim (int): dimension of Conformer input. + conformer_ffn_dim (int): hidden layer dimension of each Conformer layer's feedforward network. + conformer_num_layers (int): number of Conformer layers to instantiate. + conformer_num_heads (int): number of attention heads in each Conformer layer. + conformer_depthwise_conv_kernel_size (int): kernel size of each Conformer layer's depthwise convolution layer. + conformer_dropout (float): Conformer dropout probability. 
+ num_symbols (int): cardinality of set of target tokens. + symbol_embedding_dim (int): dimension of each target token embedding. + num_lstm_layers (int): number of LSTM layers to instantiate. + lstm_hidden_dim (int): output dimension of each LSTM layer. + lstm_layer_norm (bool): if ``True``, enables layer normalization for LSTM layers. + lstm_layer_norm_epsilon (float): value of epsilon to use in LSTM layer normalization layers. + lstm_dropout (float): LSTM dropout probability. + joiner_activation (str): activation function to use in the joiner. + Must be one of ("relu", "tanh"). (Default: "relu") + attndim (int): TCPGen attention dimension + biasing (bool): If true, use biasing, otherwise use standard RNN-T + charlist (list): The list of word piece tokens in the same order as the output layer + deepbiasing (bool): If true, use deep biasing by extracting the biasing vector + tcpsche (int): The epoch at which TCPGen starts to train + DBaverage (bool): If true, instead of TCPGen, use DBRNNT for biasing + + Returns: + RNNT: + Conformer RNN-T model with TCPGen-based biasing support. 
+ """ + encoder = _ConformerEncoder( + input_dim=input_dim, + output_dim=encoding_dim, + time_reduction_stride=time_reduction_stride, + conformer_input_dim=conformer_input_dim, + conformer_ffn_dim=conformer_ffn_dim, + conformer_num_layers=conformer_num_layers, + conformer_num_heads=conformer_num_heads, + conformer_depthwise_conv_kernel_size=conformer_depthwise_conv_kernel_size, + conformer_dropout=conformer_dropout, + ) + predictor = _Predictor( + num_symbols=num_symbols, + output_dim=encoding_dim, + symbol_embedding_dim=symbol_embedding_dim, + num_lstm_layers=num_lstm_layers, + lstm_hidden_dim=lstm_hidden_dim, + lstm_layer_norm=lstm_layer_norm, + lstm_layer_norm_epsilon=lstm_layer_norm_epsilon, + lstm_dropout=lstm_dropout, + ) + joiner = _JoinerBiasing( + encoding_dim, + num_symbols, + activation=joiner_activation, + deepbiasing=deepbiasing, + attndim=attndim, + biasing=biasing, + ) + return RNNTBiasing( + encoder, + predictor, + joiner, + attndim, + biasing, + deepbiasing, + symbol_embedding_dim, + encoding_dim, + charlist, + encoding_dim, + conformer_dropout, + tcpsche, + DBaverage, + ) + + +def conformer_rnnt_biasing_base(charlist=None, biasing=True) -> RNNT: + r"""Builds basic version of Conformer RNN-T model with TCPGen. + + Returns: + RNNT: + Conformer RNN-T model with TCPGen-based biasing support. 
+ """ + return conformer_rnnt_biasing( + input_dim=80, + encoding_dim=576, + time_reduction_stride=4, + conformer_input_dim=144, + conformer_ffn_dim=576, + conformer_num_layers=16, + conformer_num_heads=4, + conformer_depthwise_conv_kernel_size=31, + conformer_dropout=0.1, + num_symbols=601, + symbol_embedding_dim=256, + num_lstm_layers=1, + lstm_hidden_dim=320, + lstm_layer_norm=True, + lstm_layer_norm_epsilon=1e-5, + lstm_dropout=0.3, + joiner_activation="tanh", + attndim=256, + biasing=biasing, + charlist=charlist, + deepbiasing=True, + tcpsche=30, + DBaverage=False, + ) diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/models/rnnt_decoder.py b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/rnnt_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..4e7c4ac661e29da7d104f80541dc0c9919b98ea0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/prototype/models/rnnt_decoder.py @@ -0,0 +1,399 @@ +from typing import Callable, Dict, List, Optional, Tuple + +import torch +from torchaudio.models import RNNT +from torchaudio.prototype.models.rnnt import TrieNode + +__all__ = ["Hypothesis", "RNNTBeamSearchBiasing"] + + +Hypothesis = Tuple[List[int], torch.Tensor, List[List[torch.Tensor]], float, list] +Hypothesis.__doc__ = """Hypothesis generated by RNN-T beam search decoder, + represented as tuple of (tokens, prediction network output, prediction network state, score). 
+ """ + + +def _get_hypo_tokens(hypo: Hypothesis) -> List[int]: + return hypo[0] + + +def _get_hypo_predictor_out(hypo: Hypothesis) -> torch.Tensor: + return hypo[1] + + +def _get_hypo_state(hypo: Hypothesis) -> List[List[torch.Tensor]]: + return hypo[2] + + +def _get_hypo_score(hypo: Hypothesis) -> float: + return hypo[3] + + +def _get_hypo_trie(hypo: Hypothesis) -> TrieNode: + return hypo[4] + + +def _set_hypo_trie(hypo: Hypothesis, trie: TrieNode) -> None: + hypo[4] = trie + + +def _get_hypo_key(hypo: Hypothesis) -> str: + return str(hypo[0]) + + +def _batch_state(hypos: List[Hypothesis]) -> List[List[torch.Tensor]]: + states: List[List[torch.Tensor]] = [] + for i in range(len(_get_hypo_state(hypos[0]))): + batched_state_components: List[torch.Tensor] = [] + for j in range(len(_get_hypo_state(hypos[0])[i])): + batched_state_components.append(torch.cat([_get_hypo_state(hypo)[i][j] for hypo in hypos])) + states.append(batched_state_components) + return states + + +def _slice_state(states: List[List[torch.Tensor]], idx: int, device: torch.device) -> List[List[torch.Tensor]]: + idx_tensor = torch.tensor([idx], device=device) + return [[state.index_select(0, idx_tensor) for state in state_tuple] for state_tuple in states] + + +def _default_hypo_sort_key(hypo: Hypothesis) -> float: + return _get_hypo_score(hypo) / (len(_get_hypo_tokens(hypo)) + 1) + + +def _compute_updated_scores( + hypos: List[Hypothesis], + next_token_probs: torch.Tensor, + beam_width: int, +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + hypo_scores = torch.tensor([_get_hypo_score(h) for h in hypos]).unsqueeze(1) + nonblank_scores = hypo_scores + next_token_probs[:, :-1] # [beam_width, num_tokens - 1] + nonblank_nbest_scores, nonblank_nbest_idx = nonblank_scores.reshape(-1).topk(beam_width) + nonblank_nbest_hypo_idx = nonblank_nbest_idx.div(nonblank_scores.shape[1], rounding_mode="trunc") + nonblank_nbest_token = nonblank_nbest_idx % nonblank_scores.shape[1] + return nonblank_nbest_scores, 
nonblank_nbest_hypo_idx, nonblank_nbest_token + + +def _remove_hypo(hypo: Hypothesis, hypo_list: List[Hypothesis]) -> None: + for i, elem in enumerate(hypo_list): + if _get_hypo_key(hypo) == _get_hypo_key(elem): + del hypo_list[i] + break + + +class RNNTBeamSearchBiasing(torch.nn.Module): + r"""Beam search decoder for RNN-T model with biasing support. + + Args: + model (RNNT): RNN-T model to use. + blank (int): index of blank token in vocabulary. + temperature (float, optional): temperature to apply to joint network output. + Larger values yield more uniform samples. (Default: 1.0) + hypo_sort_key (Callable[[Hypothesis], float] or None, optional): callable that computes a score + for a given hypothesis to rank hypotheses by. If ``None``, defaults to callable that returns + hypothesis score normalized by token sequence length. (Default: None) + step_max_tokens (int, optional): maximum number of tokens to emit per input time step. (Default: 100) + trie (list, optional): the prefix tree for TCPGen biasing + biasing (bool, optional): If true, do biasing, otherwise use standard RNN-T support + """ + + def __init__( + self, + model: RNNT, + blank: int, + temperature: float = 1.0, + hypo_sort_key: Optional[Callable[[Hypothesis], float]] = None, + step_max_tokens: int = 100, + trie: TrieNode = None, + biasing: bool = False, + ) -> None: + super().__init__() + self.model = model + self.blank = blank + self.temperature = temperature + self.resettrie = trie or [] + self.dobiasing = biasing + + if hypo_sort_key is None: + self.hypo_sort_key = _default_hypo_sort_key + else: + self.hypo_sort_key = hypo_sort_key + + self.step_max_tokens = step_max_tokens + + def _init_b_hypos(self, hypo: Optional[Hypothesis], device: torch.device) -> List[Hypothesis]: + if hypo is not None: + token = _get_hypo_tokens(hypo)[-1] + state = _get_hypo_state(hypo) + else: + token = self.blank + state = None + + one_tensor = torch.tensor([1], device=device) + pred_out, _, pred_state = 
self.model.predict(torch.tensor([[token]], device=device), one_tensor, state) + init_hypo = ([token], pred_out[0].detach(), pred_state, 0.0, self.resettrie) + return [init_hypo] + + def _get_trie_mask(self, trie): + step_mask = torch.ones(len(self.model.char_list) + 1) + step_mask[list(trie[0].keys())] = 0 + # step_mask[-1] = 0 + return step_mask + + def _get_generation_prob(self, trie): + if len(trie[0].keys()) == 0: + return True + else: + return False + + def _gen_next_token_probs( + self, enc_out: torch.Tensor, hypos: List[Hypothesis], device: torch.device + ) -> torch.Tensor: + one_tensor = torch.tensor([1], device=device) + predictor_out = torch.stack([_get_hypo_predictor_out(h) for h in hypos], dim=0) + if self.dobiasing: + # Get valid subset of wordpieces + trie_masks = torch.stack([self._get_trie_mask(_get_hypo_trie(h)) for h in hypos], dim=0) + trie_masks = trie_masks.to(enc_out.device).unsqueeze(1) # beam_width, 1, nchars + # Determine if there is any paths on the trie + genprob_masks = torch.tensor([self._get_generation_prob(_get_hypo_trie(h)) for h in hypos]) # beam_width + genprob_masks = genprob_masks.to(enc_out.device) + # Forward TCPGen component + last_tokens = torch.tensor([_get_hypo_tokens(h)[-1] for h in hypos]).unsqueeze(-1).to(enc_out.device) + hptr, tcpgen_dist = self.model.forward_tcpgen(last_tokens, trie_masks, enc_out) + else: + hptr = None + # hptr sent to joiner, if deepbiasing is True joiner will use it + joined_out, _, joined_activation = self.model.join( + enc_out, + one_tensor, + predictor_out, + torch.tensor([1] * len(hypos), device=device), + hptr=hptr, + ) # [beam_width, 1, 1, num_tokens] + if self.dobiasing: + p_gen = torch.sigmoid(self.model.pointer_gate(torch.cat((joined_activation, hptr), dim=-1))) + p_gen = p_gen.masked_fill(genprob_masks.view(p_gen.size(0), 1, 1, 1), 0) + model_tu = torch.softmax(joined_out / self.temperature, dim=3) + # assuming last token is blank + p_not_null = 1.0 - model_tu[:, :, :, -1:] + 
ptr_dist_fact = torch.cat([tcpgen_dist[:, :, :, :-2], tcpgen_dist[:, :, :, -1:]], dim=-1) * p_not_null + ptr_gen_complement = tcpgen_dist[:, :, :, -1:] * p_gen + p_partial = ptr_dist_fact[:, :, :, :-1] * p_gen + model_tu[:, :, :, :-1] * (1 - p_gen + ptr_gen_complement) + p_final = torch.cat([p_partial, model_tu[:, :, :, -1:]], dim=-1) + joined_out = torch.log(p_final) + else: + joined_out = torch.nn.functional.log_softmax(joined_out / self.temperature, dim=3) + return joined_out[:, 0, 0] + + def _gen_b_hypos( + self, + b_hypos: List[Hypothesis], + a_hypos: List[Hypothesis], + next_token_probs: torch.Tensor, + key_to_b_hypo: Dict[str, Hypothesis], + ) -> List[Hypothesis]: + for i in range(len(a_hypos)): + h_a = a_hypos[i] + append_blank_score = _get_hypo_score(h_a) + next_token_probs[i, -1] + if _get_hypo_key(h_a) in key_to_b_hypo: + h_b = key_to_b_hypo[_get_hypo_key(h_a)] + _remove_hypo(h_b, b_hypos) + score = float(torch.tensor(_get_hypo_score(h_b)).logaddexp(append_blank_score)) + else: + score = float(append_blank_score) + h_b = ( + _get_hypo_tokens(h_a), + _get_hypo_predictor_out(h_a), + _get_hypo_state(h_a), + score, + _get_hypo_trie(h_a), + ) + b_hypos.append(h_b) + key_to_b_hypo[_get_hypo_key(h_b)] = h_b + _, sorted_idx = torch.tensor([_get_hypo_score(hypo) for hypo in b_hypos]).sort() + return [b_hypos[idx] for idx in sorted_idx] + + def _gen_a_hypos( + self, + a_hypos: List[Hypothesis], + b_hypos: List[Hypothesis], + next_token_probs: torch.Tensor, + t: int, + beam_width: int, + device: torch.device, + ) -> List[Hypothesis]: + ( + nonblank_nbest_scores, + nonblank_nbest_hypo_idx, + nonblank_nbest_token, + ) = _compute_updated_scores(a_hypos, next_token_probs, beam_width) + + if len(b_hypos) < beam_width: + b_nbest_score = -float("inf") + else: + b_nbest_score = _get_hypo_score(b_hypos[-beam_width]) + + base_hypos: List[Hypothesis] = [] + new_tokens: List[int] = [] + new_scores: List[float] = [] + for i in range(beam_width): + score = 
float(nonblank_nbest_scores[i]) + if score > b_nbest_score: + a_hypo_idx = int(nonblank_nbest_hypo_idx[i]) + base_hypos.append(a_hypos[a_hypo_idx]) + new_tokens.append(int(nonblank_nbest_token[i])) + new_scores.append(score) + + if base_hypos: + new_hypos = self._gen_new_hypos(base_hypos, new_tokens, new_scores, t, device) + else: + new_hypos: List[Hypothesis] = [] + + return new_hypos + + def _gen_new_hypos( + self, + base_hypos: List[Hypothesis], + tokens: List[int], + scores: List[float], + t: int, + device: torch.device, + ) -> List[Hypothesis]: + tgt_tokens = torch.tensor([[token] for token in tokens], device=device) + states = _batch_state(base_hypos) + pred_out, _, pred_states = self.model.predict( + tgt_tokens, + torch.tensor([1] * len(base_hypos), device=device), + states, + ) + new_hypos: List[Hypothesis] = [] + for i, h_a in enumerate(base_hypos): + new_tokens = _get_hypo_tokens(h_a) + [tokens[i]] + if self.dobiasing: + new_trie = self.model.get_tcpgen_step(tokens[i], _get_hypo_trie(h_a), self.resettrie) + else: + new_trie = self.resettrie + new_hypos.append( + (new_tokens, pred_out[i].detach(), _slice_state(pred_states, i, device), scores[i], new_trie) + ) + return new_hypos + + def _search( + self, + enc_out: torch.Tensor, + hypo: Optional[Hypothesis], + beam_width: int, + ) -> List[Hypothesis]: + n_time_steps = enc_out.shape[1] + device = enc_out.device + + a_hypos: List[Hypothesis] = [] + b_hypos = self._init_b_hypos(hypo, device) + for t in range(n_time_steps): + a_hypos = b_hypos + b_hypos = torch.jit.annotate(List[Hypothesis], []) + key_to_b_hypo: Dict[str, Hypothesis] = {} + symbols_current_t = 0 + + while a_hypos: + next_token_probs = self._gen_next_token_probs(enc_out[:, t : t + 1], a_hypos, device) + next_token_probs = next_token_probs.cpu() + b_hypos = self._gen_b_hypos(b_hypos, a_hypos, next_token_probs, key_to_b_hypo) + + if symbols_current_t == self.step_max_tokens: + break + + a_hypos = self._gen_a_hypos( + a_hypos, + b_hypos, + 
next_token_probs, + t, + beam_width, + device, + ) + if a_hypos: + symbols_current_t += 1 + + _, sorted_idx = torch.tensor([self.hypo_sort_key(hypo) for hypo in b_hypos]).topk(beam_width) + b_hypos = [b_hypos[idx] for idx in sorted_idx] + + return b_hypos + + def forward( + self, + input: torch.Tensor, + length: torch.Tensor, + beam_width: int, + ) -> List[Hypothesis]: + r"""Performs beam search for the given input sequence. + + T: number of frames; + D: feature dimension of each frame. + + Args: + input (torch.Tensor): sequence of input frames, with shape (T, D) or (1, T, D). + length (torch.Tensor): number of valid frames in input + sequence, with shape () or (1,). + beam_width (int): beam size to use during search. + + Returns: + List[Hypothesis]: top-``beam_width`` hypotheses found by beam search. + """ + if input.dim() != 2 and not (input.dim() == 3 and input.shape[0] == 1): + raise ValueError("input must be of shape (T, D) or (1, T, D)") + if input.dim() == 2: + input = input.unsqueeze(0) + + if length.shape != () and length.shape != (1,): + raise ValueError("length must be of shape () or (1,)") + if input.dim() == 0: + input = input.unsqueeze(0) + + enc_out, _ = self.model.transcribe(input, length) + return self._search(enc_out, None, beam_width) + + @torch.jit.export + def infer( + self, + input: torch.Tensor, + length: torch.Tensor, + beam_width: int, + state: Optional[List[List[torch.Tensor]]] = None, + hypothesis: Optional[Hypothesis] = None, + ) -> Tuple[List[Hypothesis], List[List[torch.Tensor]]]: + r"""Performs beam search for the given input sequence in streaming mode. + + T: number of frames; + D: feature dimension of each frame. + + Args: + input (torch.Tensor): sequence of input frames, with shape (T, D) or (1, T, D). + length (torch.Tensor): number of valid frames in input + sequence, with shape () or (1,). + beam_width (int): beam size to use during search. 
+ state (List[List[torch.Tensor]] or None, optional): list of lists of tensors + representing transcription network internal state generated in preceding + invocation. (Default: ``None``) + hypothesis (Hypothesis or None): hypothesis from preceding invocation to seed + search with. (Default: ``None``) + + Returns: + (List[Hypothesis], List[List[torch.Tensor]]): + List[Hypothesis] + top-``beam_width`` hypotheses found by beam search. + List[List[torch.Tensor]] + list of lists of tensors representing transcription network + internal state generated in current invocation. + """ + if input.dim() != 2 and not (input.dim() == 3 and input.shape[0] == 1): + raise ValueError("input must be of shape (T, D) or (1, T, D)") + if input.dim() == 2: + input = input.unsqueeze(0) + + if length.shape != () and length.shape != (1,): + raise ValueError("length must be of shape () or (1,)") + if length.dim() == 0: + length = length.unsqueeze(0) + + enc_out, _, state = self.model.transcribe_streaming(input, length, state) + return self._search(enc_out, hypothesis, beam_width), state diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/__init__.py b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bf7bb4b86e58121241ca4ffd176cac92d4342e6a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/__init__.py @@ -0,0 +1,12 @@ +from ._vggish import VGGISH, VGGishBundle +from .hifigan_pipeline import HIFIGAN_VOCODER_V3_LJSPEECH, HiFiGANVocoderBundle +from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3 + +__all__ = [ + "EMFORMER_RNNT_BASE_MUSTC", + "EMFORMER_RNNT_BASE_TEDLIUM3", + "HIFIGAN_VOCODER_V3_LJSPEECH", + "HiFiGANVocoderBundle", + "VGGISH", + "VGGishBundle", +] diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/__pycache__/__init__.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d81739549dfa348a5ba8272050ed69b03688a9a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/__pycache__/hifigan_pipeline.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/__pycache__/hifigan_pipeline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5941387397aa3f7966c438cf31eb48ca3c636caa Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/__pycache__/hifigan_pipeline.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/__pycache__/rnnt_pipeline.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/__pycache__/rnnt_pipeline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a78e0d0094930513e20c7cc9d6206faedc5d096f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/__pycache__/rnnt_pipeline.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/_vggish/__init__.py b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/_vggish/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8cd4774f56a300d099b24f3a9e905224967da522 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/_vggish/__init__.py @@ -0,0 +1,3 @@ +from ._vggish_pipeline import VGGISH, VGGishBundle + +__all__ = ["VGGISH", "VGGishBundle"] diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/_vggish/__pycache__/__init__.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/_vggish/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..469a0921d50d502a75fc1bcf6db13d82c0555f53 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/_vggish/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/_vggish/__pycache__/_vggish_impl.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/_vggish/__pycache__/_vggish_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d19f668af5805a86b4bc1b44edf349c67a163d57 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/_vggish/__pycache__/_vggish_impl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/_vggish/__pycache__/_vggish_pipeline.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/_vggish/__pycache__/_vggish_pipeline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c0bd0eb049d529987a37bb21469105e463dfaa5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/_vggish/__pycache__/_vggish_pipeline.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/_vggish/_vggish_impl.py b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/_vggish/_vggish_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..6eb6ea8f59490eab777f2ba699f128d7c7876adc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/_vggish/_vggish_impl.py @@ -0,0 +1,233 @@ +# Derived from torchvggish (https://github.com/harritaylor/torchvggish). +# Copyright 2017 The TensorFlow Authors All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +import math + +import torch + + +_MEL_BREAK_FREQUENCY_HERTZ = 700.0 +_MEL_HIGH_FREQUENCY_Q = 1127.0 + + +_SAMPLE_RATE = 16000 +_STFT_WINDOW_LENGTH_SECONDS = 0.025 +_STFT_HOP_LENGTH_SECONDS = 0.010 +_MEL_MIN_HZ = 125 +_MEL_MAX_HZ = 7500 +_NUM_BANDS = 64 +_LOG_OFFSET = 0.01 +_EXAMPLE_WINDOW_SECONDS = 0.96 # Each example contains 96 10ms frames +_EXAMPLE_HOP_SECONDS = 0.96 # with zero overlap. 
+ + +def _build_features_network(): + layers = [] + + for input_dim, output_dim in [(1, 64), (64, 128)]: + layers += [ + torch.nn.Conv2d(input_dim, output_dim, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), + torch.nn.ReLU(inplace=True), + torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False), + ] + + for input_dim, output_dim in [(128, 256), (256, 512)]: + layers += [ + torch.nn.Conv2d(input_dim, output_dim, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), + torch.nn.ReLU(inplace=True), + torch.nn.Conv2d( + output_dim, + output_dim, + kernel_size=(3, 3), + stride=(1, 1), + padding=(1, 1), + ), + torch.nn.ReLU(inplace=True), + torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False), + ] + + return torch.nn.Sequential(*layers) + + +def _build_embedding_network(): + return torch.nn.Sequential( + torch.nn.Linear(512 * 4 * 6, 4096), + torch.nn.ReLU(True), + torch.nn.Linear(4096, 4096), + torch.nn.ReLU(True), + torch.nn.Linear(4096, 128), + torch.nn.ReLU(True), + ) + + +def _frame(data, window_length, hop_length): + num_samples = data.shape[0] + num_frames = 1 + int(math.floor((num_samples - window_length) / hop_length)) + shape = (num_frames, window_length) + data.shape[1:] + strides = (data.stride()[0] * hop_length,) + data.stride() + return torch.as_strided(data, shape, strides) + + +def _stft_magnitude(signal, fft_length, hop_length=None, window_length=None): + frames = _frame(signal, window_length, hop_length) + window = torch.hann_window(window_length, periodic=True).to(signal.device) + windowed_frames = frames * window + return torch.abs(torch.fft.rfft(windowed_frames, int(fft_length))) + + +def _hertz_to_mel(frequencies_hertz): + return _MEL_HIGH_FREQUENCY_Q * torch.log(1.0 + (frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ)) + + +def _spectrogram_to_mel_matrix( + num_mel_bins=20, + num_spectrogram_bins=129, + audio_sample_rate=8000, + lower_edge_hertz=125.0, + upper_edge_hertz=3800.0, +): + 
nyquist_hertz = audio_sample_rate / 2.0 + if lower_edge_hertz < 0.0: + raise ValueError("lower_edge_hertz %.1f must be >= 0" % lower_edge_hertz) + if lower_edge_hertz >= upper_edge_hertz: + raise ValueError("lower_edge_hertz %.1f >= upper_edge_hertz %.1f" % (lower_edge_hertz, upper_edge_hertz)) + + if upper_edge_hertz > nyquist_hertz: + raise ValueError("upper_edge_hertz %.1f is greater than Nyquist %.1f" % (upper_edge_hertz, nyquist_hertz)) + spectrogram_bins_hertz = torch.linspace(0.0, nyquist_hertz, num_spectrogram_bins) + + spectrogram_bins_mel = _hertz_to_mel(spectrogram_bins_hertz) + # The i'th mel band (starting from i=1) has center frequency + # band_edges_mel[i], lower edge band_edges_mel[i-1], and higher edge + # band_edges_mel[i+1]. Thus, we need num_mel_bins + 2 values in + # the band_edges_mel arrays. + band_edges_mel = torch.linspace( + _hertz_to_mel(torch.tensor(lower_edge_hertz)), + _hertz_to_mel(torch.tensor(upper_edge_hertz)), + num_mel_bins + 2, + ) + # Matrix to post-multiply feature arrays whose rows are num_spectrogram_bins + # of spectrogram values. + mel_weights_matrix = torch.empty((num_spectrogram_bins, num_mel_bins)) + for i in range(num_mel_bins): + lower_edge_mel, center_mel, upper_edge_mel = band_edges_mel[i : i + 3] + # Calculate lower and upper slopes for every spectrogram bin. + # Line segments are linear in the *mel* domain, not hertz. + lower_slope = (spectrogram_bins_mel - lower_edge_mel) / (center_mel - lower_edge_mel) + upper_slope = (upper_edge_mel - spectrogram_bins_mel) / (upper_edge_mel - center_mel) + + # .. then intersect them with each other and zero. + mel_weights_matrix[:, i] = torch.maximum(torch.tensor(0.0), torch.minimum(lower_slope, upper_slope)) + + # HTK excludes the spectrogram DC bin; make sure it always gets a zero + # coefficient. 
+ mel_weights_matrix[0, :] = 0.0 + return mel_weights_matrix + + +def _log_mel_spectrogram( + data, + audio_sample_rate=8000, + log_offset=0.0, + window_length_secs=0.025, + hop_length_secs=0.010, + **kwargs, +): + window_length_samples = int(round(audio_sample_rate * window_length_secs)) + hop_length_samples = int(round(audio_sample_rate * hop_length_secs)) + fft_length = 2 ** int(math.ceil(math.log(window_length_samples) / math.log(2.0))) + + spectrogram = _stft_magnitude( + data, + fft_length=fft_length, + hop_length=hop_length_samples, + window_length=window_length_samples, + ) + mel_spectrogram = torch.matmul( + spectrogram, + _spectrogram_to_mel_matrix( + num_spectrogram_bins=spectrogram.shape[1], + audio_sample_rate=audio_sample_rate, + **kwargs, + ).to(spectrogram), + ) + return torch.log(mel_spectrogram + log_offset) + + +def _waveform_to_examples(data): + # Compute log mel spectrogram features, with shape (n_frame, n_mel) + log_mel = _log_mel_spectrogram( + data, + audio_sample_rate=_SAMPLE_RATE, + log_offset=_LOG_OFFSET, + window_length_secs=_STFT_WINDOW_LENGTH_SECONDS, + hop_length_secs=_STFT_HOP_LENGTH_SECONDS, + num_mel_bins=_NUM_BANDS, + lower_edge_hertz=_MEL_MIN_HZ, + upper_edge_hertz=_MEL_MAX_HZ, + ) + + # Frame features into examples, with shape (n_example, n_frame, n_mel) + features_sample_rate = 1.0 / _STFT_HOP_LENGTH_SECONDS + example_window_length = int(round(_EXAMPLE_WINDOW_SECONDS * features_sample_rate)) + + example_hop_length = int(round(_EXAMPLE_HOP_SECONDS * features_sample_rate)) + log_mel_examples = _frame(log_mel, window_length=example_window_length, hop_length=example_hop_length) + + # (n_example, 1, n_frame, n_mel) + return log_mel_examples.unsqueeze(1) + + +class VGGish(torch.nn.Module): + """Implementation of VGGish model :cite:`45611`.""" + + def __init__(self): + super().__init__() + + self.features_network = _build_features_network() + self.embedding_network = _build_embedding_network() + + def forward(self, input: 
torch.Tensor) -> torch.Tensor: + """ + Args: + input (torch.Tensor): batch of spectrograms, with shape `(n_example, 1, n_frame, 64)`. + + Returns: + torch.Tensor: model output, with shape `(n_example, 128)`. + """ + x = self.features_network(input) + + x = x.permute(0, 2, 3, 1) + x = x.reshape(x.size(0), -1) + + return self.embedding_network(x) + + +class VGGishInputProcessor: + """Converts raw waveforms to batches of examples to use as inputs to VGGish.""" + + def __call__(self, input: torch.Tensor) -> torch.Tensor: + """ + Args: + input (torch.Tensor): waveform, with shape `(T,)`. + sample_rate (int): sample rate of waveform in hertz. + + Returns: + torch.Tensor: batch of examples to pass to VGGish, with shape `(n_example, 1, n_frame, 64)`. + """ + if len(input.shape) != 1: + raise ValueError("input waveform must have dimension of 1.") + return _waveform_to_examples(input) diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/_vggish/_vggish_pipeline.py b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/_vggish/_vggish_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..7b0f527e73eb61fe4a9fa7d7d86ea467f9ae8a9e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/_vggish/_vggish_pipeline.py @@ -0,0 +1,82 @@ +from dataclasses import dataclass +from typing import Callable, Dict + +import torch +import torchaudio + +from ._vggish_impl import _SAMPLE_RATE, VGGish as _VGGish, VGGishInputProcessor as _VGGishInputProcessor + + +def _get_state_dict(): + path = torchaudio.utils.download_asset("models/vggish.pt") + return torch.load(path) + + +@dataclass +class VGGishBundle: + """VGGish :cite:`45611` inference pipeline ported from + `torchvggish `__ + and `tensorflow-models `__. 
+ + Example: + >>> import torchaudio + >>> from torchaudio.prototype.pipelines import VGGISH + >>> + >>> input_sr = VGGISH.sample_rate + >>> input_proc = VGGISH.get_input_processor() + >>> model = VGGISH.get_model() + >>> + >>> waveform, sr = torchaudio.load( + >>> "Chopin_Ballade_-1_In_G_Minor,_Op._23.mp3", + >>> ) + >>> waveform = waveform.squeeze(0) + >>> waveform = torchaudio.functional.resample(waveform, sr, input_sr) + >>> mono_output = model(input_proc(waveform)) + """ + + class VGGish(_VGGish): + __doc__ = _VGGish.__doc__ + + class VGGishInputProcessor(_VGGishInputProcessor): + __doc__ = _VGGishInputProcessor.__doc__ + + _state_dict_func: Callable[[], Dict] + + @property + def sample_rate(self) -> int: + """Sample rate of input waveform expected by input processor and model. + + :type: int + """ + return _SAMPLE_RATE + + def get_model(self) -> VGGish: + """Constructs pre-trained VGGish model. Downloads and caches weights as necessary. + + Returns: + VGGish: VGGish model with pre-trained weights loaded. + """ + model = self.VGGish() + state_dict = self._state_dict_func() + model.load_state_dict(state_dict) + model.eval() + return model + + def get_input_processor(self) -> VGGishInputProcessor: + """Constructs input processor for VGGish. + + Returns: + VGGishInputProcessor: input processor for VGGish. + """ + return self.VGGishInputProcessor() + + +VGGISH = VGGishBundle(_get_state_dict) +VGGISH.__doc__ = """Pre-trained VGGish :cite:`45611` inference pipeline ported from + `torchvggish `__ + and `tensorflow-models `__. + + Per the `documentation `__ + for the original model, the model is "trained on a large YouTube dataset (a preliminary version of + what later became YouTube-8M)". 
+ """ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/hifigan_pipeline.py b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/hifigan_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..9aa383deb010c872ef5817962396cb122281b382 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/hifigan_pipeline.py @@ -0,0 +1,228 @@ +from dataclasses import dataclass +from typing import Any, Dict, Optional + +import torch +import torch.nn.functional as F +from torch.nn import Module +from torchaudio._internal import load_state_dict_from_url + +from torchaudio.prototype.models.hifi_gan import hifigan_vocoder, HiFiGANVocoder +from torchaudio.transforms import MelSpectrogram + + +@dataclass +class HiFiGANVocoderBundle: + """Data class that bundles associated information to use pretrained + :py:class:`~torchaudio.prototype.models.HiFiGANVocoder`. + + This class provides interfaces for instantiating the pretrained model along with + the information necessary to retrieve pretrained weights and additional data + to be used with the model. + + Torchaudio library instantiates objects of this class, each of which represents + a different pretrained model. Client code should access pretrained models via these + instances. + + This bundle can convert mel spectrorgam to waveforms and vice versa. A typical use case would be a flow like + `text -> mel spectrogram -> waveform`, where one can use an external component, e.g. Tacotron2, + to generate mel spectrogram from text. Please see below for the code example. + + Example: Transform synthetic mel spectrogram to audio. 
+ >>> import torch + >>> import torchaudio + >>> # Since HiFiGAN bundle is in prototypes, it needs to be exported explicitly + >>> from torchaudio.prototype.pipelines import HIFIGAN_VOCODER_V3_LJSPEECH as bundle + >>> + >>> # Load the HiFiGAN bundle + >>> vocoder = bundle.get_vocoder() + Downloading: "https://download.pytorch.org/torchaudio/models/hifigan_vocoder_v3_ljspeech.pth" + 100%|████████████| 5.59M/5.59M [00:00<00:00, 18.7MB/s] + >>> + >>> # Generate synthetic mel spectrogram + >>> specgram = torch.sin(0.5 * torch.arange(start=0, end=100)).expand(bundle._vocoder_params["in_channels"], 100) + >>> + >>> # Transform mel spectrogram into audio + >>> waveform = vocoder(specgram) + >>> torchaudio.save('sample.wav', waveform, bundle.sample_rate) + + Example: Usage together with Tacotron2, text to audio. + >>> import torch + >>> import torchaudio + >>> # Since HiFiGAN bundle is in prototypes, it needs to be exported explicitly + >>> from torchaudio.prototype.pipelines import HIFIGAN_VOCODER_V3_LJSPEECH as bundle_hifigan + >>> + >>> # Load Tacotron2 bundle + >>> bundle_tactron2 = torchaudio.pipelines.TACOTRON2_WAVERNN_CHAR_LJSPEECH + >>> processor = bundle_tactron2.get_text_processor() + >>> tacotron2 = bundle_tactron2.get_tacotron2() + >>> + >>> # Use Tacotron2 to convert text to mel spectrogram + >>> text = "A quick brown fox jumped over a lazy dog" + >>> input, lengths = processor(text) + >>> specgram, lengths, _ = tacotron2.infer(input, lengths) + >>> + >>> # Load HiFiGAN bundle + >>> vocoder = bundle_hifigan.get_vocoder() + Downloading: "https://download.pytorch.org/torchaudio/models/hifigan_vocoder_v3_ljspeech.pth" + 100%|████████████| 5.59M/5.59M [00:03<00:00, 1.55MB/s] + >>> + >>> # Use HiFiGAN to convert mel spectrogram to audio + >>> waveform = vocoder(specgram).squeeze(0) + >>> torchaudio.save('sample.wav', waveform, bundle_hifigan.sample_rate) + """ # noqa: E501 + + _path: str + _vocoder_params: Dict[str, Any] # Vocoder parameters + _mel_params: 
Dict[str, Any] # Mel transformation parameters + _sample_rate: float + + def _get_state_dict(self, dl_kwargs): + url = f"https://download.pytorch.org/torchaudio/models/{self._path}" + dl_kwargs = {} if dl_kwargs is None else dl_kwargs + state_dict = load_state_dict_from_url(url, **dl_kwargs) + return state_dict + + def get_vocoder(self, *, dl_kwargs=None) -> HiFiGANVocoder: + """Construct the HiFiGAN Generator model, which can be used a vocoder, and load the pretrained weight. + + The weight file is downloaded from the internet and cached with + :func:`torch.hub.load_state_dict_from_url` + + Args: + dl_kwargs (dictionary of keyword arguments): Passed to :func:`torch.hub.load_state_dict_from_url`. + + Returns: + Variation of :py:class:`~torchaudio.prototype.models.HiFiGANVocoder`. + """ + model = hifigan_vocoder(**self._vocoder_params) + model.load_state_dict(self._get_state_dict(dl_kwargs)) + model.eval() + return model + + def get_mel_transform(self) -> Module: + """Construct an object which transforms waveforms into mel spectrograms.""" + return _HiFiGANMelSpectrogram( + n_mels=self._vocoder_params["in_channels"], + sample_rate=self._sample_rate, + **self._mel_params, + ) + + @property + def sample_rate(self): + """Sample rate of the audio that the model is trained on. + + :type: float + """ + return self._sample_rate + + +class _HiFiGANMelSpectrogram(torch.nn.Module): + """ + Generate mel spectrogram in a way equivalent to the original HiFiGAN implementation: + https://github.com/jik876/hifi-gan/blob/4769534d45265d52a904b850da5a622601885777/meldataset.py#L49-L72 + + This class wraps around :py:class:`torchaudio.transforms.MelSpectrogram`, but performs extra steps to achive + equivalence with the HiFiGAN implementation. + + Args: + hop_size (int): Length of hop between STFT windows. + n_fft (int): Size of FFT, creates ``n_fft // 2 + 1`` bins. + win_length (int): Window size. + f_min (float or None): Minimum frequency. + f_max (float or None): Maximum frequency. 
+ sample_rate (int): Sample rate of audio signal. + n_mels (int): Number of mel filterbanks. + """ + + def __init__( + self, + hop_size: int, + n_fft: int, + win_length: int, + f_min: Optional[float], + f_max: Optional[float], + sample_rate: float, + n_mels: int, + ): + super(_HiFiGANMelSpectrogram, self).__init__() + self.mel_transform = MelSpectrogram( + sample_rate=sample_rate, + n_fft=n_fft, + win_length=win_length, + hop_length=hop_size, + f_min=f_min, + f_max=f_max, + n_mels=n_mels, + normalized=False, + pad=0, + mel_scale="slaney", + norm="slaney", + center=False, + ) + self.sample_rate = sample_rate + self.hop_size = hop_size + self.n_fft = n_fft + self.win_length = win_length + self.f_min = f_min + self.f_max = f_max + self.n_mels = n_mels + self.pad_size = int((n_fft - hop_size) / 2) + + def forward(self, waveform: torch.Tensor) -> torch.Tensor: + """Generate mel spectrogram from a waveform. Should have same sample rate as ``self.sample_rate``. + + Args: + waveform (Tensor): waveform of shape ``(batch_size, time_length)``. 
+ Returns: + Tensor of shape ``(batch_size, n_mel, time_length)`` + """ + ref_waveform = F.pad(waveform.unsqueeze(1), (self.pad_size, self.pad_size), mode="reflect") + ref_waveform = ref_waveform.squeeze(1) + + spectr = (self.mel_transform.spectrogram(ref_waveform) + 1e-9) ** 0.5 + mel_spectrogram = self.mel_transform.mel_scale(spectr) + mel_spectrogram = torch.log(torch.clamp(mel_spectrogram, min=1e-5)) + return mel_spectrogram + + +HIFIGAN_VOCODER_V3_LJSPEECH = HiFiGANVocoderBundle( + "hifigan_vocoder_v3_ljspeech.pth", + _vocoder_params={ + "upsample_rates": (8, 8, 4), + "upsample_kernel_sizes": (16, 16, 8), + "upsample_initial_channel": 256, + "resblock_kernel_sizes": (3, 5, 7), + "resblock_dilation_sizes": ((1, 2), (2, 6), (3, 12)), + "resblock_type": 2, + "in_channels": 80, + "lrelu_slope": 0.1, + }, + _mel_params={ + "hop_size": 256, + "n_fft": 1024, + "win_length": 1024, + "f_min": 0, + "f_max": 8000, + }, + _sample_rate=22050, +) +HIFIGAN_VOCODER_V3_LJSPEECH.__doc__ = """HiFiGAN Vocoder pipeline, trained on *The LJ Speech Dataset* + :cite:`ljspeech17`. + + This pipeine can be used with an external component which generates mel spectrograms from text, for example, + Tacotron2 - see examples in :py:class:`HiFiGANVocoderBundle`. + Although this works with the existing Tacotron2 bundles, for the best results one needs to retrain Tacotron2 + using the same data preprocessing pipeline which was used for training HiFiGAN. In particular, the original + HiFiGAN implementation uses a custom method of generating mel spectrograms from waveforms, different from + :py:class:`torchaudio.transforms.MelSpectrogram`. We reimplemented this transform as + :py:meth:`HiFiGANVocoderBundle.get_mel_transform`, making sure it is equivalent to the original HiFiGAN code `here + `_. + + The underlying vocoder is constructed by + :py:func:`torchaudio.prototype.models.hifigan_vocoder`. 
The weights are converted from the ones published + with the original paper :cite:`NEURIPS2020_c5d73680` under `MIT License + `__. See links to + pre-trained models on `GitHub `__. + + Please refer to :py:class:`HiFiGANVocoderBundle` for usage instructions. + """ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/rnnt_pipeline.py b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/rnnt_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..c82e2f83a2a4a1dc241e5b1cf15fad0690446d72 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/prototype/pipelines/rnnt_pipeline.py @@ -0,0 +1,58 @@ +from functools import partial + +from torchaudio.models import emformer_rnnt_base +from torchaudio.pipelines import RNNTBundle + + +EMFORMER_RNNT_BASE_MUSTC = RNNTBundle( + _rnnt_path="models/emformer_rnnt_base_mustc.pt", + _rnnt_factory_func=partial(emformer_rnnt_base, num_symbols=501), + _global_stats_path="pipeline-assets/global_stats_rnnt_mustc.json", + _sp_model_path="pipeline-assets/spm_bpe_500_mustc.model", + _right_padding=4, + _blank=500, + _sample_rate=16000, + _n_fft=400, + _n_mels=80, + _hop_length=160, + _segment_length=16, + _right_context_length=4, +) +EMFORMER_RNNT_BASE_MUSTC.__doc__ = """Pre-trained Emformer-RNNT-based ASR pipeline capable of performing both +streaming and non-streaming inference. + +The underlying model is constructed by :py:func:`torchaudio.models.emformer_rnnt_base` +and utilizes weights trained on *MuST-C release v2.0* :cite:`CATTONI2021101155` dataset +using training script ``train.py`` +`here `__ +with ``num_symbols=501``. + +Please refer to :py:class:`torchaudio.pipelines.RNNTBundle` for usage instructions. 
+""" + + +EMFORMER_RNNT_BASE_TEDLIUM3 = RNNTBundle( + _rnnt_path="models/emformer_rnnt_base_tedlium3.pt", + _rnnt_factory_func=partial(emformer_rnnt_base, num_symbols=501), + _global_stats_path="pipeline-assets/global_stats_rnnt_tedlium3.json", + _sp_model_path="pipeline-assets/spm_bpe_500_tedlium3.model", + _right_padding=4, + _blank=500, + _sample_rate=16000, + _n_fft=400, + _n_mels=80, + _hop_length=160, + _segment_length=16, + _right_context_length=4, +) +EMFORMER_RNNT_BASE_TEDLIUM3.__doc__ = """Pre-trained Emformer-RNNT-based ASR pipeline capable of performing both +streaming and non-streaming inference. + +The underlying model is constructed by :py:func:`torchaudio.models.emformer_rnnt_base` +and utilizes weights trained on *TED-LIUM Release 3* :cite:`rousseau2012tedlium` dataset +using training script ``train.py`` +`here `__ +with ``num_symbols=501``. + +Please refer to :py:class:`torchaudio.pipelines.RNNTBundle` for usage instructions. +""" diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/transforms/__init__.py b/venv/lib/python3.10/site-packages/torchaudio/prototype/transforms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..457f20e119a0640336ff91eb92ff68dd42fd23f6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/prototype/transforms/__init__.py @@ -0,0 +1,9 @@ +from ._transforms import BarkScale, BarkSpectrogram, ChromaScale, ChromaSpectrogram, InverseBarkScale + +__all__ = [ + "BarkScale", + "BarkSpectrogram", + "ChromaScale", + "ChromaSpectrogram", + "InverseBarkScale", +] diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/transforms/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/prototype/transforms/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..819d7b6ce73a0e88b2d9b48bed3501138f603c68 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torchaudio/prototype/transforms/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/transforms/__pycache__/_transforms.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/prototype/transforms/__pycache__/_transforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e4a3c0e9094b37b86b1c46cc2acb5154af2ae24 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/prototype/transforms/__pycache__/_transforms.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/prototype/transforms/_transforms.py b/venv/lib/python3.10/site-packages/torchaudio/prototype/transforms/_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..9d89cc5339c84d927c5a4d91a014026a9242f675 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/prototype/transforms/_transforms.py @@ -0,0 +1,456 @@ +from typing import Callable, Optional + +import torch +from torchaudio.prototype.functional import barkscale_fbanks, chroma_filterbank +from torchaudio.transforms import Spectrogram + + +class BarkScale(torch.nn.Module): + r"""Turn a normal STFT into a bark frequency STFT with triangular filter banks. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + n_barks (int, optional): Number of bark filterbanks. (Default: ``128``) + sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``) + f_min (float, optional): Minimum frequency. (Default: ``0.``) + f_max (float or None, optional): Maximum frequency. (Default: ``sample_rate // 2``) + n_stft (int, optional): Number of bins in STFT. See ``n_fft`` in :class:`Spectrogram`. (Default: ``201``) + norm (str or None, optional): If ``"slaney"``, divide the triangular bark weights by the width of the bark band + (area normalization). 
(Default: ``None``) + bark_scale (str, optional): Scale to use: ``traunmuller``, ``schroeder`` or ``wang``. (Default: ``traunmuller``) + + Example + >>> waveform, sample_rate = torchaudio.load("test.wav", normalize=True) + >>> spectrogram_transform = transforms.Spectrogram(n_fft=1024) + >>> spectrogram = spectrogram_transform(waveform) + >>> barkscale_transform = transforms.BarkScale(sample_rate=sample_rate, n_stft=1024 // 2 + 1) + >>> barkscale_spectrogram = barkscale_transform(spectrogram) + + See also: + :py:func:`torchaudio.prototype.functional.barkscale_fbanks` - The function used to + generate the filter banks. + """ + __constants__ = ["n_barks", "sample_rate", "f_min", "f_max"] + + def __init__( + self, + n_barks: int = 128, + sample_rate: int = 16000, + f_min: float = 0.0, + f_max: Optional[float] = None, + n_stft: int = 201, + bark_scale: str = "traunmuller", + ) -> None: + super(BarkScale, self).__init__() + self.n_barks = n_barks + self.sample_rate = sample_rate + self.f_max = f_max if f_max is not None else float(sample_rate // 2) + self.f_min = f_min + self.bark_scale = bark_scale + + if f_min > self.f_max: + raise ValueError("Require f_min: {} <= f_max: {}".format(f_min, self.f_max)) + + fb = barkscale_fbanks(n_stft, self.f_min, self.f_max, self.n_barks, self.sample_rate, self.bark_scale) + self.register_buffer("fb", fb) + + def forward(self, specgram: torch.Tensor) -> torch.Tensor: + r""" + Args: + specgram (torch.Tensor): A spectrogram STFT of dimension (..., freq, time). + + Returns: + torch.Tensor: Bark frequency spectrogram of size (..., ``n_barks``, time). + """ + + # (..., time, freq) dot (freq, n_mels) -> (..., n_mels, time) + bark_specgram = torch.matmul(specgram.transpose(-1, -2), self.fb).transpose(-1, -2) + + return bark_specgram + + +class InverseBarkScale(torch.nn.Module): + r"""Estimate a STFT in normal frequency domain from bark frequency domain. + + .. 
devices:: CPU CUDA + + It minimizes the euclidian norm between the input bark-spectrogram and the product between + the estimated spectrogram and the filter banks using SGD. + + Args: + n_stft (int): Number of bins in STFT. See ``n_fft`` in :class:`Spectrogram`. + n_barks (int, optional): Number of bark filterbanks. (Default: ``128``) + sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``) + f_min (float, optional): Minimum frequency. (Default: ``0.``) + f_max (float or None, optional): Maximum frequency. (Default: ``sample_rate // 2``) + max_iter (int, optional): Maximum number of optimization iterations. (Default: ``100000``) + tolerance_loss (float, optional): Value of loss to stop optimization at. (Default: ``1e-5``) + tolerance_change (float, optional): Difference in losses to stop optimization at. (Default: ``1e-8``) + sgdargs (dict or None, optional): Arguments for the SGD optimizer. (Default: ``None``) + bark_scale (str, optional): Scale to use: ``traunmuller``, ``schroeder`` or ``wang``. 
(Default: ``traunmuller``) + + Example + >>> waveform, sample_rate = torchaudio.load("test.wav", normalize=True) + >>> mel_spectrogram_transform = transforms.BarkSpectrogram(sample_rate, n_fft=1024) + >>> mel_spectrogram = bark_spectrogram_transform(waveform) + >>> inverse_barkscale_transform = transforms.InverseBarkScale(n_stft=1024 // 2 + 1) + >>> spectrogram = inverse_barkscale_transform(mel_spectrogram) + """ + __constants__ = [ + "n_stft", + "n_barks", + "sample_rate", + "f_min", + "f_max", + "max_iter", + "tolerance_loss", + "tolerance_change", + "sgdargs", + ] + + def __init__( + self, + n_stft: int, + n_barks: int = 128, + sample_rate: int = 16000, + f_min: float = 0.0, + f_max: Optional[float] = None, + max_iter: int = 100000, + tolerance_loss: float = 1e-5, + tolerance_change: float = 1e-8, + sgdargs: Optional[dict] = None, + bark_scale: str = "traunmuller", + ) -> None: + super(InverseBarkScale, self).__init__() + self.n_barks = n_barks + self.sample_rate = sample_rate + self.f_max = f_max or float(sample_rate // 2) + self.f_min = f_min + self.max_iter = max_iter + self.tolerance_loss = tolerance_loss + self.tolerance_change = tolerance_change + self.sgdargs = sgdargs or {"lr": 0.1, "momentum": 0.9} + + if f_min > self.f_max: + raise ValueError("Require f_min: {} <= f_max: {}".format(f_min, self.f_max)) + + fb = barkscale_fbanks(n_stft, self.f_min, self.f_max, self.n_barks, self.sample_rate, bark_scale) + self.register_buffer("fb", fb) + + def forward(self, barkspec: torch.Tensor) -> torch.Tensor: + r""" + Args: + barkspec (torch.Tensor): A Bark frequency spectrogram of dimension (..., ``n_barks``, time) + + Returns: + torch.Tensor: Linear scale spectrogram of size (..., freq, time) + """ + # pack batch + shape = barkspec.size() + barkspec = barkspec.view(-1, shape[-2], shape[-1]) + + n_barks, time = shape[-2], shape[-1] + freq, _ = self.fb.size() # (freq, n_mels) + barkspec = barkspec.transpose(-1, -2) + if self.n_barks != n_barks: + raise 
ValueError("Expected an input with {} bark bins. Found: {}".format(self.n_barks, n_barks)) + + specgram = torch.rand( + barkspec.size()[0], time, freq, requires_grad=True, dtype=barkspec.dtype, device=barkspec.device + ) + + optim = torch.optim.SGD([specgram], **self.sgdargs) + + loss = float("inf") + for _ in range(self.max_iter): + optim.zero_grad() + diff = barkspec - specgram.matmul(self.fb) + new_loss = diff.pow(2).sum(axis=-1).mean() + # take sum over bark-frequency then average over other dimensions + # so that loss threshold is applied par unit timeframe + new_loss.backward() + optim.step() + specgram.data = specgram.data.clamp(min=0) + + new_loss = new_loss.item() + if new_loss < self.tolerance_loss or abs(loss - new_loss) < self.tolerance_change: + break + loss = new_loss + + specgram.requires_grad_(False) + specgram = specgram.clamp(min=0).transpose(-1, -2) + + # unpack batch + specgram = specgram.view(shape[:-2] + (freq, time)) + return specgram + + +class BarkSpectrogram(torch.nn.Module): + r"""Create BarkSpectrogram for a raw audio signal. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + This is a composition of :py:func:`torchaudio.transforms.Spectrogram` and + and :py:func:`torchaudio.transforms.BarkScale`. + + Sources + * https://www.fon.hum.uva.nl/praat/manual/BarkSpectrogram.html + * Traunmüller, Hartmut. "Analytical Expressions for the Tonotopic Sensory Scale." Journal of the Acoustical + * Society of America. Vol. 88, Issue 1, 1990, pp. 97–100. + * https://ccrma.stanford.edu/courses/120-fall-2003/lecture-5.html + + Args: + sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``) + n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``) + win_length (int or None, optional): Window size. (Default: ``n_fft``) + hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``) + f_min (float, optional): Minimum frequency. 
(Default: ``0.``) + f_max (float or None, optional): Maximum frequency. (Default: ``None``) + pad (int, optional): Two sided padding of signal. (Default: ``0``) + n_mels (int, optional): Number of mel filterbanks. (Default: ``128``) + window_fn (Callable[..., torch.Tensor], optional): A function to create a window tensor + that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``) + power (float, optional): Exponent for the magnitude spectrogram, + (must be > 0) e.g., 1 for energy, 2 for power, etc. (Default: ``2``) + normalized (bool, optional): Whether to normalize by magnitude after stft. (Default: ``False``) + wkwargs (Dict[..., ...] or None, optional): Arguments for window function. (Default: ``None``) + center (bool, optional): whether to pad :attr:`waveform` on both sides so + that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`. + (Default: ``True``) + pad_mode (string, optional): controls the padding method used when + :attr:`center` is ``True``. (Default: ``"reflect"``) + bark_scale (str, optional): Scale to use: ``traunmuller``, ``schroeder`` or ``wang``. (Default: ``traunmuller``) + + Example + >>> waveform, sample_rate = torchaudio.load("test.wav", normalize=True) + >>> transform = transforms.BarkSpectrogram(sample_rate) + >>> bark_specgram = transform(waveform) # (channel, n_barks, time) + + See also: + :py:func:`torchaudio.functional.melscale_fbanks` - The function used to + generate the filter banks. 
+ """ + __constants__ = ["sample_rate", "n_fft", "win_length", "hop_length", "pad", "n_barks", "f_min"] + + def __init__( + self, + sample_rate: int = 16000, + n_fft: int = 400, + win_length: Optional[int] = None, + hop_length: Optional[int] = None, + f_min: float = 0.0, + f_max: Optional[float] = None, + pad: int = 0, + n_barks: int = 128, + window_fn: Callable[..., torch.Tensor] = torch.hann_window, + power: float = 2.0, + normalized: bool = False, + wkwargs: Optional[dict] = None, + center: bool = True, + pad_mode: str = "reflect", + bark_scale: str = "traunmuller", + ) -> None: + super(BarkSpectrogram, self).__init__() + + self.sample_rate = sample_rate + self.n_fft = n_fft + self.win_length = win_length if win_length is not None else n_fft + self.hop_length = hop_length if hop_length is not None else self.win_length // 2 + self.pad = pad + self.power = power + self.normalized = normalized + self.n_barks = n_barks # number of bark frequency bins + self.f_max = f_max + self.f_min = f_min + self.spectrogram = Spectrogram( + n_fft=self.n_fft, + win_length=self.win_length, + hop_length=self.hop_length, + pad=self.pad, + window_fn=window_fn, + power=self.power, + normalized=self.normalized, + wkwargs=wkwargs, + center=center, + pad_mode=pad_mode, + onesided=True, + ) + self.bark_scale = BarkScale( + self.n_barks, self.sample_rate, self.f_min, self.f_max, self.n_fft // 2 + 1, bark_scale + ) + + def forward(self, waveform: torch.Tensor) -> torch.Tensor: + r""" + Args: + waveform (torch.Tensor): torch.Tensor of audio of dimension (..., time). + + Returns: + torch.Tensor: Bark frequency spectrogram of size (..., ``n_barks``, time). + """ + specgram = self.spectrogram(waveform) + bark_specgram = self.bark_scale(specgram) + return bark_specgram + + +class ChromaScale(torch.nn.Module): + r"""Converts spectrogram to chromagram. + + .. devices:: CPU CUDA + + .. properties:: Autograd + + Args: + sample_rate (int): Sample rate of audio signal. 
+ n_freqs (int): Number of frequency bins in STFT. See ``n_fft`` in :class:`Spectrogram`. + n_chroma (int, optional): Number of chroma. (Default: ``12``) + tuning (float, optional): Tuning deviation from A440 in fractions of a chroma bin. (Default: 0.0) + ctroct (float, optional): Center of Gaussian dominance window to weight filters by, in octaves. (Default: 5.0) + octwidth (float or None, optional): Width of Gaussian dominance window to weight filters by, in octaves. + If ``None``, then disable weighting altogether. (Default: 2.0) + norm (int, optional): order of norm to normalize filter bank by. (Default: 2) + base_c (bool, optional): If True, then start filter bank at C. Otherwise, start at A. (Default: True) + + Example + >>> waveform, sample_rate = torchaudio.load("test.wav", normalize=True) + >>> spectrogram_transform = transforms.Spectrogram(n_fft=1024) + >>> spectrogram = spectrogram_transform(waveform) + >>> chroma_transform = transforms.ChromaScale(sample_rate=sample_rate, n_freqs=1024 // 2 + 1) + >>> chroma_spectrogram = chroma_transform(spectrogram) + + See also: + :py:func:`torchaudio.prototype.functional.chroma_filterbank` — function used to + generate the filter bank. + """ + + def __init__( + self, + sample_rate: int, + n_freqs: int, + *, + n_chroma: int = 12, + tuning: float = 0.0, + ctroct: float = 5.0, + octwidth: Optional[float] = 2.0, + norm: int = 2, + base_c: bool = True, + ): + super().__init__() + fb = chroma_filterbank( + sample_rate, n_freqs, n_chroma, tuning=tuning, ctroct=ctroct, octwidth=octwidth, norm=norm, base_c=base_c + ) + self.register_buffer("fb", fb) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + r""" + Args: + specgram (torch.Tensor): Spectrogram of dimension (..., ``n_freqs``, time). + + Returns: + torch.Tensor: Chroma spectrogram of size (..., ``n_chroma``, time). 
+ """ + return torch.matmul(x.transpose(-1, -2), self.fb).transpose(-1, -2) + + +class ChromaSpectrogram(torch.nn.Module): + r"""Generates chromagram for audio signal. + + .. devices:: CPU CUDA + + .. properties:: Autograd + + Composes :py:func:`torchaudio.transforms.Spectrogram` and + and :py:func:`torchaudio.prototype.transforms.ChromaScale`. + + Args: + sample_rate (int): Sample rate of audio signal. + n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. + win_length (int or None, optional): Window size. (Default: ``n_fft``) + hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``) + pad (int, optional): Two sided padding of signal. (Default: ``0``) + window_fn (Callable[..., torch.Tensor], optional): A function to create a window tensor + that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``) + power (float, optional): Exponent for the magnitude spectrogram, + (must be > 0) e.g., 1 for energy, 2 for power, etc. (Default: ``2``) + normalized (bool, optional): Whether to normalize by magnitude after stft. (Default: ``False``) + wkwargs (Dict[..., ...] or None, optional): Arguments for window function. (Default: ``None``) + center (bool, optional): whether to pad :attr:`waveform` on both sides so + that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`. + (Default: ``True``) + pad_mode (string, optional): controls the padding method used when + :attr:`center` is ``True``. (Default: ``"reflect"``) + n_chroma (int, optional): Number of chroma. (Default: ``12``) + tuning (float, optional): Tuning deviation from A440 in fractions of a chroma bin. (Default: 0.0) + ctroct (float, optional): Center of Gaussian dominance window to weight filters by, in octaves. (Default: 5.0) + octwidth (float or None, optional): Width of Gaussian dominance window to weight filters by, in octaves. + If ``None``, then disable weighting altogether. 
(Default: 2.0) + norm (int, optional): order of norm to normalize filter bank by. (Default: 2) + base_c (bool, optional): If True, then start filter bank at C. Otherwise, start at A. (Default: True) + + Example + >>> waveform, sample_rate = torchaudio.load("test.wav", normalize=True) + >>> transform = transforms.ChromaSpectrogram(sample_rate=sample_rate, n_fft=400) + >>> chromagram = transform(waveform) # (channel, n_chroma, time) + """ + + def __init__( + self, + sample_rate: int, + n_fft: int, + *, + win_length: Optional[int] = None, + hop_length: Optional[int] = None, + pad: int = 0, + window_fn: Callable[..., torch.Tensor] = torch.hann_window, + power: float = 2.0, + normalized: bool = False, + wkwargs: Optional[dict] = None, + center: bool = True, + pad_mode: str = "reflect", + n_chroma: int = 12, + tuning: float = 0.0, + ctroct: float = 5.0, + octwidth: Optional[float] = 2.0, + norm: int = 2, + base_c: bool = True, + ): + super().__init__() + self.spectrogram = Spectrogram( + n_fft=n_fft, + win_length=win_length, + hop_length=hop_length, + pad=pad, + window_fn=window_fn, + power=power, + normalized=normalized, + wkwargs=wkwargs, + center=center, + pad_mode=pad_mode, + onesided=True, + ) + self.chroma_scale = ChromaScale( + sample_rate, + n_fft // 2 + 1, + n_chroma=n_chroma, + tuning=tuning, + base_c=base_c, + ctroct=ctroct, + octwidth=octwidth, + norm=norm, + ) + + def forward(self, waveform: torch.Tensor) -> torch.Tensor: + r""" + Args: + waveform (Tensor): Tensor of audio of dimension (..., time). + + Returns: + Tensor: Chromagram of size (..., ``n_chroma``, time). 
+ """ + spectrogram = self.spectrogram(waveform) + chroma_spectrogram = self.chroma_scale(spectrogram) + return chroma_spectrogram diff --git a/venv/lib/python3.10/site-packages/torchaudio/sox_effects/__init__.py b/venv/lib/python3.10/site-packages/torchaudio/sox_effects/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..93c63cae1ded8a37226a76eec89796eb4902eafc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/sox_effects/__init__.py @@ -0,0 +1,10 @@ +from .sox_effects import apply_effects_file, apply_effects_tensor, effect_names, init_sox_effects, shutdown_sox_effects + + +__all__ = [ + "init_sox_effects", + "shutdown_sox_effects", + "effect_names", + "apply_effects_tensor", + "apply_effects_file", +] diff --git a/venv/lib/python3.10/site-packages/torchaudio/sox_effects/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/sox_effects/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e47972469c2868985315b9f3408d0029ddb375cc Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/sox_effects/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/sox_effects/__pycache__/sox_effects.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/sox_effects/__pycache__/sox_effects.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92e197a4e4d601e42db41227a990cdb6401c08d2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/sox_effects/__pycache__/sox_effects.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/sox_effects/sox_effects.py b/venv/lib/python3.10/site-packages/torchaudio/sox_effects/sox_effects.py new file mode 100644 index 0000000000000000000000000000000000000000..3d64d465acada0911e86af2f74d2b58dba9352ad --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/torchaudio/sox_effects/sox_effects.py @@ -0,0 +1,272 @@ +import os +from typing import List, Optional, Tuple + +import torch +import torchaudio +from torchaudio._internal.module_utils import deprecated +from torchaudio.utils.sox_utils import list_effects + + +sox_ext = torchaudio._extension.lazy_import_sox_ext() + + +@deprecated("Please remove the call. This function is called automatically.") +def init_sox_effects(): + """Initialize resources required to use sox effects. + + Note: + You do not need to call this function manually. It is called automatically. + + Once initialized, you do not need to call this function again across the multiple uses of + sox effects though it is safe to do so as long as :func:`shutdown_sox_effects` is not called yet. + Once :func:`shutdown_sox_effects` is called, you can no longer use SoX effects and initializing + again will result in error. + """ + pass + + +@deprecated("Please remove the call. This function is called automatically.") +def shutdown_sox_effects(): + """Clean up resources required to use sox effects. + + Note: + You do not need to call this function manually. It is called automatically. + + It is safe to call this function multiple times. + Once :py:func:`shutdown_sox_effects` is called, you can no longer use SoX effects and + initializing again will result in error. + """ + pass + + +def effect_names() -> List[str]: + """Gets list of valid sox effect names + + Returns: + List[str]: list of available effect names. + + Example + >>> torchaudio.sox_effects.effect_names() + ['allpass', 'band', 'bandpass', ... ] + """ + return list(list_effects().keys()) + + +def apply_effects_tensor( + tensor: torch.Tensor, + sample_rate: int, + effects: List[List[str]], + channels_first: bool = True, +) -> Tuple[torch.Tensor, int]: + """Apply sox effects to given Tensor + + .. devices:: CPU + + .. properties:: TorchScript + + Note: + This function only works on CPU Tensors. 
+ This function works in the way very similar to ``sox`` command, however there are slight + differences. For example, ``sox`` command adds certain effects automatically (such as + ``rate`` effect after ``speed`` and ``pitch`` and other effects), but this function does + only applies the given effects. (Therefore, to actually apply ``speed`` effect, you also + need to give ``rate`` effect with desired sampling rate.). + + Args: + tensor (torch.Tensor): Input 2D CPU Tensor. + sample_rate (int): Sample rate + effects (List[List[str]]): List of effects. + channels_first (bool, optional): Indicates if the input Tensor's dimension is + `[channels, time]` or `[time, channels]` + + Returns: + (Tensor, int): Resulting Tensor and sample rate. + The resulting Tensor has the same ``dtype`` as the input Tensor, and + the same channels order. The shape of the Tensor can be different based on the + effects applied. Sample rate can also be different based on the effects applied. + + Example - Basic usage + >>> + >>> # Defines the effects to apply + >>> effects = [ + ... ['gain', '-n'], # normalises to 0dB + ... ['pitch', '5'], # 5 cent pitch shift + ... ['rate', '8000'], # resample to 8000 Hz + ... ] + >>> + >>> # Generate pseudo wave: + >>> # normalized, channels first, 2ch, sampling rate 16000, 1 second + >>> sample_rate = 16000 + >>> waveform = 2 * torch.rand([2, sample_rate * 1]) - 1 + >>> waveform.shape + torch.Size([2, 16000]) + >>> waveform + tensor([[ 0.3138, 0.7620, -0.9019, ..., -0.7495, -0.4935, 0.5442], + [-0.0832, 0.0061, 0.8233, ..., -0.5176, -0.9140, -0.2434]]) + >>> + >>> # Apply effects + >>> waveform, sample_rate = apply_effects_tensor( + ... wave_form, sample_rate, effects, channels_first=True) + >>> + >>> # Check the result + >>> # The new waveform is sampling rate 8000, 1 second. 
+ >>> # normalization and channel order are preserved + >>> waveform.shape + torch.Size([2, 8000]) + >>> waveform + tensor([[ 0.5054, -0.5518, -0.4800, ..., -0.0076, 0.0096, -0.0110], + [ 0.1331, 0.0436, -0.3783, ..., -0.0035, 0.0012, 0.0008]]) + >>> sample_rate + 8000 + + Example - Torchscript-able transform + >>> + >>> # Use `apply_effects_tensor` in `torch.nn.Module` and dump it to file, + >>> # then run sox effect via Torchscript runtime. + >>> + >>> class SoxEffectTransform(torch.nn.Module): + ... effects: List[List[str]] + ... + ... def __init__(self, effects: List[List[str]]): + ... super().__init__() + ... self.effects = effects + ... + ... def forward(self, tensor: torch.Tensor, sample_rate: int): + ... return sox_effects.apply_effects_tensor( + ... tensor, sample_rate, self.effects) + ... + ... + >>> # Create transform object + >>> effects = [ + ... ["lowpass", "-1", "300"], # apply single-pole lowpass filter + ... ["rate", "8000"], # change sample rate to 8000 + ... ] + >>> transform = SoxEffectTensorTransform(effects, input_sample_rate) + >>> + >>> # Dump it to file and load + >>> path = 'sox_effect.zip' + >>> torch.jit.script(trans).save(path) + >>> transform = torch.jit.load(path) + >>> + >>>> # Run transform + >>> waveform, input_sample_rate = torchaudio.load("input.wav") + >>> waveform, sample_rate = transform(waveform, input_sample_rate) + >>> assert sample_rate == 8000 + """ + return sox_ext.apply_effects_tensor(tensor, sample_rate, effects, channels_first) + + +def apply_effects_file( + path: str, + effects: List[List[str]], + normalize: bool = True, + channels_first: bool = True, + format: Optional[str] = None, +) -> Tuple[torch.Tensor, int]: + """Apply sox effects to the audio file and load the resulting data as Tensor + + .. devices:: CPU + + .. properties:: TorchScript + + Note: + This function works in the way very similar to ``sox`` command, however there are slight + differences. 
For example, ``sox`` commnad adds certain effects automatically (such as + ``rate`` effect after ``speed``, ``pitch`` etc), but this function only applies the given + effects. Therefore, to actually apply ``speed`` effect, you also need to give ``rate`` + effect with desired sampling rate, because internally, ``speed`` effects only alter sampling + rate and leave samples untouched. + + Args: + path (path-like object): + Source of audio data. + effects (List[List[str]]): List of effects. + normalize (bool, optional): + When ``True``, this function converts the native sample type to ``float32``. + Default: ``True``. + + If input file is integer WAV, giving ``False`` will change the resulting Tensor type to + integer type. + This argument has no effect for formats other than integer WAV type. + + channels_first (bool, optional): When True, the returned Tensor has dimension `[channel, time]`. + Otherwise, the returned Tensor's dimension is `[time, channel]`. + format (str or None, optional): + Override the format detection with the given format. + Providing the argument might help when libsox can not infer the format + from header or extension, + + Returns: + (Tensor, int): Resulting Tensor and sample rate. + If ``normalize=True``, the resulting Tensor is always ``float32`` type. + If ``normalize=False`` and the input audio file is of integer WAV file, then the + resulting Tensor has corresponding integer type. (Note 24 bit integer type is not supported) + If ``channels_first=True``, the resulting Tensor has dimension `[channel, time]`, + otherwise `[time, channel]`. + + Example - Basic usage + >>> + >>> # Defines the effects to apply + >>> effects = [ + ... ['gain', '-n'], # normalises to 0dB + ... ['pitch', '5'], # 5 cent pitch shift + ... ['rate', '8000'], # resample to 8000 Hz + ... 
] + >>> + >>> # Apply effects and load data with channels_first=True + >>> waveform, sample_rate = apply_effects_file("data.wav", effects, channels_first=True) + >>> + >>> # Check the result + >>> waveform.shape + torch.Size([2, 8000]) + >>> waveform + tensor([[ 5.1151e-03, 1.8073e-02, 2.2188e-02, ..., 1.0431e-07, + -1.4761e-07, 1.8114e-07], + [-2.6924e-03, 2.1860e-03, 1.0650e-02, ..., 6.4122e-07, + -5.6159e-07, 4.8103e-07]]) + >>> sample_rate + 8000 + + Example - Apply random speed perturbation to dataset + >>> + >>> # Load data from file, apply random speed perturbation + >>> class RandomPerturbationFile(torch.utils.data.Dataset): + ... \"\"\"Given flist, apply random speed perturbation + ... + ... Suppose all the input files are at least one second long. + ... \"\"\" + ... def __init__(self, flist: List[str], sample_rate: int): + ... super().__init__() + ... self.flist = flist + ... self.sample_rate = sample_rate + ... + ... def __getitem__(self, index): + ... speed = 0.5 + 1.5 * random.randn() + ... effects = [ + ... ['gain', '-n', '-10'], # apply 10 db attenuation + ... ['remix', '-'], # merge all the channels + ... ['speed', f'{speed:.5f}'], # duration is now 0.5 ~ 2.0 seconds. + ... ['rate', f'{self.sample_rate}'], + ... ['pad', '0', '1.5'], # add 1.5 seconds silence at the end + ... ['trim', '0', '2'], # get the first 2 seconds + ... ] + ... waveform, _ = torchaudio.sox_effects.apply_effects_file( + ... self.flist[index], effects) + ... return waveform + ... + ... def __len__(self): + ... return len(self.flist) + ... + >>> dataset = RandomPerturbationFile(file_list, sample_rate=8000) + >>> loader = torch.utils.data.DataLoader(dataset, batch_size=32) + >>> for batch in loader: + >>> pass + """ + if not torch.jit.is_scripting(): + if hasattr(path, "read"): + raise RuntimeError( + "apply_effects_file function does not support file-like object. " + "Please use torchaudio.io.AudioEffector." 
+ ) + path = os.fspath(path) + return sox_ext.apply_effects_file(path, effects, normalize, channels_first, format) diff --git a/venv/lib/python3.10/site-packages/torchaudio/transforms/__init__.py b/venv/lib/python3.10/site-packages/torchaudio/transforms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1fe77865a9740ab6c88465dbe4a5a41bc445a688 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/transforms/__init__.py @@ -0,0 +1,75 @@ +from ._multi_channel import MVDR, PSD, RTFMVDR, SoudenMVDR +from ._transforms import ( + AddNoise, + AmplitudeToDB, + ComputeDeltas, + Convolve, + Deemphasis, + Fade, + FFTConvolve, + FrequencyMasking, + GriffinLim, + InverseMelScale, + InverseSpectrogram, + LFCC, + Loudness, + MelScale, + MelSpectrogram, + MFCC, + MuLawDecoding, + MuLawEncoding, + PitchShift, + Preemphasis, + Resample, + RNNTLoss, + SlidingWindowCmn, + SpecAugment, + SpectralCentroid, + Spectrogram, + Speed, + SpeedPerturbation, + TimeMasking, + TimeStretch, + Vad, + Vol, +) + + +__all__ = [ + "AddNoise", + "AmplitudeToDB", + "ComputeDeltas", + "Convolve", + "Deemphasis", + "Fade", + "FFTConvolve", + "FrequencyMasking", + "GriffinLim", + "InverseMelScale", + "InverseSpectrogram", + "LFCC", + "Loudness", + "MFCC", + "MVDR", + "MelScale", + "MelSpectrogram", + "MuLawDecoding", + "MuLawEncoding", + "PSD", + "PitchShift", + "Preemphasis", + "RNNTLoss", + "RTFMVDR", + "Resample", + "SlidingWindowCmn", + "SoudenMVDR", + "SpecAugment", + "SpectralCentroid", + "Spectrogram", + "Speed", + "SpeedPerturbation", + "TimeMasking", + "TimeStretch", + "Vad", + "Vol", +] diff --git a/venv/lib/python3.10/site-packages/torchaudio/transforms/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/transforms/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7004abf76f5a12a1c5cb28d24172d1c465bf2cf9 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torchaudio/transforms/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/transforms/__pycache__/_multi_channel.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/transforms/__pycache__/_multi_channel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df9523722593d7a0f47562fb3683cc49b697135b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/transforms/__pycache__/_multi_channel.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/transforms/__pycache__/_transforms.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/transforms/__pycache__/_transforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1039fe887740c62efa0f86508ada641bc0d3425 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/transforms/__pycache__/_transforms.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/transforms/_multi_channel.py b/venv/lib/python3.10/site-packages/torchaudio/transforms/_multi_channel.py new file mode 100644 index 0000000000000000000000000000000000000000..4ba3db7f454058de7c0fda1d57781ed346d7a65c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/transforms/_multi_channel.py @@ -0,0 +1,467 @@ +# -*- coding: utf-8 -*- + +import warnings +from typing import Optional, Union + +import torch +from torch import Tensor +from torchaudio import functional as F + + +__all__ = [] + + +def _get_mvdr_vector( + psd_s: torch.Tensor, + psd_n: torch.Tensor, + reference_vector: torch.Tensor, + solution: str = "ref_channel", + diagonal_loading: bool = True, + diag_eps: float = 1e-7, + eps: float = 1e-8, +) -> torch.Tensor: + r"""Compute the MVDR beamforming weights with ``solution`` argument. + + Args: + psd_s (torch.Tensor): The complex-valued power spectral density (PSD) matrix of target speech. 
+ Tensor with dimensions `(..., freq, channel, channel)`. + psd_n (torch.Tensor): The complex-valued power spectral density (PSD) matrix of noise. + Tensor with dimensions `(..., freq, channel, channel)`. + reference_vector (torch.Tensor): one-hot reference channel matrix. + solution (str, optional): Solution to compute the MVDR beamforming weights. + Options: [``ref_channel``, ``stv_evd``, ``stv_power``]. (Default: ``ref_channel``) + diagonal_loading (bool, optional): If ``True``, enables applying diagonal loading to ``psd_n``. + (Default: ``True``) + diag_eps (float, optional): The coefficient multiplied to the identity matrix for diagonal loading. + It is only effective when ``diagonal_loading`` is set to ``True``. (Default: ``1e-7``) + eps (float, optional): Value to add to the denominator in the beamforming weight formula. + (Default: ``1e-8``) + + Returns: + torch.Tensor: the mvdr beamforming weight matrix + """ + if solution == "ref_channel": + beamform_vector = F.mvdr_weights_souden(psd_s, psd_n, reference_vector, diagonal_loading, diag_eps, eps) + else: + if solution == "stv_evd": + stv = F.rtf_evd(psd_s) + else: + stv = F.rtf_power(psd_s, psd_n, reference_vector, diagonal_loading=diagonal_loading, diag_eps=diag_eps) + beamform_vector = F.mvdr_weights_rtf(stv, psd_n, reference_vector, diagonal_loading, diag_eps, eps) + + return beamform_vector + + +class PSD(torch.nn.Module): + r"""Compute cross-channel power spectral density (PSD) matrix. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + multi_mask (bool, optional): If ``True``, only accepts multi-channel Time-Frequency masks. (Default: ``False``) + normalize (bool, optional): If ``True``, normalize the mask along the time dimension. (Default: ``True``) + eps (float, optional): Value to add to the denominator in mask normalization. 
(Default: ``1e-15``) + """ + + def __init__(self, multi_mask: bool = False, normalize: bool = True, eps: float = 1e-15): + super().__init__() + self.multi_mask = multi_mask + self.normalize = normalize + self.eps = eps + + def forward(self, specgram: torch.Tensor, mask: Optional[torch.Tensor] = None): + """ + Args: + specgram (torch.Tensor): Multi-channel complex-valued spectrum. + Tensor with dimensions `(..., channel, freq, time)`. + mask (torch.Tensor or None, optional): Time-Frequency mask for normalization. + Tensor with dimensions `(..., freq, time)` if multi_mask is ``False`` or + with dimensions `(..., channel, freq, time)` if multi_mask is ``True``. + (Default: ``None``) + + Returns: + torch.Tensor: The complex-valued PSD matrix of the input spectrum. + Tensor with dimensions `(..., freq, channel, channel)` + """ + if mask is not None: + if self.multi_mask: + # Averaging mask along channel dimension + mask = mask.mean(dim=-3) # (..., freq, time) + psd = F.psd(specgram, mask, self.normalize, self.eps) + + return psd + + +class MVDR(torch.nn.Module): + """Minimum Variance Distortionless Response (MVDR) module that performs MVDR beamforming with Time-Frequency masks. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Based on https://github.com/espnet/espnet/blob/master/espnet2/enh/layers/beamformer.py + + We provide three solutions of MVDR beamforming. One is based on *reference channel selection* + :cite:`souden2009optimal` (``solution=ref_channel``). + + .. math:: + \\textbf{w}_{\\text{MVDR}}(f) =\ + \\frac{{{\\bf{\\Phi}_{\\textbf{NN}}^{-1}}(f){\\bf{\\Phi}_{\\textbf{SS}}}}(f)}\ + {\\text{Trace}({{{\\bf{\\Phi}_{\\textbf{NN}}^{-1}}(f) \\bf{\\Phi}_{\\textbf{SS}}}(f))}}\\bm{u} + + where :math:`\\bf{\\Phi}_{\\textbf{SS}}` and :math:`\\bf{\\Phi}_{\\textbf{NN}}` are the covariance\ + matrices of speech and noise, respectively. :math:`\\bf{u}` is an one-hot vector to determine the\ + reference channel. 
+ + The other two solutions are based on the steering vector (``solution=stv_evd`` or ``solution=stv_power``). + + .. math:: + \\textbf{w}_{\\text{MVDR}}(f) =\ + \\frac{{{\\bf{\\Phi}_{\\textbf{NN}}^{-1}}(f){\\bm{v}}(f)}}\ + {{\\bm{v}^{\\mathsf{H}}}(f){\\bf{\\Phi}_{\\textbf{NN}}^{-1}}(f){\\bm{v}}(f)} + + where :math:`\\bm{v}` is the acoustic transfer function or the steering vector.\ + :math:`.^{\\mathsf{H}}` denotes the Hermitian Conjugate operation. + + We apply either *eigenvalue decomposition* + :cite:`higuchi2016robust` or the *power method* :cite:`mises1929praktische` to get the + steering vector from the PSD matrix of speech. + + After estimating the beamforming weight, the enhanced Short-time Fourier Transform (STFT) is obtained by + + .. math:: + \\hat{\\bf{S}} = {\\bf{w}^\\mathsf{H}}{\\bf{Y}}, {\\bf{w}} \\in \\mathbb{C}^{M \\times F} + + where :math:`\\bf{Y}` and :math:`\\hat{\\bf{S}}` are the STFT of the multi-channel noisy speech and\ + the single-channel enhanced speech, respectively. + + For online streaming audio, we provide a *recursive method* :cite:`higuchi2017online` to update the + PSD matrices of speech and noise, respectively. + + Args: + ref_channel (int, optional): Reference channel for beamforming. (Default: ``0``) + solution (str, optional): Solution to compute the MVDR beamforming weights. + Options: [``ref_channel``, ``stv_evd``, ``stv_power``]. (Default: ``ref_channel``) + multi_mask (bool, optional): If ``True``, only accepts multi-channel Time-Frequency masks. (Default: ``False``) + diagonal_loading (bool, optional): If ``True``, enables applying diagonal loading to the covariance matrix + of the noise. (Default: ``True``) + diag_eps (float, optional): The coefficient multiplied to the identity matrix for diagonal loading. + It is only effective when ``diagonal_loading`` is set to ``True``. (Default: ``1e-7``) + online (bool, optional): If ``True``, updates the MVDR beamforming weights based on + the previous covarience matrices. 
(Default: ``False``) + + Note: + To improve the numerical stability, the input spectrogram will be converted to double precision + (``torch.complex128`` or ``torch.cdouble``) dtype for internal computation. The output spectrogram + is converted to the dtype of the input spectrogram to be compatible with other modules. + + Note: + If you use ``stv_evd`` solution, the gradient of the same input may not be identical if the + eigenvalues of the PSD matrix are not distinct (i.e. some eigenvalues are close or identical). + """ + + def __init__( + self, + ref_channel: int = 0, + solution: str = "ref_channel", + multi_mask: bool = False, + diag_loading: bool = True, + diag_eps: float = 1e-7, + online: bool = False, + ): + super().__init__() + if solution not in [ + "ref_channel", + "stv_evd", + "stv_power", + ]: + raise ValueError( + '`solution` must be one of ["ref_channel", "stv_evd", "stv_power"]. Given {}'.format(solution) + ) + self.ref_channel = ref_channel + self.solution = solution + self.multi_mask = multi_mask + self.diag_loading = diag_loading + self.diag_eps = diag_eps + self.online = online + self.psd = PSD(multi_mask) + + psd_s: torch.Tensor = torch.zeros(1) + psd_n: torch.Tensor = torch.zeros(1) + mask_sum_s: torch.Tensor = torch.zeros(1) + mask_sum_n: torch.Tensor = torch.zeros(1) + self.register_buffer("psd_s", psd_s) + self.register_buffer("psd_n", psd_n) + self.register_buffer("mask_sum_s", mask_sum_s) + self.register_buffer("mask_sum_n", mask_sum_n) + + def _get_updated_mvdr_vector( + self, + psd_s: torch.Tensor, + psd_n: torch.Tensor, + mask_s: torch.Tensor, + mask_n: torch.Tensor, + reference_vector: torch.Tensor, + solution: str = "ref_channel", + diagonal_loading: bool = True, + diag_eps: float = 1e-7, + eps: float = 1e-8, + ) -> torch.Tensor: + r"""Recursively update the MVDR beamforming vector. + + Args: + psd_s (torch.Tensor): The complex-valued power spectral density (PSD) matrix of target speech. 
+ Tensor with dimensions `(..., freq, channel, channel)`. + psd_n (torch.Tensor): The complex-valued power spectral density (PSD) matrix of noise. + Tensor with dimensions `(..., freq, channel, channel)`. + mask_s (torch.Tensor): Time-Frequency mask of the target speech. + Tensor with dimensions `(..., freq, time)` if multi_mask is ``False`` + or with dimensions `(..., channel, freq, time)` if multi_mask is ``True``. + mask_n (torch.Tensor or None, optional): Time-Frequency mask of the noise. + Tensor with dimensions `(..., freq, time)` if multi_mask is ``False`` + or with dimensions `(..., channel, freq, time)` if multi_mask is ``True``. + reference_vector (torch.Tensor): One-hot reference channel matrix. + solution (str, optional): Solution to compute the MVDR beamforming weights. + Options: [``ref_channel``, ``stv_evd``, ``stv_power``]. (Default: ``ref_channel``) + diagonal_loading (bool, optional): If ``True``, enables applying diagonal loading to ``psd_n``. + (Default: ``True``) + diag_eps (float, optional): The coefficient multiplied to the identity matrix for diagonal loading. + It is only effective when ``diagonal_loading`` is set to ``True``. (Default: ``1e-7``) + eps (float, optional): Value to add to the denominator in the beamforming weight formula. + (Default: ``1e-8``) + + Returns: + torch.Tensor: The MVDR beamforming weight matrix. 
+ """ + if self.multi_mask: + # Averaging mask along channel dimension + mask_s = mask_s.mean(dim=-3) # (..., freq, time) + mask_n = mask_n.mean(dim=-3) # (..., freq, time) + if self.psd_s.ndim == 1: + self.psd_s = psd_s + self.psd_n = psd_n + self.mask_sum_s = mask_s.sum(dim=-1) + self.mask_sum_n = mask_n.sum(dim=-1) + return _get_mvdr_vector(psd_s, psd_n, reference_vector, solution, diagonal_loading, diag_eps, eps) + else: + psd_s = self._get_updated_psd_speech(psd_s, mask_s) + psd_n = self._get_updated_psd_noise(psd_n, mask_n) + self.psd_s = psd_s + self.psd_n = psd_n + self.mask_sum_s = self.mask_sum_s + mask_s.sum(dim=-1) + self.mask_sum_n = self.mask_sum_n + mask_n.sum(dim=-1) + return _get_mvdr_vector(psd_s, psd_n, reference_vector, solution, diagonal_loading, diag_eps, eps) + + def _get_updated_psd_speech(self, psd_s: torch.Tensor, mask_s: torch.Tensor) -> torch.Tensor: + r"""Update psd of speech recursively. + + Args: + psd_s (torch.Tensor): The complex-valued power spectral density (PSD) matrix of target speech. + Tensor with dimensions `(..., freq, channel, channel)`. + mask_s (torch.Tensor): Time-Frequency mask of the target speech. + Tensor with dimensions `(..., freq, time)`. + + Returns: + torch.Tensor: The updated PSD matrix of target speech. + """ + numerator = self.mask_sum_s / (self.mask_sum_s + mask_s.sum(dim=-1)) + denominator = 1 / (self.mask_sum_s + mask_s.sum(dim=-1)) + psd_s = self.psd_s * numerator[..., None, None] + psd_s * denominator[..., None, None] + return psd_s + + def _get_updated_psd_noise(self, psd_n: torch.Tensor, mask_n: torch.Tensor) -> torch.Tensor: + r"""Update psd of noise recursively. + + Args: + psd_n (torch.Tensor): The complex-valued power spectral density (PSD) matrix of noise. + Tensor with dimensions `(..., freq, channel, channel)`. + mask_n (torch.Tensor or None, optional): Time-Frequency mask of the noise. + Tensor with dimensions `(..., freq, time)`. + + Returns: + torch.Tensor: The updated PSD matrix of noise. 
+ """ + numerator = self.mask_sum_n / (self.mask_sum_n + mask_n.sum(dim=-1)) + denominator = 1 / (self.mask_sum_n + mask_n.sum(dim=-1)) + psd_n = self.psd_n * numerator[..., None, None] + psd_n * denominator[..., None, None] + return psd_n + + def forward( + self, specgram: torch.Tensor, mask_s: torch.Tensor, mask_n: Optional[torch.Tensor] = None + ) -> torch.Tensor: + """Perform MVDR beamforming. + + Args: + specgram (torch.Tensor): Multi-channel complex-valued spectrum. + Tensor with dimensions `(..., channel, freq, time)` + mask_s (torch.Tensor): Time-Frequency mask of target speech. + Tensor with dimensions `(..., freq, time)` if multi_mask is ``False`` + or with dimensions `(..., channel, freq, time)` if multi_mask is ``True``. + mask_n (torch.Tensor or None, optional): Time-Frequency mask of noise. + Tensor with dimensions `(..., freq, time)` if multi_mask is ``False`` + or with dimensions `(..., channel, freq, time)` if multi_mask is ``True``. + (Default: None) + + Returns: + torch.Tensor: Single-channel complex-valued enhanced spectrum with dimensions `(..., freq, time)`. + """ + dtype = specgram.dtype + if specgram.ndim < 3: + raise ValueError(f"Expected at least 3D tensor (..., channel, freq, time). Found: {specgram.shape}") + if not specgram.is_complex(): + raise ValueError( + f"The type of ``specgram`` tensor must be ``torch.cfloat`` or ``torch.cdouble``.\ + Found: {specgram.dtype}" + ) + if specgram.dtype == torch.cfloat: + specgram = specgram.cdouble() # Convert specgram to ``torch.cdouble``. 
+ + if mask_n is None: + warnings.warn("``mask_n`` is not provided, use ``1 - mask_s`` as ``mask_n``.") + mask_n = 1 - mask_s + + psd_s = self.psd(specgram, mask_s) # (..., freq, time, channel, channel) + psd_n = self.psd(specgram, mask_n) # (..., freq, time, channel, channel) + + u = torch.zeros(specgram.size()[:-2], device=specgram.device, dtype=torch.cdouble) # (..., channel) + u[..., self.ref_channel].fill_(1) + + if self.online: + w_mvdr = self._get_updated_mvdr_vector( + psd_s, psd_n, mask_s, mask_n, u, self.solution, self.diag_loading, self.diag_eps + ) + else: + w_mvdr = _get_mvdr_vector(psd_s, psd_n, u, self.solution, self.diag_loading, self.diag_eps) + + specgram_enhanced = F.apply_beamforming(w_mvdr, specgram) + + return specgram_enhanced.to(dtype) + + +class RTFMVDR(torch.nn.Module): + r"""Minimum Variance Distortionless Response (*MVDR* :cite:`capon1969high`) module + based on the relative transfer function (RTF) and power spectral density (PSD) matrix of noise. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Given the multi-channel complex-valued spectrum :math:`\textbf{Y}`, the relative transfer function (RTF) matrix + or the steering vector of target speech :math:`\bm{v}`, the PSD matrix of noise :math:`\bf{\Phi}_{\textbf{NN}}`, and + a one-hot vector that represents the reference channel :math:`\bf{u}`, the module computes the single-channel + complex-valued spectrum of the enhanced speech :math:`\hat{\textbf{S}}`. The formula is defined as: + + .. math:: + \hat{\textbf{S}}(f) = \textbf{w}_{\text{bf}}(f)^{\mathsf{H}} \textbf{Y}(f) + + where :math:`\textbf{w}_{\text{bf}}(f)` is the MVDR beamforming weight for the :math:`f`-th frequency bin, + :math:`(.)^{\mathsf{H}}` denotes the Hermitian Conjugate operation. + + The beamforming weight is computed by: + + .. 
math:: + \textbf{w}_{\text{MVDR}}(f) = + \frac{{{\bf{\Phi}_{\textbf{NN}}^{-1}}(f){\bm{v}}(f)}} + {{\bm{v}^{\mathsf{H}}}(f){\bf{\Phi}_{\textbf{NN}}^{-1}}(f){\bm{v}}(f)} + """ + + def forward( + self, + specgram: Tensor, + rtf: Tensor, + psd_n: Tensor, + reference_channel: Union[int, Tensor], + diagonal_loading: bool = True, + diag_eps: float = 1e-7, + eps: float = 1e-8, + ) -> Tensor: + """ + Args: + specgram (torch.Tensor): Multi-channel complex-valued spectrum. + Tensor with dimensions `(..., channel, freq, time)` + rtf (torch.Tensor): The complex-valued RTF vector of target speech. + Tensor with dimensions `(..., freq, channel)`. + psd_n (torch.Tensor): The complex-valued power spectral density (PSD) matrix of noise. + Tensor with dimensions `(..., freq, channel, channel)`. + reference_channel (int or torch.Tensor): Specifies the reference channel. + If the dtype is ``int``, it represents the reference channel index. + If the dtype is ``torch.Tensor``, its shape is `(..., channel)`, where the ``channel`` dimension + is one-hot. + diagonal_loading (bool, optional): If ``True``, enables applying diagonal loading to ``psd_n``. + (Default: ``True``) + diag_eps (float, optional): The coefficient multiplied to the identity matrix for diagonal loading. + It is only effective when ``diagonal_loading`` is set to ``True``. (Default: ``1e-7``) + eps (float, optional): Value to add to the denominator in the beamforming weight formula. + (Default: ``1e-8``) + + Returns: + torch.Tensor: Single-channel complex-valued enhanced spectrum with dimensions `(..., freq, time)`. + """ + w_mvdr = F.mvdr_weights_rtf(rtf, psd_n, reference_channel, diagonal_loading, diag_eps, eps) + spectrum_enhanced = F.apply_beamforming(w_mvdr, specgram) + return spectrum_enhanced + + +class SoudenMVDR(torch.nn.Module): + r"""Minimum Variance Distortionless Response (*MVDR* :cite:`capon1969high`) module + based on the method proposed by *Souden et, al.* :cite:`souden2009optimal`. + + .. 
devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Given the multi-channel complex-valued spectrum :math:`\textbf{Y}`, the power spectral density (PSD) matrix + of target speech :math:`\bf{\Phi}_{\textbf{SS}}`, the PSD matrix of noise :math:`\bf{\Phi}_{\textbf{NN}}`, and + a one-hot vector that represents the reference channel :math:`\bf{u}`, the module computes the single-channel + complex-valued spectrum of the enhanced speech :math:`\hat{\textbf{S}}`. The formula is defined as: + + .. math:: + \hat{\textbf{S}}(f) = \textbf{w}_{\text{bf}}(f)^{\mathsf{H}} \textbf{Y}(f) + + where :math:`\textbf{w}_{\text{bf}}(f)` is the MVDR beamforming weight for the :math:`f`-th frequency bin. + + The beamforming weight is computed by: + + .. math:: + \textbf{w}_{\text{MVDR}}(f) = + \frac{{{\bf{\Phi}_{\textbf{NN}}^{-1}}(f){\bf{\Phi}_{\textbf{SS}}}}(f)} + {\text{Trace}({{{\bf{\Phi}_{\textbf{NN}}^{-1}}(f) \bf{\Phi}_{\textbf{SS}}}(f))}}\bm{u} + """ + + def forward( + self, + specgram: Tensor, + psd_s: Tensor, + psd_n: Tensor, + reference_channel: Union[int, Tensor], + diagonal_loading: bool = True, + diag_eps: float = 1e-7, + eps: float = 1e-8, + ) -> torch.Tensor: + """ + Args: + specgram (torch.Tensor): Multi-channel complex-valued spectrum. + Tensor with dimensions `(..., channel, freq, time)`. + psd_s (torch.Tensor): The complex-valued power spectral density (PSD) matrix of target speech. + Tensor with dimensions `(..., freq, channel, channel)`. + psd_n (torch.Tensor): The complex-valued power spectral density (PSD) matrix of noise. + Tensor with dimensions `(..., freq, channel, channel)`. + reference_channel (int or torch.Tensor): Specifies the reference channel. + If the dtype is ``int``, it represents the reference channel index. + If the dtype is ``torch.Tensor``, its shape is `(..., channel)`, where the ``channel`` dimension + is one-hot. + diagonal_loading (bool, optional): If ``True``, enables applying diagonal loading to ``psd_n``. 
+ (Default: ``True``) + diag_eps (float, optional): The coefficient multiplied to the identity matrix for diagonal loading. + It is only effective when ``diagonal_loading`` is set to ``True``. (Default: ``1e-7``) + eps (float, optional): Value to add to the denominator in the beamforming weight formula. + (Default: ``1e-8``) + + Returns: + torch.Tensor: Single-channel complex-valued enhanced spectrum with dimensions `(..., freq, time)`. + """ + w_mvdr = F.mvdr_weights_souden(psd_s, psd_n, reference_channel, diagonal_loading, diag_eps, eps) + spectrum_enhanced = F.apply_beamforming(w_mvdr, specgram) + return spectrum_enhanced diff --git a/venv/lib/python3.10/site-packages/torchaudio/transforms/_transforms.py b/venv/lib/python3.10/site-packages/torchaudio/transforms/_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..802cbd3d7783d6117fa2d5361d55db567aba43d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/transforms/_transforms.py @@ -0,0 +1,2137 @@ +# -*- coding: utf-8 -*- + +import math +import warnings +from typing import Callable, Optional, Sequence, Tuple, Union + +import torch +from torch import Tensor +from torch.nn.modules.lazy import LazyModuleMixin +from torch.nn.parameter import UninitializedParameter + +from torchaudio import functional as F +from torchaudio.functional.functional import ( + _apply_sinc_resample_kernel, + _check_convolve_mode, + _fix_waveform_shape, + _get_sinc_resample_kernel, + _stretch_waveform, +) + +__all__ = [] + + +class Spectrogram(torch.nn.Module): + r"""Create a spectrogram from a audio signal. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``) + win_length (int or None, optional): Window size. (Default: ``n_fft``) + hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``) + pad (int, optional): Two sided padding of signal. 
(Default: ``0``) + window_fn (Callable[..., Tensor], optional): A function to create a window tensor + that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``) + power (float or None, optional): Exponent for the magnitude spectrogram, + (must be > 0) e.g., 1 for magnitude, 2 for power, etc. + If None, then the complex spectrum is returned instead. (Default: ``2``) + normalized (bool or str, optional): Whether to normalize by magnitude after stft. If input is str, choices are + ``"window"`` and ``"frame_length"``, if specific normalization type is desirable. ``True`` maps to + ``"window"``. (Default: ``False``) + wkwargs (dict or None, optional): Arguments for window function. (Default: ``None``) + center (bool, optional): whether to pad :attr:`waveform` on both sides so + that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`. + (Default: ``True``) + pad_mode (string, optional): controls the padding method used when + :attr:`center` is ``True``. (Default: ``"reflect"``) + onesided (bool, optional): controls whether to return half of results to + avoid redundancy (Default: ``True``) + return_complex (bool, optional): + Deprecated and not used. 
+ + Example + >>> waveform, sample_rate = torchaudio.load("test.wav", normalize=True) + >>> transform = torchaudio.transforms.Spectrogram(n_fft=800) + >>> spectrogram = transform(waveform) + + """ + __constants__ = ["n_fft", "win_length", "hop_length", "pad", "power", "normalized"] + + def __init__( + self, + n_fft: int = 400, + win_length: Optional[int] = None, + hop_length: Optional[int] = None, + pad: int = 0, + window_fn: Callable[..., Tensor] = torch.hann_window, + power: Optional[float] = 2.0, + normalized: Union[bool, str] = False, + wkwargs: Optional[dict] = None, + center: bool = True, + pad_mode: str = "reflect", + onesided: bool = True, + return_complex: Optional[bool] = None, + ) -> None: + super(Spectrogram, self).__init__() + torch._C._log_api_usage_once("torchaudio.transforms.Spectrogram") + self.n_fft = n_fft + # number of FFT bins. the returned STFT result will have n_fft // 2 + 1 + # number of frequencies due to onesided=True in torch.stft + self.win_length = win_length if win_length is not None else n_fft + self.hop_length = hop_length if hop_length is not None else self.win_length // 2 + window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs) + self.register_buffer("window", window) + self.pad = pad + self.power = power + self.normalized = normalized + self.center = center + self.pad_mode = pad_mode + self.onesided = onesided + if return_complex is not None: + warnings.warn( + "`return_complex` argument is now deprecated and is not effective." + "`torchaudio.transforms.Spectrogram(power=None)` always returns a tensor with " + "complex dtype. Please remove the argument in the function call." + ) + + def forward(self, waveform: Tensor) -> Tensor: + r""" + Args: + waveform (Tensor): Tensor of audio of dimension (..., time). + + Returns: + Tensor: Dimension (..., freq, time), where freq is + ``n_fft // 2 + 1`` where ``n_fft`` is the number of + Fourier bins, and time is the number of window hops (n_frame). 
+ """ + return F.spectrogram( + waveform, + self.pad, + self.window, + self.n_fft, + self.hop_length, + self.win_length, + self.power, + self.normalized, + self.center, + self.pad_mode, + self.onesided, + ) + + +class InverseSpectrogram(torch.nn.Module): + r"""Create an inverse spectrogram to recover an audio signal from a spectrogram. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``) + win_length (int or None, optional): Window size. (Default: ``n_fft``) + hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``) + pad (int, optional): Two sided padding of signal. (Default: ``0``) + window_fn (Callable[..., Tensor], optional): A function to create a window tensor + that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``) + normalized (bool or str, optional): Whether the stft output was normalized by magnitude. If input is str, + choices are ``"window"`` and ``"frame_length"``, dependent on normalization mode. ``True`` maps to + ``"window"``. (Default: ``False``) + wkwargs (dict or None, optional): Arguments for window function. (Default: ``None``) + center (bool, optional): whether the signal in spectrogram was padded on both sides so + that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`. + (Default: ``True``) + pad_mode (string, optional): controls the padding method used when + :attr:`center` is ``True``. 
(Default: ``"reflect"``) + onesided (bool, optional): controls whether spectrogram was used to return half of results to + avoid redundancy (Default: ``True``) + + Example + >>> batch, freq, time = 2, 257, 100 + >>> length = 25344 + >>> spectrogram = torch.randn(batch, freq, time, dtype=torch.cdouble) + >>> transform = transforms.InverseSpectrogram(n_fft=512) + >>> waveform = transform(spectrogram, length) + """ + __constants__ = ["n_fft", "win_length", "hop_length", "pad", "power", "normalized"] + + def __init__( + self, + n_fft: int = 400, + win_length: Optional[int] = None, + hop_length: Optional[int] = None, + pad: int = 0, + window_fn: Callable[..., Tensor] = torch.hann_window, + normalized: Union[bool, str] = False, + wkwargs: Optional[dict] = None, + center: bool = True, + pad_mode: str = "reflect", + onesided: bool = True, + ) -> None: + super(InverseSpectrogram, self).__init__() + self.n_fft = n_fft + # number of FFT bins. the returned STFT result will have n_fft // 2 + 1 + # number of frequencies due to onesided=True in torch.stft + self.win_length = win_length if win_length is not None else n_fft + self.hop_length = hop_length if hop_length is not None else self.win_length // 2 + window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs) + self.register_buffer("window", window) + self.pad = pad + self.normalized = normalized + self.center = center + self.pad_mode = pad_mode + self.onesided = onesided + + def forward(self, spectrogram: Tensor, length: Optional[int] = None) -> Tensor: + r""" + Args: + spectrogram (Tensor): Complex tensor of audio of dimension (..., freq, time). + length (int or None, optional): The output length of the waveform. + + Returns: + Tensor: Dimension (..., time), Least squares estimation of the original signal. 
+ """ + return F.inverse_spectrogram( + spectrogram, + length, + self.pad, + self.window, + self.n_fft, + self.hop_length, + self.win_length, + self.normalized, + self.center, + self.pad_mode, + self.onesided, + ) + + +class GriffinLim(torch.nn.Module): + r"""Compute waveform from a linear scale magnitude spectrogram using the Griffin-Lim transformation. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Implementation ported from + *librosa* :cite:`brian_mcfee-proc-scipy-2015`, *A fast Griffin-Lim algorithm* :cite:`6701851` + and *Signal estimation from modified short-time Fourier transform* :cite:`1172092`. + + Args: + n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``) + n_iter (int, optional): Number of iteration for phase recovery process. (Default: ``32``) + win_length (int or None, optional): Window size. (Default: ``n_fft``) + hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``) + window_fn (Callable[..., Tensor], optional): A function to create a window tensor + that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``) + power (float, optional): Exponent for the magnitude spectrogram, + (must be > 0) e.g., 1 for magnitude, 2 for power, etc. (Default: ``2``) + wkwargs (dict or None, optional): Arguments for window function. (Default: ``None``) + momentum (float, optional): The momentum parameter for fast Griffin-Lim. + Setting this to 0 recovers the original Griffin-Lim method. + Values near 1 can lead to faster convergence, but above 1 may not converge. (Default: ``0.99``) + length (int, optional): Array length of the expected output. (Default: ``None``) + rand_init (bool, optional): Initializes phase randomly if True and to zero otherwise. 
(Default: ``True``) + + Example + >>> batch, freq, time = 2, 257, 100 + >>> spectrogram = torch.randn(batch, freq, time) + >>> transform = transforms.GriffinLim(n_fft=512) + >>> waveform = transform(spectrogram) + """ + __constants__ = ["n_fft", "n_iter", "win_length", "hop_length", "power", "length", "momentum", "rand_init"] + + def __init__( + self, + n_fft: int = 400, + n_iter: int = 32, + win_length: Optional[int] = None, + hop_length: Optional[int] = None, + window_fn: Callable[..., Tensor] = torch.hann_window, + power: float = 2.0, + wkwargs: Optional[dict] = None, + momentum: float = 0.99, + length: Optional[int] = None, + rand_init: bool = True, + ) -> None: + super(GriffinLim, self).__init__() + + if not (0 <= momentum < 1): + raise ValueError("momentum must be in the range [0, 1). Found: {}".format(momentum)) + + self.n_fft = n_fft + self.n_iter = n_iter + self.win_length = win_length if win_length is not None else n_fft + self.hop_length = hop_length if hop_length is not None else self.win_length // 2 + window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs) + self.register_buffer("window", window) + self.length = length + self.power = power + self.momentum = momentum + self.rand_init = rand_init + + def forward(self, specgram: Tensor) -> Tensor: + r""" + Args: + specgram (Tensor): + A magnitude-only STFT spectrogram of dimension (..., freq, frames) + where freq is ``n_fft // 2 + 1``. + + Returns: + Tensor: waveform of (..., time), where time equals the ``length`` parameter if given. + """ + return F.griffinlim( + specgram, + self.window, + self.n_fft, + self.hop_length, + self.win_length, + self.power, + self.n_iter, + self.momentum, + self.length, + self.rand_init, + ) + + +class AmplitudeToDB(torch.nn.Module): + r"""Turn a tensor from the power/amplitude scale to the decibel scale. + + .. devices:: CPU CUDA + + .. 
class MelScale(torch.nn.Module):
    r"""Turn a normal STFT into a mel frequency STFT with triangular filter banks.

    .. devices:: CPU CUDA

    .. properties:: Autograd TorchScript

    Args:
        n_mels (int, optional): Number of mel filterbanks. (Default: ``128``)
        sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
        f_min (float, optional): Minimum frequency. (Default: ``0.``)
        f_max (float or None, optional): Maximum frequency. (Default: ``sample_rate // 2``)
        n_stft (int, optional): Number of bins in STFT. See ``n_fft`` in :class:`Spectrogram`. (Default: ``201``)
        norm (str or None, optional): If ``"slaney"``, divide the triangular mel weights by the width of the mel band
            (area normalization). (Default: ``None``)
        mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)

    Example
        >>> waveform, sample_rate = torchaudio.load("test.wav", normalize=True)
        >>> spectrogram_transform = transforms.Spectrogram(n_fft=1024)
        >>> spectrogram = spectrogram_transform(waveform)
        >>> melscale_transform = transforms.MelScale(sample_rate=sample_rate, n_stft=1024 // 2 + 1)
        >>> melscale_spectrogram = melscale_transform(spectrogram)

    See also:
        :py:func:`torchaudio.functional.melscale_fbanks` - The function used to
        generate the filter banks.
    """
    __constants__ = ["n_mels", "sample_rate", "f_min", "f_max"]

    def __init__(
        self,
        n_mels: int = 128,
        sample_rate: int = 16000,
        f_min: float = 0.0,
        f_max: Optional[float] = None,
        n_stft: int = 201,
        norm: Optional[str] = None,
        mel_scale: str = "htk",
    ) -> None:
        super().__init__()
        self.n_mels = n_mels
        self.sample_rate = sample_rate
        # When no upper bound is given, default to the Nyquist frequency.
        self.f_max = float(sample_rate // 2) if f_max is None else f_max
        self.f_min = f_min
        self.norm = norm
        self.mel_scale = mel_scale

        if f_min > self.f_max:
            raise ValueError("Require f_min: {} <= f_max: {}".format(f_min, self.f_max))

        # Precompute the (n_freqs, n_mels) filter-bank matrix once; registering it as
        # a buffer lets it follow the module across devices and into state_dict.
        filter_bank = F.melscale_fbanks(
            n_stft, self.f_min, self.f_max, self.n_mels, self.sample_rate, self.norm, self.mel_scale
        )
        self.register_buffer("fb", filter_bank)

    def forward(self, specgram: Tensor) -> Tensor:
        r"""
        Args:
            specgram (Tensor): A spectrogram STFT of dimension (..., freq, time).

        Returns:
            Tensor: Mel frequency spectrogram of size (..., ``n_mels``, time).
        """
        # (..., freq, time) -> (..., time, freq) @ (freq, n_mels), then swap the
        # last two axes back so the output is (..., n_mels, time).
        swapped = specgram.transpose(-1, -2)
        return torch.matmul(swapped, self.fb).transpose(-1, -2)
+ (Default: ``"gels``) + + Example + >>> waveform, sample_rate = torchaudio.load("test.wav", normalize=True) + >>> mel_spectrogram_transform = transforms.MelSpectrogram(sample_rate, n_fft=1024) + >>> mel_spectrogram = mel_spectrogram_transform(waveform) + >>> inverse_melscale_transform = transforms.InverseMelScale(n_stft=1024 // 2 + 1) + >>> spectrogram = inverse_melscale_transform(mel_spectrogram) + """ + __constants__ = [ + "n_stft", + "n_mels", + "sample_rate", + "f_min", + "f_max", + ] + + def __init__( + self, + n_stft: int, + n_mels: int = 128, + sample_rate: int = 16000, + f_min: float = 0.0, + f_max: Optional[float] = None, + norm: Optional[str] = None, + mel_scale: str = "htk", + driver: str = "gels", + ) -> None: + super(InverseMelScale, self).__init__() + self.n_mels = n_mels + self.sample_rate = sample_rate + self.f_max = f_max or float(sample_rate // 2) + self.f_min = f_min + self.driver = driver + + if f_min > self.f_max: + raise ValueError("Require f_min: {} <= f_max: {}".format(f_min, self.f_max)) + + if driver not in ["gels", "gelsy", "gelsd", "gelss"]: + raise ValueError(f'driver must be one of ["gels", "gelsy", "gelsd", "gelss"]. Found {driver}.') + + fb = F.melscale_fbanks(n_stft, self.f_min, self.f_max, self.n_mels, self.sample_rate, norm, mel_scale) + self.register_buffer("fb", fb) + + def forward(self, melspec: Tensor) -> Tensor: + r""" + Args: + melspec (Tensor): A Mel frequency spectrogram of dimension (..., ``n_mels``, time) + + Returns: + Tensor: Linear scale spectrogram of size (..., freq, time) + """ + # pack batch + shape = melspec.size() + melspec = melspec.view(-1, shape[-2], shape[-1]) + + n_mels, time = shape[-2], shape[-1] + freq, _ = self.fb.size() # (freq, n_mels) + if self.n_mels != n_mels: + raise ValueError("Expected an input with {} mel bins. 
class MelSpectrogram(torch.nn.Module):
    r"""Create MelSpectrogram for a raw audio signal.

    .. devices:: CPU CUDA

    .. properties:: Autograd TorchScript

    This is a composition of :py:func:`torchaudio.transforms.Spectrogram`
    and :py:func:`torchaudio.transforms.MelScale`.

    Sources
        * https://gist.github.com/kastnerkyle/179d6e9a88202ab0a2fe
        * https://timsainb.github.io/spectrograms-mfccs-and-inversion-in-python.html
        * http://haythamfayek.com/2016/04/21/speech-processing-for-machine-learning.html

    Args:
        sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
        n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``)
        win_length (int or None, optional): Window size. (Default: ``n_fft``)
        hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``)
        f_min (float, optional): Minimum frequency. (Default: ``0.``)
        f_max (float or None, optional): Maximum frequency. (Default: ``None``)
        pad (int, optional): Two sided padding of signal. (Default: ``0``)
        n_mels (int, optional): Number of mel filterbanks. (Default: ``128``)
        window_fn (Callable[..., Tensor], optional): A function to create a window tensor
            that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``)
        power (float, optional): Exponent for the magnitude spectrogram,
            (must be > 0) e.g., 1 for magnitude, 2 for power, etc. (Default: ``2``)
        normalized (bool, optional): Whether to normalize by magnitude after stft. (Default: ``False``)
        wkwargs (Dict[..., ...] or None, optional): Arguments for window function. (Default: ``None``)
        center (bool, optional): whether to pad :attr:`waveform` on both sides so
            that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
            (Default: ``True``)
        pad_mode (string, optional): controls the padding method used when
            :attr:`center` is ``True``. (Default: ``"reflect"``)
        onesided: Deprecated and unused.
        norm (str or None, optional): If "slaney", divide the triangular mel weights by the width of the mel band
            (area normalization). (Default: ``None``)
        mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)

    Example
        >>> waveform, sample_rate = torchaudio.load("test.wav", normalize=True)
        >>> transform = transforms.MelSpectrogram(sample_rate)
        >>> mel_specgram = transform(waveform)  # (channel, n_mels, time)

    See also:
        :py:func:`torchaudio.functional.melscale_fbanks` - The function used to
        generate the filter banks.
    """
    __constants__ = ["sample_rate", "n_fft", "win_length", "hop_length", "pad", "n_mels", "f_min"]

    def __init__(
        self,
        sample_rate: int = 16000,
        n_fft: int = 400,
        win_length: Optional[int] = None,
        hop_length: Optional[int] = None,
        f_min: float = 0.0,
        f_max: Optional[float] = None,
        pad: int = 0,
        n_mels: int = 128,
        window_fn: Callable[..., Tensor] = torch.hann_window,
        power: float = 2.0,
        normalized: bool = False,
        wkwargs: Optional[dict] = None,
        center: bool = True,
        pad_mode: str = "reflect",
        onesided: Optional[bool] = None,
        norm: Optional[str] = None,
        mel_scale: str = "htk",
    ) -> None:
        super().__init__()
        torch._C._log_api_usage_once("torchaudio.transforms.MelSpectrogram")

        # 'onesided' is accepted for backward compatibility only.
        if onesided is not None:
            warnings.warn(
                "Argument 'onesided' has been deprecated and has no influence on the behavior of this module."
            )

        self.sample_rate = sample_rate
        self.n_fft = n_fft
        # Fall back to n_fft for the window size and half the window for the hop.
        self.win_length = n_fft if win_length is None else win_length
        self.hop_length = self.win_length // 2 if hop_length is None else hop_length
        self.pad = pad
        self.power = power
        self.normalized = normalized
        self.n_mels = n_mels  # number of mel frequency bins
        self.f_max = f_max
        self.f_min = f_min
        self.spectrogram = Spectrogram(
            n_fft=self.n_fft,
            win_length=self.win_length,
            hop_length=self.hop_length,
            pad=self.pad,
            window_fn=window_fn,
            power=self.power,
            normalized=self.normalized,
            wkwargs=wkwargs,
            center=center,
            pad_mode=pad_mode,
            onesided=True,
        )
        self.mel_scale = MelScale(
            self.n_mels, self.sample_rate, self.f_min, self.f_max, self.n_fft // 2 + 1, norm, mel_scale
        )

    def forward(self, waveform: Tensor) -> Tensor:
        r"""
        Args:
            waveform (Tensor): Tensor of audio of dimension (..., time).

        Returns:
            Tensor: Mel frequency spectrogram of size (..., ``n_mels``, time).
        """
        # Waveform -> (power) spectrogram -> mel-scaled spectrogram.
        return self.mel_scale(self.spectrogram(waveform))
(Default: ``"ortho"``) + log_mels (bool, optional): whether to use log-mel spectrograms instead of db-scaled. (Default: ``False``) + melkwargs (dict or None, optional): arguments for MelSpectrogram. (Default: ``None``) + + Example + >>> waveform, sample_rate = torchaudio.load("test.wav", normalize=True) + >>> transform = transforms.MFCC( + >>> sample_rate=sample_rate, + >>> n_mfcc=13, + >>> melkwargs={"n_fft": 400, "hop_length": 160, "n_mels": 23, "center": False}, + >>> ) + >>> mfcc = transform(waveform) + + See also: + :py:func:`torchaudio.functional.melscale_fbanks` - The function used to + generate the filter banks. + """ + __constants__ = ["sample_rate", "n_mfcc", "dct_type", "top_db", "log_mels"] + + def __init__( + self, + sample_rate: int = 16000, + n_mfcc: int = 40, + dct_type: int = 2, + norm: str = "ortho", + log_mels: bool = False, + melkwargs: Optional[dict] = None, + ) -> None: + super(MFCC, self).__init__() + supported_dct_types = [2] + if dct_type not in supported_dct_types: + raise ValueError("DCT type not supported: {}".format(dct_type)) + self.sample_rate = sample_rate + self.n_mfcc = n_mfcc + self.dct_type = dct_type + self.norm = norm + self.top_db = 80.0 + self.amplitude_to_DB = AmplitudeToDB("power", self.top_db) + + melkwargs = melkwargs or {} + self.MelSpectrogram = MelSpectrogram(sample_rate=self.sample_rate, **melkwargs) + + if self.n_mfcc > self.MelSpectrogram.n_mels: + raise ValueError("Cannot select more MFCC coefficients than # mel bins") + dct_mat = F.create_dct(self.n_mfcc, self.MelSpectrogram.n_mels, self.norm) + self.register_buffer("dct_mat", dct_mat) + self.log_mels = log_mels + + def forward(self, waveform: Tensor) -> Tensor: + r""" + Args: + waveform (Tensor): Tensor of audio of dimension (..., time). + + Returns: + Tensor: specgram_mel_db of size (..., ``n_mfcc``, time). 
class LFCC(torch.nn.Module):
    r"""Create the linear-frequency cepstrum coefficients from an audio signal.

    .. devices:: CPU CUDA

    .. properties:: Autograd TorchScript

    By default, this calculates the LFCC on the DB-scaled linear filtered spectrogram.
    This is not the textbook implementation, but is implemented here to
    give consistency with librosa.

    This output depends on the maximum value in the input spectrogram, and so
    may return different values for an audio clip split into snippets vs. a
    a full clip.

    Args:
        sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
        n_filter (int, optional): Number of linear filters to apply. (Default: ``128``)
        n_lfcc (int, optional): Number of lfc coefficients to retain. (Default: ``40``)
        f_min (float, optional): Minimum frequency. (Default: ``0.``)
        f_max (float or None, optional): Maximum frequency. (Default: ``None``)
        dct_type (int, optional): type of DCT (discrete cosine transform) to use. (Default: ``2``)
        norm (str, optional): norm to use. (Default: ``"ortho"``)
        log_lf (bool, optional): whether to use log-lf spectrograms instead of db-scaled. (Default: ``False``)
        speckwargs (dict or None, optional): arguments for Spectrogram. (Default: ``None``)

    Example
        >>> waveform, sample_rate = torchaudio.load("test.wav", normalize=True)
        >>> transform = transforms.LFCC(
        >>>     sample_rate=sample_rate,
        >>>     n_lfcc=13,
        >>>     speckwargs={"n_fft": 400, "hop_length": 160, "center": False},
        >>> )
        >>> lfcc = transform(waveform)

    See also:
        :py:func:`torchaudio.functional.linear_fbanks` - The function used to
        generate the filter banks.
    """
    __constants__ = ["sample_rate", "n_filter", "n_lfcc", "dct_type", "top_db", "log_lf"]

    def __init__(
        self,
        sample_rate: int = 16000,
        n_filter: int = 128,
        f_min: float = 0.0,
        f_max: Optional[float] = None,
        n_lfcc: int = 40,
        dct_type: int = 2,
        norm: str = "ortho",
        log_lf: bool = False,
        speckwargs: Optional[dict] = None,
    ) -> None:
        super().__init__()
        # Only the type-II DCT is implemented.
        if dct_type != 2:
            raise ValueError("DCT type not supported: {}".format(dct_type))
        self.sample_rate = sample_rate
        self.f_min = f_min
        # Default the upper frequency edge to the Nyquist frequency.
        self.f_max = float(sample_rate // 2) if f_max is None else f_max
        self.n_filter = n_filter
        self.n_lfcc = n_lfcc
        self.dct_type = dct_type
        self.norm = norm
        self.top_db = 80.0
        self.amplitude_to_DB = AmplitudeToDB("power", self.top_db)

        self.Spectrogram = Spectrogram(**(speckwargs or {}))

        if self.n_lfcc > self.Spectrogram.n_fft:
            raise ValueError("Cannot select more LFCC coefficients than # fft bins")

        # Linear-frequency filter bank of shape (n_freqs, n_filter).
        filter_mat = F.linear_fbanks(
            n_freqs=self.Spectrogram.n_fft // 2 + 1,
            f_min=self.f_min,
            f_max=self.f_max,
            n_filter=self.n_filter,
            sample_rate=self.sample_rate,
        )
        self.register_buffer("filter_mat", filter_mat)

        # DCT basis of shape (n_filter, n_lfcc).
        self.register_buffer("dct_mat", F.create_dct(self.n_lfcc, self.n_filter, self.norm))
        self.log_lf = log_lf

    def forward(self, waveform: Tensor) -> Tensor:
        r"""
        Args:
            waveform (Tensor): Tensor of audio of dimension (..., time).

        Returns:
            Tensor: Linear Frequency Cepstral Coefficients of size (..., ``n_lfcc``, time).
        """
        specgram = self.Spectrogram(waveform)

        # Apply the linear filter bank along the frequency axis:
        # (..., time, freq) @ (freq, n_filter) -> (..., n_filter, time)
        filtered = torch.matmul(specgram.transpose(-1, -2), self.filter_mat).transpose(-1, -2)

        if self.log_lf:
            filtered = torch.log(filtered + 1e-6)
        else:
            filtered = self.amplitude_to_DB(filtered)

        # (..., time, n_filter) @ (n_filter, n_lfcc) -> (..., n_lfcc, time)
        return torch.matmul(filtered.transpose(-1, -2), self.dct_mat).transpose(-1, -2)


class MuLawEncoding(torch.nn.Module):
    r"""Encode signal based on mu-law companding.

    .. devices:: CPU CUDA

    .. properties:: TorchScript

    For more info see the
    `Wikipedia Entry `_

    This algorithm assumes the signal has been scaled to between -1 and 1 and
    returns a signal encoded with values from 0 to quantization_channels - 1

    Args:
        quantization_channels (int, optional): Number of channels. (Default: ``256``)

    Example
        >>> waveform, sample_rate = torchaudio.load("test.wav", normalize=True)
        >>> transform = torchaudio.transforms.MuLawEncoding(quantization_channels=512)
        >>> mulawtrans = transform(waveform)

    """
    __constants__ = ["quantization_channels"]

    def __init__(self, quantization_channels: int = 256) -> None:
        super().__init__()
        self.quantization_channels = quantization_channels

    def forward(self, x: Tensor) -> Tensor:
        r"""
        Args:
            x (Tensor): A signal to be encoded.

        Returns:
            Tensor: An encoded signal.
        """
        return F.mu_law_encoding(x, self.quantization_channels)
class Resample(torch.nn.Module):
    r"""Resample a signal from one frequency to another. A resampling method can be given.

    .. devices:: CPU CUDA

    .. properties:: Autograd TorchScript

    Note:
        If resampling on waveforms of higher precision than float32, there may be a small loss of precision
        because the kernel is cached once as float32. If high precision resampling is important for your application,
        the functional form will retain higher precision, but run slower because it does not cache the kernel.
        Alternatively, you could rewrite a transform that caches a higher precision kernel.

    Args:
        orig_freq (int, optional): The original frequency of the signal. (Default: ``16000``)
        new_freq (int, optional): The desired frequency. (Default: ``16000``)
        resampling_method (str, optional): The resampling method to use.
            Options: [``sinc_interp_hann``, ``sinc_interp_kaiser``] (Default: ``"sinc_interp_hann"``)
        lowpass_filter_width (int, optional): Controls the sharpness of the filter, more == sharper
            but less efficient. (Default: ``6``)
        rolloff (float, optional): The roll-off frequency of the filter, as a fraction of the Nyquist.
            Lower values reduce anti-aliasing, but also reduce some of the highest frequencies. (Default: ``0.99``)
        beta (float or None, optional): The shape parameter used for kaiser window.
        dtype (torch.dtype, optional):
            Determines the precision that resampling kernel is pre-computed and cached. If not provided,
            kernel is computed with ``torch.float64`` then cached as ``torch.float32``.
            If you need higher precision, provide ``torch.float64``, and the pre-computed kernel is computed and
            cached as ``torch.float64``. If you use resample with lower precision, then instead of providing
            this argument, please use ``Resample.to(dtype)``, so that the kernel generation is still
            carried out on ``torch.float64``.

    Example
        >>> waveform, sample_rate = torchaudio.load("test.wav", normalize=True)
        >>> transform = transforms.Resample(sample_rate, sample_rate // 10)
        >>> waveform = transform(waveform)
    """

    def __init__(
        self,
        orig_freq: int = 16000,
        new_freq: int = 16000,
        resampling_method: str = "sinc_interp_hann",
        lowpass_filter_width: int = 6,
        rolloff: float = 0.99,
        beta: Optional[float] = None,
        *,
        dtype: Optional[torch.dtype] = None,
    ) -> None:
        super().__init__()

        self.orig_freq = orig_freq
        self.new_freq = new_freq
        # Reduce the rate ratio by the GCD so the kernel stays as small as possible.
        self.gcd = math.gcd(int(self.orig_freq), int(self.new_freq))
        self.resampling_method = resampling_method
        self.lowpass_filter_width = lowpass_filter_width
        self.rolloff = rolloff
        self.beta = beta

        # Only build and cache the sinc kernel when an actual rate change is requested;
        # the identity case short-circuits in forward().
        if self.orig_freq != self.new_freq:
            kernel, self.width = _get_sinc_resample_kernel(
                self.orig_freq,
                self.new_freq,
                self.gcd,
                self.lowpass_filter_width,
                self.rolloff,
                self.resampling_method,
                beta,
                dtype=dtype,
            )
            self.register_buffer("kernel", kernel)

    def forward(self, waveform: Tensor) -> Tensor:
        r"""
        Args:
            waveform (Tensor): Tensor of audio of dimension (..., time).

        Returns:
            Tensor: Output signal of dimension (..., time).
        """
        # Identity resampling: return the input untouched.
        if self.orig_freq == self.new_freq:
            return waveform
        return _apply_sinc_resample_kernel(waveform, self.orig_freq, self.new_freq, self.gcd, self.kernel, self.width)


class ComputeDeltas(torch.nn.Module):
    r"""Compute delta coefficients of a tensor, usually a spectrogram.

    .. devices:: CPU CUDA

    .. properties:: Autograd TorchScript

    See `torchaudio.functional.compute_deltas` for more details.

    Args:
        win_length (int, optional): The window length used for computing delta. (Default: ``5``)
        mode (str, optional): Mode parameter passed to padding. (Default: ``"replicate"``)
    """
    __constants__ = ["win_length"]

    def __init__(self, win_length: int = 5, mode: str = "replicate") -> None:
        super().__init__()
        self.win_length = win_length
        self.mode = mode

    def forward(self, specgram: Tensor) -> Tensor:
        r"""
        Args:
            specgram (Tensor): Tensor of audio of dimension (..., freq, time).

        Returns:
            Tensor: Tensor of deltas of dimension (..., freq, time).
        """
        return F.compute_deltas(specgram, win_length=self.win_length, mode=self.mode)
class Fade(torch.nn.Module):
    r"""Add a fade in and/or fade out to an waveform.

    .. devices:: CPU CUDA

    .. properties:: Autograd TorchScript

    Args:
        fade_in_len (int, optional): Length of fade-in (time frames). (Default: ``0``)
        fade_out_len (int, optional): Length of fade-out (time frames). (Default: ``0``)
        fade_shape (str, optional): Shape of fade. Must be one of: "quarter_sine",
            ``"half_sine"``, ``"linear"``, ``"logarithmic"``, ``"exponential"``.
            (Default: ``"linear"``)

    Example
        >>> waveform, sample_rate = torchaudio.load("test.wav", normalize=True)
        >>> transform = transforms.Fade(fade_in_len=sample_rate, fade_out_len=2 * sample_rate, fade_shape="linear")
        >>> faded_waveform = transform(waveform)
    """

    def __init__(self, fade_in_len: int = 0, fade_out_len: int = 0, fade_shape: str = "linear") -> None:
        super().__init__()
        self.fade_in_len = fade_in_len
        self.fade_out_len = fade_out_len
        self.fade_shape = fade_shape

    def forward(self, waveform: Tensor) -> Tensor:
        r"""
        Args:
            waveform (Tensor): Tensor of audio of dimension `(..., time)`.

        Returns:
            Tensor: Tensor of audio of dimension `(..., time)`.
        """
        # Both envelopes cover the full signal length; multiplying them applies
        # the fade-in and fade-out simultaneously.
        n_frames = waveform.size()[-1]
        dev = waveform.device
        return self._fade_in(n_frames, dev) * self._fade_out(n_frames, dev) * waveform

    def _fade_in(self, waveform_length: int, device: torch.device) -> Tensor:
        # Ramp from 0 to 1 over fade_in_len frames, then hold at 1.
        ramp = torch.linspace(0, 1, self.fade_in_len, device=device)
        tail = torch.ones(waveform_length - self.fade_in_len, device=device)

        shape = self.fade_shape
        if shape == "exponential":
            ramp = torch.pow(2, (ramp - 1)) * ramp
        elif shape == "logarithmic":
            ramp = torch.log10(0.1 + ramp) + 1
        elif shape == "quarter_sine":
            ramp = torch.sin(ramp * math.pi / 2)
        elif shape == "half_sine":
            ramp = torch.sin(ramp * math.pi - math.pi / 2) / 2 + 0.5
        # "linear" needs no reshaping: the raw ramp is already the envelope.

        return torch.cat((ramp, tail)).clamp_(0, 1)

    def _fade_out(self, waveform_length: int, device: torch.device) -> Tensor:
        # Hold at 1, then ramp from 1 down to 0 over the last fade_out_len frames.
        ramp = torch.linspace(0, 1, self.fade_out_len, device=device)
        head = torch.ones(waveform_length - self.fade_out_len, device=device)

        shape = self.fade_shape
        if shape == "linear":
            ramp = -ramp + 1
        elif shape == "exponential":
            ramp = torch.pow(2, -ramp) * (1 - ramp)
        elif shape == "logarithmic":
            ramp = torch.log10(1.1 - ramp) + 1
        elif shape == "quarter_sine":
            ramp = torch.sin(ramp * math.pi / 2 + math.pi / 2)
        elif shape == "half_sine":
            ramp = torch.sin(ramp * math.pi + math.pi / 2) / 2 + 0.5

        return torch.cat((head, ramp)).clamp_(0, 1)
+ p (float, optional): maximum proportion of columns that can be masked. (Default: 1.0) + """ + __constants__ = ["mask_param", "axis", "iid_masks", "p"] + + def __init__(self, mask_param: int, axis: int, iid_masks: bool, p: float = 1.0) -> None: + super(_AxisMasking, self).__init__() + self.mask_param = mask_param + self.axis = axis + self.iid_masks = iid_masks + self.p = p + + def forward(self, specgram: Tensor, mask_value: float = 0.0) -> Tensor: + r""" + Args: + specgram (Tensor): Tensor of dimension `(..., freq, time)`. + mask_value (float): Value to assign to the masked columns. + + Returns: + Tensor: Masked spectrogram of dimensions `(..., freq, time)`. + """ + # if iid_masks flag marked and specgram has a batch dimension + # self.axis + specgram.dim() - 3 gives the time/frequency dimension (last two dimensions) + # for input tensor for which the dimension is not 3. + if self.iid_masks: + return F.mask_along_axis_iid( + specgram, self.mask_param, mask_value, self.axis + specgram.dim() - 3, p=self.p + ) + else: + return F.mask_along_axis(specgram, self.mask_param, mask_value, self.axis + specgram.dim() - 3, p=self.p) + + +class FrequencyMasking(_AxisMasking): + r"""Apply masking to a spectrogram in the frequency domain. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Proposed in *SpecAugment* :cite:`specaugment`. + + Args: + freq_mask_param (int): maximum possible length of the mask. + Indices uniformly sampled from [0, freq_mask_param). + iid_masks (bool, optional): whether to apply different masks to each + example/channel in the batch. (Default: ``False``) + This option is applicable only when the input tensor >= 3D. + + Example + >>> spectrogram = torchaudio.transforms.Spectrogram() + >>> masking = torchaudio.transforms.FrequencyMasking(freq_mask_param=80) + >>> + >>> original = spectrogram(waveform) + >>> masked = masking(original) + + .. 
class SpecAugment(torch.nn.Module):
    r"""Apply time and frequency masking to a spectrogram.

    Args:
        n_time_masks (int): Number of time masks. If its value is zero, no time masking will be applied.
        time_mask_param (int): Maximum possible length of the time mask.
        n_freq_masks (int): Number of frequency masks. If its value is zero, no frequency masking will be applied.
        freq_mask_param (int): Maximum possible length of the frequency mask.
        iid_masks (bool, optional): Applies iid masks to each of the examples in the batch dimension.
            This option is applicable only when the input tensor is 4D. (Default: ``True``)
        p (float, optional): maximum proportion of time steps that can be masked.
            Must be within range [0.0, 1.0]. (Default: 1.0)
        zero_masking (bool, optional): If ``True``, use 0 as the mask value,
            else use mean of the input tensor. (Default: ``False``)
    """
    __constants__ = [
        "n_time_masks",
        "time_mask_param",
        "n_freq_masks",
        "freq_mask_param",
        "iid_masks",
        "p",
        "zero_masking",
    ]

    def __init__(
        self,
        n_time_masks: int,
        time_mask_param: int,
        n_freq_masks: int,
        freq_mask_param: int,
        iid_masks: bool = True,
        p: float = 1.0,
        zero_masking: bool = False,
    ) -> None:
        super().__init__()
        self.n_time_masks = n_time_masks
        self.time_mask_param = time_mask_param
        self.n_freq_masks = n_freq_masks
        self.freq_mask_param = freq_mask_param
        self.iid_masks = iid_masks
        self.p = p
        self.zero_masking = zero_masking

    def forward(self, specgram: Tensor) -> Tensor:
        r"""
        Args:
            specgram (Tensor): Tensor of shape `(..., freq, time)`.

        Returns:
            Tensor: Masked spectrogram of shape `(..., freq, time)`.
        """
        # Fill masked regions with zero, or with the global mean of the input.
        mask_value = 0.0 if self.zero_masking else specgram.mean()
        # Time is always the last axis, frequency the second-to-last.
        time_axis = specgram.dim() - 1
        freq_axis = time_axis - 1

        # iid masks draw an independent mask per batch example; only meaningful
        # when a batch dimension is present (dim > 2).
        if specgram.dim() > 2 and self.iid_masks is True:
            for _ in range(self.n_time_masks):
                specgram = F.mask_along_axis_iid(specgram, self.time_mask_param, mask_value, time_axis, p=self.p)
            for _ in range(self.n_freq_masks):
                specgram = F.mask_along_axis_iid(specgram, self.freq_mask_param, mask_value, freq_axis, p=self.p)
        else:
            for _ in range(self.n_time_masks):
                specgram = F.mask_along_axis(specgram, self.time_mask_param, mask_value, time_axis, p=self.p)
            for _ in range(self.n_freq_masks):
                specgram = F.mask_along_axis(specgram, self.freq_mask_param, mask_value, freq_axis, p=self.p)

        return specgram


class Loudness(torch.nn.Module):
    r"""Measure audio loudness according to the ITU-R BS.1770-4 recommendation.

    .. devices:: CPU CUDA

    .. properties:: TorchScript

    Args:
        sample_rate (int): Sample rate of audio signal.

    Example
        >>> waveform, sample_rate = torchaudio.load("test.wav", normalize=True)
        >>> transform = transforms.Loudness(sample_rate)
        >>> loudness = transform(waveform)

    Reference:
        - https://www.itu.int/rec/R-REC-BS.1770-4-201510-I/en
    """
    __constants__ = ["sample_rate"]

    def __init__(self, sample_rate: int):
        super().__init__()
        self.sample_rate = sample_rate

    def forward(self, wavefrom: Tensor):
        r"""
        Args:
            waveform(torch.Tensor): audio waveform of dimension `(..., channels, time)`

        Returns:
            Tensor: loudness estimates (LKFS)
        """
        # NOTE(review): the parameter is spelled "wavefrom" upstream; the misspelled
        # name is kept to avoid breaking keyword-argument callers.
        return F.loudness(wavefrom, self.sample_rate)
+ If ``gain_type`` = ``db``, ``gain`` is in decibels. + gain_type (str, optional): Type of gain. One of: ``amplitude``, ``power``, ``db`` (Default: ``amplitude``) + + Example + >>> waveform, sample_rate = torchaudio.load("test.wav", normalize=True) + >>> transform = transforms.Vol(gain=0.5, gain_type="amplitude") + >>> quieter_waveform = transform(waveform) + """ + + def __init__(self, gain: float, gain_type: str = "amplitude"): + super(Vol, self).__init__() + self.gain = gain + self.gain_type = gain_type + + if gain_type in ["amplitude", "power"] and gain < 0: + raise ValueError("If gain_type = amplitude or power, gain must be positive.") + + def forward(self, waveform: Tensor) -> Tensor: + r""" + Args: + waveform (Tensor): Tensor of audio of dimension `(..., time)`. + + Returns: + Tensor: Tensor of audio of dimension `(..., time)`. + """ + if self.gain_type == "amplitude": + waveform = waveform * self.gain + + if self.gain_type == "db": + waveform = F.gain(waveform, self.gain) + + if self.gain_type == "power": + waveform = F.gain(waveform, 10 * math.log10(self.gain)) + + return torch.clamp(waveform, -1, 1) + + +class SlidingWindowCmn(torch.nn.Module): + r""" + Apply sliding-window cepstral mean (and optionally variance) normalization per utterance. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + cmn_window (int, optional): Window in frames for running average CMN computation (int, default = 600) + min_cmn_window (int, optional): Minimum CMN window used at start of decoding (adds latency only at start). + Only applicable if center == false, ignored if center==true (int, default = 100) + center (bool, optional): If true, use a window centered on the current frame + (to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false) + norm_vars (bool, optional): If true, normalize variance to one. 
(bool, default = false) + + Example + >>> waveform, sample_rate = torchaudio.load("test.wav", normalize=True) + >>> transform = transforms.SlidingWindowCmn(cmn_window=1000) + >>> cmn_waveform = transform(waveform) + """ + + def __init__( + self, cmn_window: int = 600, min_cmn_window: int = 100, center: bool = False, norm_vars: bool = False + ) -> None: + super().__init__() + self.cmn_window = cmn_window + self.min_cmn_window = min_cmn_window + self.center = center + self.norm_vars = norm_vars + + def forward(self, specgram: Tensor) -> Tensor: + r""" + Args: + specgram (Tensor): Tensor of spectrogram of dimension `(..., time, freq)`. + + Returns: + Tensor: Tensor of spectrogram of dimension `(..., time, freq)`. + """ + cmn_specgram = F.sliding_window_cmn(specgram, self.cmn_window, self.min_cmn_window, self.center, self.norm_vars) + return cmn_specgram + + +class Vad(torch.nn.Module): + r"""Voice Activity Detector. Similar to SoX implementation. + + .. devices:: CPU CUDA + + .. properties:: TorchScript + + Attempts to trim silence and quiet background sounds from the ends of recordings of speech. + The algorithm currently uses a simple cepstral power measurement to detect voice, + so may be fooled by other things, especially music. + + The effect can trim only from the front of the audio, + so in order to trim from the back, the reverse effect must also be used. + + Args: + sample_rate (int): Sample rate of audio signal. + trigger_level (float, optional): The measurement level used to trigger activity detection. + This may need to be changed depending on the noise level, signal level, + and other characteristics of the input audio. (Default: 7.0) + trigger_time (float, optional): The time constant (in seconds) + used to help ignore short bursts of sound. (Default: 0.25) + search_time (float, optional): The amount of audio (in seconds) + to search for quieter/shorter bursts of audio to include prior + to the detected trigger point. 
(Default: 1.0) + allowed_gap (float, optional): The allowed gap (in seconds) between + quiteter/shorter bursts of audio to include prior + to the detected trigger point. (Default: 0.25) + pre_trigger_time (float, optional): The amount of audio (in seconds) to preserve + before the trigger point and any found quieter/shorter bursts. (Default: 0.0) + boot_time (float, optional) The algorithm (internally) uses adaptive noise + estimation/reduction in order to detect the start of the wanted audio. + This option sets the time for the initial noise estimate. (Default: 0.35) + noise_up_time (float, optional) Time constant used by the adaptive noise estimator + for when the noise level is increasing. (Default: 0.1) + noise_down_time (float, optional) Time constant used by the adaptive noise estimator + for when the noise level is decreasing. (Default: 0.01) + noise_reduction_amount (float, optional) Amount of noise reduction to use in + the detection algorithm (e.g. 0, 0.5, ...). (Default: 1.35) + measure_freq (float, optional) Frequency of the algorithm’s + processing/measurements. (Default: 20.0) + measure_duration: (float or None, optional) Measurement duration. + (Default: Twice the measurement period; i.e. with overlap.) + measure_smooth_time (float, optional) Time constant used to smooth + spectral measurements. (Default: 0.4) + hp_filter_freq (float, optional) "Brick-wall" frequency of high-pass filter applied + at the input to the detector algorithm. (Default: 50.0) + lp_filter_freq (float, optional) "Brick-wall" frequency of low-pass filter applied + at the input to the detector algorithm. (Default: 6000.0) + hp_lifter_freq (float, optional) "Brick-wall" frequency of high-pass lifter used + in the detector algorithm. (Default: 150.0) + lp_lifter_freq (float, optional) "Brick-wall" frequency of low-pass lifter used + in the detector algorithm. 
(Default: 2000.0) + + Example + >>> waveform, sample_rate = torchaudio.load("test.wav", normalize=True) + >>> waveform_reversed, sample_rate = apply_effects_tensor(waveform, sample_rate, [["reverse"]]) + >>> transform = transforms.Vad(sample_rate=sample_rate, trigger_level=7.5) + >>> waveform_reversed_front_trim = transform(waveform_reversed) + >>> waveform_end_trim, sample_rate = apply_effects_tensor( + >>> waveform_reversed_front_trim, sample_rate, [["reverse"]] + >>> ) + + Reference: + - http://sox.sourceforge.net/sox.html + """ + + def __init__( + self, + sample_rate: int, + trigger_level: float = 7.0, + trigger_time: float = 0.25, + search_time: float = 1.0, + allowed_gap: float = 0.25, + pre_trigger_time: float = 0.0, + boot_time: float = 0.35, + noise_up_time: float = 0.1, + noise_down_time: float = 0.01, + noise_reduction_amount: float = 1.35, + measure_freq: float = 20.0, + measure_duration: Optional[float] = None, + measure_smooth_time: float = 0.4, + hp_filter_freq: float = 50.0, + lp_filter_freq: float = 6000.0, + hp_lifter_freq: float = 150.0, + lp_lifter_freq: float = 2000.0, + ) -> None: + super().__init__() + + self.sample_rate = sample_rate + self.trigger_level = trigger_level + self.trigger_time = trigger_time + self.search_time = search_time + self.allowed_gap = allowed_gap + self.pre_trigger_time = pre_trigger_time + self.boot_time = boot_time + self.noise_up_time = noise_up_time + self.noise_down_time = noise_down_time + self.noise_reduction_amount = noise_reduction_amount + self.measure_freq = measure_freq + self.measure_duration = measure_duration + self.measure_smooth_time = measure_smooth_time + self.hp_filter_freq = hp_filter_freq + self.lp_filter_freq = lp_filter_freq + self.hp_lifter_freq = hp_lifter_freq + self.lp_lifter_freq = lp_lifter_freq + + def forward(self, waveform: Tensor) -> Tensor: + r""" + Args: + waveform (Tensor): Tensor of audio of dimension `(channels, time)` or `(time)` + Tensor of shape `(channels, time)` is treated as 
a multi-channel recording + of the same event and the resulting output will be trimmed to the earliest + voice activity in any channel. + """ + return F.vad( + waveform=waveform, + sample_rate=self.sample_rate, + trigger_level=self.trigger_level, + trigger_time=self.trigger_time, + search_time=self.search_time, + allowed_gap=self.allowed_gap, + pre_trigger_time=self.pre_trigger_time, + boot_time=self.boot_time, + noise_up_time=self.noise_up_time, + noise_down_time=self.noise_down_time, + noise_reduction_amount=self.noise_reduction_amount, + measure_freq=self.measure_freq, + measure_duration=self.measure_duration, + measure_smooth_time=self.measure_smooth_time, + hp_filter_freq=self.hp_filter_freq, + lp_filter_freq=self.lp_filter_freq, + hp_lifter_freq=self.hp_lifter_freq, + lp_lifter_freq=self.lp_lifter_freq, + ) + + +class SpectralCentroid(torch.nn.Module): + r"""Compute the spectral centroid for each channel along the time axis. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + The spectral centroid is defined as the weighted average of the + frequency values, weighted by their magnitude. + + Args: + sample_rate (int): Sample rate of audio signal. + n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``) + win_length (int or None, optional): Window size. (Default: ``n_fft``) + hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``) + pad (int, optional): Two sided padding of signal. (Default: ``0``) + window_fn (Callable[..., Tensor], optional): A function to create a window tensor + that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``) + wkwargs (dict or None, optional): Arguments for window function. 
(Default: ``None``) + + Example + >>> waveform, sample_rate = torchaudio.load("test.wav", normalize=True) + >>> transform = transforms.SpectralCentroid(sample_rate) + >>> spectral_centroid = transform(waveform) # (channel, time) + """ + __constants__ = ["sample_rate", "n_fft", "win_length", "hop_length", "pad"] + + def __init__( + self, + sample_rate: int, + n_fft: int = 400, + win_length: Optional[int] = None, + hop_length: Optional[int] = None, + pad: int = 0, + window_fn: Callable[..., Tensor] = torch.hann_window, + wkwargs: Optional[dict] = None, + ) -> None: + super(SpectralCentroid, self).__init__() + self.sample_rate = sample_rate + self.n_fft = n_fft + self.win_length = win_length if win_length is not None else n_fft + self.hop_length = hop_length if hop_length is not None else self.win_length // 2 + window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs) + self.register_buffer("window", window) + self.pad = pad + + def forward(self, waveform: Tensor) -> Tensor: + r""" + Args: + waveform (Tensor): Tensor of audio of dimension `(..., time)`. + + Returns: + Tensor: Spectral Centroid of size `(..., time)`. + """ + + return F.spectral_centroid( + waveform, self.sample_rate, self.pad, self.window, self.n_fft, self.hop_length, self.win_length + ) + + +class PitchShift(LazyModuleMixin, torch.nn.Module): + r"""Shift the pitch of a waveform by ``n_steps`` steps. + + .. devices:: CPU CUDA + + .. properties:: TorchScript + + Args: + waveform (Tensor): The input waveform of shape `(..., time)`. + sample_rate (int): Sample rate of `waveform`. + n_steps (int): The (fractional) steps to shift `waveform`. + bins_per_octave (int, optional): The number of steps per octave (Default : ``12``). + n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins (Default: ``512``). + win_length (int or None, optional): Window size. If None, then ``n_fft`` is used. (Default: ``None``). 
+ hop_length (int or None, optional): Length of hop between STFT windows. If None, then ``win_length // 4`` + is used (Default: ``None``). + window (Tensor or None, optional): Window tensor that is applied/multiplied to each frame/window. + If None, then ``torch.hann_window(win_length)`` is used (Default: ``None``). + + Example + >>> waveform, sample_rate = torchaudio.load("test.wav", normalize=True) + >>> transform = transforms.PitchShift(sample_rate, 4) + >>> waveform_shift = transform(waveform) # (channel, time) + """ + __constants__ = ["sample_rate", "n_steps", "bins_per_octave", "n_fft", "win_length", "hop_length"] + + kernel: UninitializedParameter + width: int + + def __init__( + self, + sample_rate: int, + n_steps: int, + bins_per_octave: int = 12, + n_fft: int = 512, + win_length: Optional[int] = None, + hop_length: Optional[int] = None, + window_fn: Callable[..., Tensor] = torch.hann_window, + wkwargs: Optional[dict] = None, + ) -> None: + super().__init__() + self.n_steps = n_steps + self.bins_per_octave = bins_per_octave + self.sample_rate = sample_rate + self.n_fft = n_fft + self.win_length = win_length if win_length is not None else n_fft + self.hop_length = hop_length if hop_length is not None else self.win_length // 4 + window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs) + self.register_buffer("window", window) + rate = 2.0 ** (-float(n_steps) / bins_per_octave) + self.orig_freq = int(sample_rate / rate) + self.gcd = math.gcd(int(self.orig_freq), int(sample_rate)) + + if self.orig_freq != sample_rate: + self.width = -1 + self.kernel = UninitializedParameter(device=None, dtype=None) + + def initialize_parameters(self, input): + if self.has_uninitialized_params(): + if self.orig_freq != self.sample_rate: + with torch.no_grad(): + kernel, self.width = _get_sinc_resample_kernel( + self.orig_freq, + self.sample_rate, + self.gcd, + dtype=input.dtype, + device=input.device, + ) + 
self.kernel.materialize(kernel.shape) + self.kernel.copy_(kernel) + + def forward(self, waveform: Tensor) -> Tensor: + r""" + Args: + waveform (Tensor): Tensor of audio of dimension `(..., time)`. + + Returns: + Tensor: The pitch-shifted audio of shape `(..., time)`. + """ + shape = waveform.size() + + waveform_stretch = _stretch_waveform( + waveform, + self.n_steps, + self.bins_per_octave, + self.n_fft, + self.win_length, + self.hop_length, + self.window, + ) + + if self.orig_freq != self.sample_rate: + waveform_shift = _apply_sinc_resample_kernel( + waveform_stretch, + self.orig_freq, + self.sample_rate, + self.gcd, + self.kernel, + self.width, + ) + else: + waveform_shift = waveform_stretch + + return _fix_waveform_shape( + waveform_shift, + shape, + ) + + +class RNNTLoss(torch.nn.Module): + """Compute the RNN Transducer loss from *Sequence Transduction with Recurrent Neural Networks* + :cite:`graves2012sequence`. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + The RNN Transducer loss extends the CTC loss by defining a distribution over output + sequences of all lengths, and by jointly modelling both input-output and output-output + dependencies. + + Args: + blank (int, optional): blank label (Default: ``-1``) + clamp (float, optional): clamp for gradients (Default: ``-1``) + reduction (string, optional): Specifies the reduction to apply to the output: + ``"none"`` | ``"mean"`` | ``"sum"``. 
(Default: ``"mean"``) + fused_log_softmax (bool): set to False if calling log_softmax outside of loss (Default: ``True``) + + Example + >>> # Hypothetical values + >>> logits = torch.tensor([[[[0.1, 0.6, 0.1, 0.1, 0.1], + >>> [0.1, 0.1, 0.6, 0.1, 0.1], + >>> [0.1, 0.1, 0.2, 0.8, 0.1]], + >>> [[0.1, 0.6, 0.1, 0.1, 0.1], + >>> [0.1, 0.1, 0.2, 0.1, 0.1], + >>> [0.7, 0.1, 0.2, 0.1, 0.1]]]], + >>> dtype=torch.float32, + >>> requires_grad=True) + >>> targets = torch.tensor([[1, 2]], dtype=torch.int) + >>> logit_lengths = torch.tensor([2], dtype=torch.int) + >>> target_lengths = torch.tensor([2], dtype=torch.int) + >>> transform = transforms.RNNTLoss(blank=0) + >>> loss = transform(logits, targets, logit_lengths, target_lengths) + >>> loss.backward() + """ + + def __init__( + self, + blank: int = -1, + clamp: float = -1.0, + reduction: str = "mean", + fused_log_softmax: bool = True, + ): + super().__init__() + self.blank = blank + self.clamp = clamp + self.reduction = reduction + self.fused_log_softmax = fused_log_softmax + + def forward( + self, + logits: Tensor, + targets: Tensor, + logit_lengths: Tensor, + target_lengths: Tensor, + ): + """ + Args: + logits (Tensor): Tensor of dimension `(batch, max seq length, max target length + 1, class)` + containing output from joiner + targets (Tensor): Tensor of dimension `(batch, max target length)` containing targets with zero padded + logit_lengths (Tensor): Tensor of dimension `(batch)` containing lengths of each sequence from encoder + target_lengths (Tensor): Tensor of dimension `(batch)` containing lengths of targets for each sequence + Returns: + Tensor: Loss with the reduction option applied. If ``reduction`` is ``"none"``, then size (batch), + otherwise scalar. 
+ """ + return F.rnnt_loss( + logits, + targets, + logit_lengths, + target_lengths, + self.blank, + self.clamp, + self.reduction, + self.fused_log_softmax, + ) + + +class Convolve(torch.nn.Module): + r""" + Convolves inputs along their last dimension using the direct method. + Note that, in contrast to :class:`torch.nn.Conv1d`, which actually applies the valid cross-correlation + operator, this module applies the true `convolution`_ operator. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + mode (str, optional): Must be one of ("full", "valid", "same"). + + * "full": Returns the full convolution result, with shape `(..., N + M - 1)`, where + `N` and `M` are the trailing dimensions of the two inputs. (Default) + * "valid": Returns the segment of the full convolution result corresponding to where + the two inputs overlap completely, with shape `(..., max(N, M) - min(N, M) + 1)`. + * "same": Returns the center segment of the full convolution result, with shape `(..., N)`. + + .. _convolution: + https://en.wikipedia.org/wiki/Convolution + """ + + def __init__(self, mode: str = "full") -> None: + _check_convolve_mode(mode) + + super().__init__() + self.mode = mode + + def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: + r""" + Args: + x (torch.Tensor): First convolution operand, with shape `(..., N)`. + y (torch.Tensor): Second convolution operand, with shape `(..., M)` + (leading dimensions must be broadcast-able with those of ``x``). + + Returns: + torch.Tensor: Result of convolving ``x`` and ``y``, with shape `(..., L)`, where + the leading dimensions match those of ``x`` and `L` is dictated by ``mode``. + """ + return F.convolve(x, y, mode=self.mode) + + +class FFTConvolve(torch.nn.Module): + r""" + Convolves inputs along their last dimension using FFT. For inputs with large last dimensions, this module + is generally much faster than :class:`Convolve`. 
+ Note that, in contrast to :class:`torch.nn.Conv1d`, which actually applies the valid cross-correlation + operator, this module applies the true `convolution`_ operator. + Also note that this module can only output float tensors (int tensor inputs will be cast to float). + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + mode (str, optional): Must be one of ("full", "valid", "same"). + + * "full": Returns the full convolution result, with shape `(..., N + M - 1)`, where + `N` and `M` are the trailing dimensions of the two inputs. (Default) + * "valid": Returns the segment of the full convolution result corresponding to where + the two inputs overlap completely, with shape `(..., max(N, M) - min(N, M) + 1)`. + * "same": Returns the center segment of the full convolution result, with shape `(..., N)`. + + .. _convolution: + https://en.wikipedia.org/wiki/Convolution + """ + + def __init__(self, mode: str = "full") -> None: + _check_convolve_mode(mode) + + super().__init__() + self.mode = mode + + def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: + r""" + Args: + x (torch.Tensor): First convolution operand, with shape `(..., N)`. + y (torch.Tensor): Second convolution operand, with shape `(..., M)` + (leading dimensions must be broadcast-able with those of ``x``). + + Returns: + torch.Tensor: Result of convolving ``x`` and ``y``, with shape `(..., L)`, where + the leading dimensions match those of ``x`` and `L` is dictated by ``mode``. + """ + return F.fftconvolve(x, y, mode=self.mode) + + +def _source_target_sample_rate(orig_freq: int, speed: float) -> Tuple[int, int]: + source_sample_rate = int(speed * orig_freq) + target_sample_rate = int(orig_freq) + gcd = math.gcd(source_sample_rate, target_sample_rate) + return source_sample_rate // gcd, target_sample_rate // gcd + + +class Speed(torch.nn.Module): + r"""Adjusts waveform speed. + + .. devices:: CPU CUDA + + .. 
properties:: Autograd TorchScript + + Args: + orig_freq (int): Original frequency of the signals in ``waveform``. + factor (float): Factor by which to adjust speed of input. Values greater than 1.0 + compress ``waveform`` in time, whereas values less than 1.0 stretch ``waveform`` in time. + """ + + def __init__(self, orig_freq, factor) -> None: + super().__init__() + + self.orig_freq = orig_freq + self.factor = factor + + self.source_sample_rate, self.target_sample_rate = _source_target_sample_rate(orig_freq, factor) + self.resampler = Resample(orig_freq=self.source_sample_rate, new_freq=self.target_sample_rate) + + def forward(self, waveform, lengths: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + r""" + Args: + waveform (torch.Tensor): Input signals, with shape `(..., time)`. + lengths (torch.Tensor or None, optional): Valid lengths of signals in ``waveform``, with shape `(...)`. + If ``None``, all elements in ``waveform`` are treated as valid. (Default: ``None``) + + Returns: + (torch.Tensor, torch.Tensor or None): + torch.Tensor + Speed-adjusted waveform, with shape `(..., new_time).` + torch.Tensor or None + If ``lengths`` is not ``None``, valid lengths of signals in speed-adjusted waveform, + with shape `(...)`; otherwise, ``None``. + """ + + if lengths is None: + out_lengths = None + else: + out_lengths = torch.ceil(lengths * self.target_sample_rate / self.source_sample_rate).to(lengths.dtype) + + return self.resampler(waveform), out_lengths + + +class SpeedPerturbation(torch.nn.Module): + r"""Applies the speed perturbation augmentation introduced in + *Audio augmentation for speech recognition* :cite:`ko15_interspeech`. For a given input, + the module samples a speed-up factor from ``factors`` uniformly at random and adjusts + the speed of the input by that factor. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + orig_freq (int): Original frequency of the signals in ``waveform``. 
+ factors (Sequence[float]): Factors by which to adjust speed of input. Values greater than 1.0 + compress ``waveform`` in time, whereas values less than 1.0 stretch ``waveform`` in time. + + Example + >>> speed_perturb = SpeedPerturbation(16000, [0.9, 1.1, 1.0, 1.0, 1.0]) + >>> # waveform speed will be adjusted by factor 0.9 with 20% probability, + >>> # 1.1 with 20% probability, and 1.0 (i.e. kept the same) with 60% probability. + >>> speed_perturbed_waveform = speed_perturb(waveform, lengths) + """ + + def __init__(self, orig_freq: int, factors: Sequence[float]) -> None: + super().__init__() + + self.speeders = torch.nn.ModuleList([Speed(orig_freq=orig_freq, factor=factor) for factor in factors]) + + def forward( + self, waveform: torch.Tensor, lengths: Optional[torch.Tensor] = None + ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + r""" + Args: + waveform (torch.Tensor): Input signals, with shape `(..., time)`. + lengths (torch.Tensor or None, optional): Valid lengths of signals in ``waveform``, with shape `(...)`. + If ``None``, all elements in ``waveform`` are treated as valid. (Default: ``None``) + + Returns: + (torch.Tensor, torch.Tensor or None): + torch.Tensor + Speed-adjusted waveform, with shape `(..., new_time).` + torch.Tensor or None + If ``lengths`` is not ``None``, valid lengths of signals in speed-adjusted waveform, + with shape `(...)`; otherwise, ``None``. + """ + + idx = int(torch.randint(len(self.speeders), ())) + # NOTE: we do this because TorchScript doesn't allow for + # indexing ModuleList instances with non-literals. + for speeder_idx, speeder in enumerate(self.speeders): + if idx == speeder_idx: + return speeder(waveform, lengths) + raise RuntimeError("Speeder not found; execution should have never reached here.") + + +class AddNoise(torch.nn.Module): + r"""Scales and adds noise to waveform per signal-to-noise ratio. + See :meth:`torchaudio.functional.add_noise` for more details. + + .. devices:: CPU CUDA + + .. 
properties:: Autograd TorchScript + """ + + def forward( + self, waveform: torch.Tensor, noise: torch.Tensor, snr: torch.Tensor, lengths: Optional[torch.Tensor] = None + ) -> torch.Tensor: + r""" + Args: + waveform (torch.Tensor): Input waveform, with shape `(..., L)`. + noise (torch.Tensor): Noise, with shape `(..., L)` (same shape as ``waveform``). + snr (torch.Tensor): Signal-to-noise ratios in dB, with shape `(...,)`. + lengths (torch.Tensor or None, optional): Valid lengths of signals in ``waveform`` and ``noise``, + with shape `(...,)` (leading dimensions must match those of ``waveform``). If ``None``, all + elements in ``waveform`` and ``noise`` are treated as valid. (Default: ``None``) + + Returns: + torch.Tensor: Result of scaling and adding ``noise`` to ``waveform``, with shape `(..., L)` + (same shape as ``waveform``). + """ + return F.add_noise(waveform, noise, snr, lengths) + + +class Preemphasis(torch.nn.Module): + r"""Pre-emphasizes a waveform along its last dimension. + See :meth:`torchaudio.functional.preemphasis` for more details. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + coeff (float, optional): Pre-emphasis coefficient. Typically between 0.0 and 1.0. + (Default: 0.97) + """ + + def __init__(self, coeff: float = 0.97) -> None: + super().__init__() + self.coeff = coeff + + def forward(self, waveform: torch.Tensor) -> torch.Tensor: + r""" + Args: + waveform (torch.Tensor): Waveform, with shape `(..., N)`. + + Returns: + torch.Tensor: Pre-emphasized waveform, with shape `(..., N)`. + """ + return F.preemphasis(waveform, coeff=self.coeff) + + +class Deemphasis(torch.nn.Module): + r"""De-emphasizes a waveform along its last dimension. + See :meth:`torchaudio.functional.deemphasis` for more details. + + .. devices:: CPU CUDA + + .. properties:: Autograd TorchScript + + Args: + coeff (float, optional): De-emphasis coefficient. Typically between 0.0 and 1.0. 
+ (Default: 0.97) + """ + + def __init__(self, coeff: float = 0.97) -> None: + super().__init__() + self.coeff = coeff + + def forward(self, waveform: torch.Tensor) -> torch.Tensor: + r""" + Args: + waveform (torch.Tensor): Waveform, with shape `(..., N)`. + + Returns: + torch.Tensor: De-emphasized waveform, with shape `(..., N)`. + """ + return F.deemphasis(waveform, coeff=self.coeff) diff --git a/venv/lib/python3.10/site-packages/torchaudio/utils/__init__.py b/venv/lib/python3.10/site-packages/torchaudio/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..89bffaa34d61fbb12cfafbe7287af0b92139b19c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/utils/__init__.py @@ -0,0 +1,11 @@ +from torio.utils import ffmpeg_utils + +from . import sox_utils +from .download import download_asset + + +__all__ = [ + "download_asset", + "sox_utils", + "ffmpeg_utils", +] diff --git a/venv/lib/python3.10/site-packages/torchaudio/utils/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5fd7d182fa204c2858747f1f5967898d8d443c4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/utils/__pycache__/download.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/utils/__pycache__/download.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..573ef33b5e0e769abccfb68267874c5cd17ba9a7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/utils/__pycache__/download.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/utils/__pycache__/ffmpeg_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/utils/__pycache__/ffmpeg_utils.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..45e1cff6f2b5e4f27a1c0306d290d5e2a2473860 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/utils/__pycache__/ffmpeg_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/utils/__pycache__/sox_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torchaudio/utils/__pycache__/sox_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7cf30f5ad4994c17be5a54a7aef043d9596283e3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torchaudio/utils/__pycache__/sox_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torchaudio/utils/download.py b/venv/lib/python3.10/site-packages/torchaudio/utils/download.py new file mode 100644 index 0000000000000000000000000000000000000000..2081877d15a13e91a6fcb87905634addd23cc712 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/utils/download.py @@ -0,0 +1,89 @@ +import hashlib +import logging +from os import PathLike +from pathlib import Path +from typing import Union + +import torch +from torchaudio._internal import download_url_to_file + +_LG = logging.getLogger(__name__) + + +def _get_local_path(key): + path = Path(torch.hub.get_dir()) / "torchaudio" / Path(key) + path.parent.mkdir(parents=True, exist_ok=True) + return path + + +def _download(key, path, progress): + url = f"https://download.pytorch.org/torchaudio/{key}" + download_url_to_file(url, path, progress=progress) + + +def _get_hash(path, hash, chunk_size=1028): + m = hashlib.sha256() + with open(path, "rb") as file: + data = file.read(chunk_size) + while data: + m.update(data) + data = file.read(chunk_size) + return m.hexdigest() + + +def download_asset( + key: str, + hash: str = "", + path: Union[str, PathLike] = "", + *, + progress: bool = True, +) -> str: + """Download and store torchaudio assets to local file system. 
+ + If a file exists at the download path, then that path is returned with or without + hash validation. + + Args: + key (str): The asset identifier. + hash (str, optional): + The value of SHA256 hash of the asset. If provided, it is used to verify + the downloaded / cached object. If not provided, then no hash validation + is performed. This means if a file exists at the download path, then the path + is returned as-is without verifying the identity of the file. + path (path-like object, optional): + By default, the downloaded asset is saved in a directory under + :py:func:`torch.hub.get_dir` and intermediate directories based on the given `key` + are created. + This argument can be used to overwrite the target location. + When this argument is provided, all the intermediate directories have to be + created beforehand. + progress (bool): Whether to show progress bar for downloading. Default: ``True``. + + Note: + Currently the valid key values are the route on ``download.pytorch.org/torchaudio``, + but this is an implementation detail. + + Returns: + str: The path to the asset on the local file system. + """ + path = path or _get_local_path(key) + + if path.exists(): + _LG.info("The local file (%s) exists. Skipping the download.", path) + else: + _LG.info("Downloading %s to %s", key, path) + _download(key, path, progress=progress) + + if hash: + _LG.info("Verifying the hash value.") + digest = _get_hash(path, hash) + + if digest != hash: + raise ValueError( + f"The hash value of the downloaded file ({path}), '{digest}' does not match " + f"the provided hash value, '{hash}'." 
+ ) + + _LG.info("Hash validated.") + + return str(path) diff --git a/venv/lib/python3.10/site-packages/torchaudio/utils/ffmpeg_utils.py b/venv/lib/python3.10/site-packages/torchaudio/utils/ffmpeg_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..385596edc1491e45ecd4aae14a07b2c0e64ecd22 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/utils/ffmpeg_utils.py @@ -0,0 +1,11 @@ +"""Module to change the configuration of FFmpeg libraries (such as libavformat). + +It affects functionalities in :py:mod:`torchaudio.io` (and indirectly :py:func:`torchaudio.load`). +""" + + +# This file is just for BC. +def __getattr__(item): + from torio.utils import ffmpeg_utils + + return getattr(ffmpeg_utils, item) diff --git a/venv/lib/python3.10/site-packages/torchaudio/utils/sox_utils.py b/venv/lib/python3.10/site-packages/torchaudio/utils/sox_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5212b77ea9d5ae0e58741322db7c9852a4ddafff --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/utils/sox_utils.py @@ -0,0 +1,99 @@ +"""Module to change the configuration of libsox, which is used by I/O functions like +:py:mod:`~torchaudio.backend.sox_io_backend` and :py:mod:`~torchaudio.sox_effects`. +""" + +from typing import Dict, List + +import torchaudio + +sox_ext = torchaudio._extension.lazy_import_sox_ext() + + +def set_seed(seed: int): + """Set libsox's PRNG + + Args: + seed (int): seed value. valid range is int32. + + See Also: + http://sox.sourceforge.net/sox.html + """ + sox_ext.set_seed(seed) + + +def set_verbosity(verbosity: int): + """Set libsox's verbosity + + Args: + verbosity (int): Set verbosity level of libsox. 
+ + * ``1`` failure messages + * ``2`` warnings + * ``3`` details of processing + * ``4``-``6`` increasing levels of debug messages + + See Also: + http://sox.sourceforge.net/sox.html + """ + sox_ext.set_verbosity(verbosity) + + +def set_buffer_size(buffer_size: int): + """Set buffer size for sox effect chain + + Args: + buffer_size (int): Set the size in bytes of the buffers used for processing audio. + + See Also: + http://sox.sourceforge.net/sox.html + """ + sox_ext.set_buffer_size(buffer_size) + + +def set_use_threads(use_threads: bool): + """Set multithread option for sox effect chain + + Args: + use_threads (bool): When ``True``, enables ``libsox``'s parallel effects channels processing. + To use mutlithread, the underlying ``libsox`` has to be compiled with OpenMP support. + + See Also: + http://sox.sourceforge.net/sox.html + """ + sox_ext.set_use_threads(use_threads) + + +def list_effects() -> Dict[str, str]: + """List the available sox effect names + + Returns: + Dict[str, str]: Mapping from ``effect name`` to ``usage`` + """ + return dict(sox_ext.list_effects()) + + +def list_read_formats() -> List[str]: + """List the supported audio formats for read + + Returns: + List[str]: List of supported audio formats + """ + return sox_ext.list_read_formats() + + +def list_write_formats() -> List[str]: + """List the supported audio formats for write + + Returns: + List[str]: List of supported audio formats + """ + return sox_ext.list_write_formats() + + +def get_buffer_size() -> int: + """Get buffer size for sox effect chain + + Returns: + int: size in bytes of buffers used for processing audio. 
+ """ + return sox_ext.get_buffer_size() diff --git a/venv/lib/python3.10/site-packages/torchaudio/version.py b/venv/lib/python3.10/site-packages/torchaudio/version.py new file mode 100644 index 0000000000000000000000000000000000000000..497be0ddcb55299abcda5ab5c65a0605f39166d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torchaudio/version.py @@ -0,0 +1,2 @@ +__version__ = '2.6.0+cu124' +git_version = 'd8831425203385077a03c1d92cfbbe3bf2106008' diff --git a/venv/lib/python3.10/site-packages/typer-0.16.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/typer-0.16.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/typer-0.16.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/typer-0.16.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/typer-0.16.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..1de4a67d210059b2ce00534323b0dad953ea10ed --- /dev/null +++ b/venv/lib/python3.10/site-packages/typer-0.16.0.dist-info/METADATA @@ -0,0 +1,422 @@ +Metadata-Version: 2.1 +Name: typer +Version: 0.16.0 +Summary: Typer, build great CLIs. Easy to code. Based on Python type hints. 
+Author-Email: =?utf-8?q?Sebasti=C3=A1n_Ram=C3=ADrez?= +Classifier: Intended Audience :: Information Technology +Classifier: Intended Audience :: System Administrators +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python +Classifier: Topic :: Software Development :: Libraries :: Application Frameworks +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Software Development +Classifier: Typing :: Typed +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: License :: OSI Approved :: MIT License +Project-URL: Homepage, https://github.com/fastapi/typer +Project-URL: Documentation, https://typer.tiangolo.com +Project-URL: Repository, https://github.com/fastapi/typer +Project-URL: Issues, https://github.com/fastapi/typer/issues +Project-URL: Changelog, https://typer.tiangolo.com/release-notes/ +Requires-Python: >=3.7 +Requires-Dist: click>=8.0.0 +Requires-Dist: typing-extensions>=3.7.4.3 +Requires-Dist: shellingham>=1.3.0 +Requires-Dist: rich>=10.11.0 +Description-Content-Type: text/markdown + +

    + Typer + +

    +

    + Typer, build great CLIs. Easy to code. Based on Python type hints. +

    +

    + + Test + + + Publish + + + Coverage + + Package version + +

    + +--- + +**Documentation**: https://typer.tiangolo.com + +**Source Code**: https://github.com/fastapi/typer + +--- + +Typer is a library for building CLI applications that users will **love using** and developers will **love creating**. Based on Python type hints. + +It's also a command line tool to run scripts, automatically converting them to CLI applications. + +The key features are: + +* **Intuitive to write**: Great editor support. Completion everywhere. Less time debugging. Designed to be easy to use and learn. Less time reading docs. +* **Easy to use**: It's easy to use for the final users. Automatic help, and automatic completion for all shells. +* **Short**: Minimize code duplication. Multiple features from each parameter declaration. Fewer bugs. +* **Start simple**: The simplest example adds only 2 lines of code to your app: **1 import, 1 function call**. +* **Grow large**: Grow in complexity as much as you want, create arbitrarily complex trees of commands and groups of subcommands, with options and arguments. +* **Run scripts**: Typer includes a `typer` command/program that you can use to run scripts, automatically converting them to CLIs, even if they don't use Typer internally. + +## FastAPI of CLIs + +**Typer** is FastAPI's little sibling, it's the FastAPI of CLIs. + +## Installation + +Create and activate a virtual environment and then install **Typer**: + +
    + +```console +$ pip install typer +---> 100% +Successfully installed typer rich shellingham +``` + +
    + +## Example + +### The absolute minimum + +* Create a file `main.py` with: + +```Python +def main(name: str): + print(f"Hello {name}") +``` + +This script doesn't even use Typer internally. But you can use the `typer` command to run it as a CLI application. + +### Run it + +Run your application with the `typer` command: + +
    + +```console +// Run your application +$ typer main.py run + +// You get a nice error, you are missing NAME +Usage: typer [PATH_OR_MODULE] run [OPTIONS] NAME +Try 'typer [PATH_OR_MODULE] run --help' for help. +╭─ Error ───────────────────────────────────────────╮ +│ Missing argument 'NAME'. │ +╰───────────────────────────────────────────────────╯ + + +// You get a --help for free +$ typer main.py run --help + +Usage: typer [PATH_OR_MODULE] run [OPTIONS] NAME + +Run the provided Typer app. + +╭─ Arguments ───────────────────────────────────────╮ +│ * name TEXT [default: None] [required] | +╰───────────────────────────────────────────────────╯ +╭─ Options ─────────────────────────────────────────╮ +│ --help Show this message and exit. │ +╰───────────────────────────────────────────────────╯ + +// Now pass the NAME argument +$ typer main.py run Camila + +Hello Camila + +// It works! 🎉 +``` + +
    + +This is the simplest use case, not even using Typer internally, but it can already be quite useful for simple scripts. + +**Note**: auto-completion works when you create a Python package and run it with `--install-completion` or when you use the `typer` command. + +## Use Typer in your code + +Now let's start using Typer in your own code, update `main.py` with: + +```Python +import typer + + +def main(name: str): + print(f"Hello {name}") + + +if __name__ == "__main__": + typer.run(main) +``` + +Now you could run it with Python directly: + +
    + +```console +// Run your application +$ python main.py + +// You get a nice error, you are missing NAME +Usage: main.py [OPTIONS] NAME +Try 'main.py --help' for help. +╭─ Error ───────────────────────────────────────────╮ +│ Missing argument 'NAME'. │ +╰───────────────────────────────────────────────────╯ + + +// You get a --help for free +$ python main.py --help + +Usage: main.py [OPTIONS] NAME + +╭─ Arguments ───────────────────────────────────────╮ +│ * name TEXT [default: None] [required] | +╰───────────────────────────────────────────────────╯ +╭─ Options ─────────────────────────────────────────╮ +│ --help Show this message and exit. │ +╰───────────────────────────────────────────────────╯ + +// Now pass the NAME argument +$ python main.py Camila + +Hello Camila + +// It works! 🎉 +``` + +
    + +**Note**: you can also call this same script with the `typer` command, but you don't need to. + +## Example upgrade + +This was the simplest example possible. + +Now let's see one a bit more complex. + +### An example with two subcommands + +Modify the file `main.py`. + +Create a `typer.Typer()` app, and create two subcommands with their parameters. + +```Python hl_lines="3 6 11 20" +import typer + +app = typer.Typer() + + +@app.command() +def hello(name: str): + print(f"Hello {name}") + + +@app.command() +def goodbye(name: str, formal: bool = False): + if formal: + print(f"Goodbye Ms. {name}. Have a good day.") + else: + print(f"Bye {name}!") + + +if __name__ == "__main__": + app() +``` + +And that will: + +* Explicitly create a `typer.Typer` app. + * The previous `typer.run` actually creates one implicitly for you. +* Add two subcommands with `@app.command()`. +* Execute the `app()` itself, as if it was a function (instead of `typer.run`). + +### Run the upgraded example + +Check the new help: + +
    + +```console +$ python main.py --help + + Usage: main.py [OPTIONS] COMMAND [ARGS]... + +╭─ Options ─────────────────────────────────────────╮ +│ --install-completion Install completion │ +│ for the current │ +│ shell. │ +│ --show-completion Show completion for │ +│ the current shell, │ +│ to copy it or │ +│ customize the │ +│ installation. │ +│ --help Show this message │ +│ and exit. │ +╰───────────────────────────────────────────────────╯ +╭─ Commands ────────────────────────────────────────╮ +│ goodbye │ +│ hello │ +╰───────────────────────────────────────────────────╯ + +// When you create a package you get ✨ auto-completion ✨ for free, installed with --install-completion + +// You have 2 subcommands (the 2 functions): goodbye and hello +``` + +
    + +Now check the help for the `hello` command: + +
    + +```console +$ python main.py hello --help + + Usage: main.py hello [OPTIONS] NAME + +╭─ Arguments ───────────────────────────────────────╮ +│ * name TEXT [default: None] [required] │ +╰───────────────────────────────────────────────────╯ +╭─ Options ─────────────────────────────────────────╮ +│ --help Show this message and exit. │ +╰───────────────────────────────────────────────────╯ +``` + +
    + +And now check the help for the `goodbye` command: + +
    + +```console +$ python main.py goodbye --help + + Usage: main.py goodbye [OPTIONS] NAME + +╭─ Arguments ───────────────────────────────────────╮ +│ * name TEXT [default: None] [required] │ +╰───────────────────────────────────────────────────╯ +╭─ Options ─────────────────────────────────────────╮ +│ --formal --no-formal [default: no-formal] │ +│ --help Show this message │ +│ and exit. │ +╰───────────────────────────────────────────────────╯ + +// Automatic --formal and --no-formal for the bool option 🎉 +``` + +
    + +Now you can try out the new command line application: + +
    + +```console +// Use it with the hello command + +$ python main.py hello Camila + +Hello Camila + +// And with the goodbye command + +$ python main.py goodbye Camila + +Bye Camila! + +// And with --formal + +$ python main.py goodbye --formal Camila + +Goodbye Ms. Camila. Have a good day. +``` + +
    + +### Recap + +In summary, you declare **once** the types of parameters (*CLI arguments* and *CLI options*) as function parameters. + +You do that with standard modern Python types. + +You don't have to learn a new syntax, the methods or classes of a specific library, etc. + +Just standard **Python**. + +For example, for an `int`: + +```Python +total: int +``` + +or for a `bool` flag: + +```Python +force: bool +``` + +And similarly for **files**, **paths**, **enums** (choices), etc. And there are tools to create **groups of subcommands**, add metadata, extra **validation**, etc. + +**You get**: great editor support, including **completion** and **type checks** everywhere. + +**Your users get**: automatic **`--help`**, **auto-completion** in their terminal (Bash, Zsh, Fish, PowerShell) when they install your package or when using the `typer` command. + +For a more complete example including more features, see the Tutorial - User Guide. + +## Dependencies + +**Typer** stands on the shoulders of a giant. Its only internal required dependency is Click. + +By default it also comes with extra standard dependencies: + +* rich: to show nicely formatted errors automatically. +* shellingham: to automatically detect the current shell when installing completion. + * With `shellingham` you can just use `--install-completion`. + * Without `shellingham`, you have to pass the name of the shell to install completion for, e.g. `--install-completion bash`. + +### `typer-slim` + +If you don't want the extra standard optional dependencies, install `typer-slim` instead. + +When you install with: + +```bash +pip install typer +``` + +...it includes the same code and dependencies as: + +```bash +pip install "typer-slim[standard]" +``` + +The `standard` extra dependencies are `rich` and `shellingham`. + +**Note**: The `typer` command is only included in the `typer` package. + +## License + +This project is licensed under the terms of the MIT license. 
diff --git a/venv/lib/python3.10/site-packages/typer-0.16.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/typer-0.16.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..47c1f994e54bc3f9952e0b1d4516969c3973b211 --- /dev/null +++ b/venv/lib/python3.10/site-packages/typer-0.16.0.dist-info/RECORD @@ -0,0 +1,40 @@ +../../../bin/typer,sha256=8kxw0MOLj9rpjo207S_1STrEQo64orl2C2LV3DdplJ4,278 +typer-0.16.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +typer-0.16.0.dist-info/METADATA,sha256=4n4FU9l0gR5JhaGE3ERjZn_aHAklo64CkFk_gzI1kdM,15721 +typer-0.16.0.dist-info/RECORD,, +typer-0.16.0.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90 +typer-0.16.0.dist-info/entry_points.txt,sha256=YO13ByiqWeuas9V0JADLUARZFUe_cwU_7wmTNvxBYQ8,57 +typer-0.16.0.dist-info/licenses/LICENSE,sha256=WJks68-N-25AxOIRLtEhJsJDZm3KORKj14t-ysSFnUk,1086 +typer/__init__.py,sha256=D3u-F2ltL-fo4S8GGp0g-OEaovfTSp-W6eAv1jKqBU8,1596 +typer/__main__.py,sha256=bYt9eEaoRQWdejEHFD8REx9jxVEdZptECFsV7F49Ink,30 +typer/__pycache__/__init__.cpython-310.pyc,, +typer/__pycache__/__main__.cpython-310.pyc,, +typer/__pycache__/_completion_classes.cpython-310.pyc,, +typer/__pycache__/_completion_shared.cpython-310.pyc,, +typer/__pycache__/_types.cpython-310.pyc,, +typer/__pycache__/_typing.cpython-310.pyc,, +typer/__pycache__/cli.cpython-310.pyc,, +typer/__pycache__/colors.cpython-310.pyc,, +typer/__pycache__/completion.cpython-310.pyc,, +typer/__pycache__/core.cpython-310.pyc,, +typer/__pycache__/main.cpython-310.pyc,, +typer/__pycache__/models.cpython-310.pyc,, +typer/__pycache__/params.cpython-310.pyc,, +typer/__pycache__/rich_utils.cpython-310.pyc,, +typer/__pycache__/testing.cpython-310.pyc,, +typer/__pycache__/utils.cpython-310.pyc,, +typer/_completion_classes.py,sha256=Px9bV56Y4J_F9S_sUI2iPpUh_i--Chocxrk4HAut2HE,7385 +typer/_completion_shared.py,sha256=4lFOUhXSry2bIZR9al7Uq33itpgSzg6eagquAysNmOE,8758 
+typer/_types.py,sha256=kSLxhKmX37YzizQjqYUAWmr_JFcCW5vhEc4YshDTC9Q,1031 +typer/_typing.py,sha256=nsZ-TKcMlGAtqiWXM60r97rqtWMZxdhwj_YkLo8_neM,3001 +typer/cli.py,sha256=YaXpDud7wRtDCsJsWkE1L0BPWhpIHAaPbvZNLTJ854w,9779 +typer/colors.py,sha256=e42j8uB520hLpX5C_0fiR3OOoIFMbhO3ADZvv6hlAV8,430 +typer/completion.py,sha256=d1AiptrsEwUeSPQaIA5oiv4T3Xy3MDq5o3VIjK39Qeg,4810 +typer/core.py,sha256=TsdtoJYOOa4-sppkYybsAbLJ6gN3EJyPSXUJu5ib5rA,26530 +typer/main.py,sha256=Upb2hrXidyzCkSZQ5BdDipG3c4KRU5v7L-fV-9b19Mw,42043 +typer/models.py,sha256=Q6v9BQYutNlH44i7fn0YbZ-OeRXsgib1o_tR4gTmqow,17188 +typer/params.py,sha256=MRVCwRPzNMkOdYU6VNVGkawX_gAoYzbiCfL_tYcR6x8,14929 +typer/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +typer/rich_utils.py,sha256=svygTTSUOs94K3r_VNnmMjO1GOXGVa0GqX59k2c77aU,25315 +typer/testing.py,sha256=Mb_HqTkpPw24qsVYxCQrDJpjq_oOHlgqZpauWofxkq0,874 +typer/utils.py,sha256=G0qddDX06YtHuMJNCmj-frLJYkYxfUa7iwO6KOTX2FI,7368 diff --git a/venv/lib/python3.10/site-packages/typer-0.16.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/typer-0.16.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..45ec8c4e6bf53687a824041168247bae0953dd7c --- /dev/null +++ b/venv/lib/python3.10/site-packages/typer-0.16.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: pdm-backend (2.4.4) +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/venv/lib/python3.10/site-packages/typer-0.16.0.dist-info/entry_points.txt b/venv/lib/python3.10/site-packages/typer-0.16.0.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..ca44c05a071b87fe02a3724bd895838305fe4e5d --- /dev/null +++ b/venv/lib/python3.10/site-packages/typer-0.16.0.dist-info/entry_points.txt @@ -0,0 +1,5 @@ +[console_scripts] +typer = typer.cli:main + +[gui_scripts] + diff --git a/venv/lib/python3.10/site-packages/typer-0.16.0.dist-info/licenses/LICENSE 
b/venv/lib/python3.10/site-packages/typer-0.16.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..a7694736cf37716aafec14b24aa8d6316ebe07a3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/typer-0.16.0.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2019 Sebastián Ramírez + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/venv/lib/python3.10/site-packages/xformers-0.0.29.post2.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/xformers-0.0.29.post2.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/xformers-0.0.29.post2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/xformers-0.0.29.post2.dist-info/LICENSE b/venv/lib/python3.10/site-packages/xformers-0.0.29.post2.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..4555634d3e51beb788e94ac535d01b6482514a5b --- /dev/null +++ b/venv/lib/python3.10/site-packages/xformers-0.0.29.post2.dist-info/LICENSE @@ -0,0 +1,35 @@ +From xFormers: + +Copyright (c) Facebook, Inc. and its affiliates + + +=== + +BSD 3-Clause License + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America + and IDIAP Research Institute nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. diff --git a/venv/lib/python3.10/site-packages/xformers-0.0.29.post2.dist-info/METADATA b/venv/lib/python3.10/site-packages/xformers-0.0.29.post2.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..f4630ab4828ce28a9bcb24a601797c4ac523b877 --- /dev/null +++ b/venv/lib/python3.10/site-packages/xformers-0.0.29.post2.dist-info/METADATA @@ -0,0 +1,21 @@ +Metadata-Version: 2.1 +Name: xformers +Version: 0.0.29.post2 +Summary: XFormers: A collection of composable Transformer building blocks. 
+Home-page: https://facebookresearch.github.io/xformers/ +Author: Facebook AI Research +Author-email: oncall+xformers@xmail.facebook.com +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: License :: OSI Approved :: BSD License +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Operating System :: OS Independent +Requires-Python: >=3.9 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: numpy +Requires-Dist: torch==2.6.0 + +XFormers: A collection of composable Transformer building blocks.XFormers aims at being able to reproduce most architectures in the Transformer-family SOTA,defined as compatible and combined building blocks as opposed to monolithic models diff --git a/venv/lib/python3.10/site-packages/xformers-0.0.29.post2.dist-info/RECORD b/venv/lib/python3.10/site-packages/xformers-0.0.29.post2.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..fb2af744e466f83768d41dbc2e8dce1e056f08d0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/xformers-0.0.29.post2.dist-info/RECORD @@ -0,0 +1,317 @@ +xformers-0.0.29.post2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +xformers-0.0.29.post2.dist-info/LICENSE,sha256=iwaVhtHd3wMc1GkkT2Y_GzmnThI1eC5iVcJJQ-ACt-o,1610 +xformers-0.0.29.post2.dist-info/METADATA,sha256=h-twd8OUHZbe5hciqDvhJ6IfS17P4NNTZl6_2CCAcp8,1014 +xformers-0.0.29.post2.dist-info/RECORD,, +xformers-0.0.29.post2.dist-info/WHEEL,sha256=-wdbF42uEDqyK3Oo1Xe0P8Ss8ux7axbac4IJpS3CCHY,114 +xformers-0.0.29.post2.dist-info/top_level.txt,sha256=4Px1VcGhKk0j3XhKXjA8HtTm6EQOb0hazeJ5nQsNlKk,9 +xformers/_C.so,sha256=pS6YN5S_xYPBqI8VLOQuWQvSHKV68ty-K_dHbw70hts,47822552 +xformers/_C_flashattention3.so,sha256=rJOLQPJhxvZraaJZPRwsYYr1-CrR7NBldF5FjQdCbSQ,120689992 
+xformers/__init__.py,sha256=TKDA28gOAQdAzwVvU9IEpGWz677QRKgVFmnqvngAg2w,1710 +xformers/__pycache__/__init__.cpython-310.pyc,, +xformers/__pycache__/_cpp_lib.cpython-310.pyc,, +xformers/__pycache__/_deprecation_warning.cpython-310.pyc,, +xformers/__pycache__/attn_bias_utils.cpython-310.pyc,, +xformers/__pycache__/checkpoint.cpython-310.pyc,, +xformers/__pycache__/info.cpython-310.pyc,, +xformers/__pycache__/test.cpython-310.pyc,, +xformers/__pycache__/utils.cpython-310.pyc,, +xformers/__pycache__/version.cpython-310.pyc,, +xformers/_cpp_lib.py,sha256=diLPbor1Tp80qZKvJLeoiYKYRfBf8UnsO2UbaMRuQLI,4958 +xformers/_deprecation_warning.py,sha256=a2-JfNsT7EGLG5ZFarz7GTdnMe6EA3cli4BNfr3Wtog,456 +xformers/_flash_attn/__init__.py,sha256=33Vo6R_5k7y4iJR3Bk8Op3Uzxa0dZc4OdchOtdzzJSE,291 +xformers/_flash_attn/__pycache__/__init__.cpython-310.pyc,, +xformers/_flash_attn/__pycache__/bert_padding.cpython-310.pyc,, +xformers/_flash_attn/__pycache__/flash_attn_interface.cpython-310.pyc,, +xformers/_flash_attn/__pycache__/flash_attn_triton.cpython-310.pyc,, +xformers/_flash_attn/__pycache__/flash_attn_triton_og.cpython-310.pyc,, +xformers/_flash_attn/__pycache__/flash_blocksparse_attention.cpython-310.pyc,, +xformers/_flash_attn/__pycache__/flash_blocksparse_attn_interface.cpython-310.pyc,, +xformers/_flash_attn/__pycache__/fused_softmax.cpython-310.pyc,, +xformers/_flash_attn/bert_padding.py,sha256=gF1EmsdJ-HpQ86MRQ4VxDw-Sb_RVISdQALdNnoByHlw,9930 +xformers/_flash_attn/flash_attn_interface.py,sha256=dHIPTJx9uYVyWSkLdUcYu6KxgbnHsP6qkgwbXqJW9jo,59398 +xformers/_flash_attn/flash_attn_triton.py,sha256=Du81zbh8Ls70ExEsm00opziGvjGFfcZCoZDUO2zut9Q,41112 +xformers/_flash_attn/flash_attn_triton_amd/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +xformers/_flash_attn/flash_attn_triton_amd/__pycache__/__init__.cpython-310.pyc,, +xformers/_flash_attn/flash_attn_triton_amd/__pycache__/bench.cpython-310.pyc,, 
+xformers/_flash_attn/flash_attn_triton_amd/__pycache__/bwd_prefill.cpython-310.pyc,, +xformers/_flash_attn/flash_attn_triton_amd/__pycache__/bwd_ref.cpython-310.pyc,, +xformers/_flash_attn/flash_attn_triton_amd/__pycache__/fwd_decode.cpython-310.pyc,, +xformers/_flash_attn/flash_attn_triton_amd/__pycache__/fwd_prefill.cpython-310.pyc,, +xformers/_flash_attn/flash_attn_triton_amd/__pycache__/fwd_ref.cpython-310.pyc,, +xformers/_flash_attn/flash_attn_triton_amd/__pycache__/interface_fa.cpython-310.pyc,, +xformers/_flash_attn/flash_attn_triton_amd/__pycache__/interface_torch.cpython-310.pyc,, +xformers/_flash_attn/flash_attn_triton_amd/__pycache__/test.cpython-310.pyc,, +xformers/_flash_attn/flash_attn_triton_amd/__pycache__/utils.cpython-310.pyc,, +xformers/_flash_attn/flash_attn_triton_amd/bench.py,sha256=wIGZHcYI_Ria2BB6FBJ87GKRxzboRP1LOf2L__aPeA4,9837 +xformers/_flash_attn/flash_attn_triton_amd/bwd_prefill.py,sha256=iCe1jLP5_osxzEGyMBpLUW9koGezp91A5tsI0o6OQQM,20269 +xformers/_flash_attn/flash_attn_triton_amd/bwd_ref.py,sha256=BL2_4jYjRvUodX4GjgpvyQhVDgm2a6OcnVZJnozEz_A,9972 +xformers/_flash_attn/flash_attn_triton_amd/fwd_decode.py,sha256=vrk6GQqo9Tp8-SWSUzREWftqyokI5BQlTA_AAdmbEYA,23435 +xformers/_flash_attn/flash_attn_triton_amd/fwd_prefill.py,sha256=ynZUspV95iXQ0Ccld3jQ0ZxGuZHWrLWwysDL4FS71R0,32986 +xformers/_flash_attn/flash_attn_triton_amd/fwd_ref.py,sha256=ORf0qC92d3FFqPe0l6VA0gMjc2bVlYwUwF-g4Ilq2ko,11362 +xformers/_flash_attn/flash_attn_triton_amd/interface_fa.py,sha256=7NTNBEsr3RQfylD-0IrQXN_ECryEtR7BlvJFVPz-HDM,16292 +xformers/_flash_attn/flash_attn_triton_amd/interface_torch.py,sha256=DU_iepQ4h5FFvYxM4qDR5eh76l55iUiCLXdGBsc6KYo,3308 +xformers/_flash_attn/flash_attn_triton_amd/test.py,sha256=7jN9tQIRNJ_xpfPHrERU9aeG70OewtQKIYqyRFxed7o,30832 +xformers/_flash_attn/flash_attn_triton_amd/utils.py,sha256=XoKPb0Zzrjbbn7kX_j3YIqqhDW0xIhX4EjzqYvwPmng,12247 +xformers/_flash_attn/flash_attn_triton_og.py,sha256=LmvDju7LJG-wOYhoR6Zc2AmdPK2oWyB1VJpMjRhnWnE,11328 
+xformers/_flash_attn/flash_blocksparse_attention.py,sha256=gsdH9VtYaVcTcP1rzZYPy1V_wUqgdvVcsB1h4Mk7RGs,7472 +xformers/_flash_attn/flash_blocksparse_attn_interface.py,sha256=2qK2KvVCt851_j8ZzHvjS-aMfdgVDu1yne67-iScWfo,7265 +xformers/_flash_attn/fused_softmax.py,sha256=0-XbXo7R1a5h4-EpUzPy--lwlGytfTDW34WGM5nmBAY,7793 +xformers/_flash_attn/layers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +xformers/_flash_attn/layers/__pycache__/__init__.cpython-310.pyc,, +xformers/_flash_attn/layers/__pycache__/patch_embed.cpython-310.pyc,, +xformers/_flash_attn/layers/__pycache__/rotary.cpython-310.pyc,, +xformers/_flash_attn/layers/patch_embed.py,sha256=H58CgME_qSOPTZLOG08wFgrQS1j34pvNwMPrkTj3Ek4,2136 +xformers/_flash_attn/layers/rotary.py,sha256=MqsUZ-Gxa0OcYLtL8OsjHIOkqyTacQHkMGpqADa2e6Q,21239 +xformers/_flash_attn/losses/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +xformers/_flash_attn/losses/__pycache__/__init__.cpython-310.pyc,, +xformers/_flash_attn/losses/__pycache__/cross_entropy.cpython-310.pyc,, +xformers/_flash_attn/losses/cross_entropy.py,sha256=tj5IoeUZuSzA1_82UFr7o-1WuoHyKAc1gVS6fWzAbDQ,3197 +xformers/_flash_attn/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +xformers/_flash_attn/models/__pycache__/__init__.cpython-310.pyc,, +xformers/_flash_attn/models/__pycache__/baichuan.cpython-310.pyc,, +xformers/_flash_attn/models/__pycache__/bert.cpython-310.pyc,, +xformers/_flash_attn/models/__pycache__/bigcode.cpython-310.pyc,, +xformers/_flash_attn/models/__pycache__/btlm.cpython-310.pyc,, +xformers/_flash_attn/models/__pycache__/falcon.cpython-310.pyc,, +xformers/_flash_attn/models/__pycache__/gpt.cpython-310.pyc,, +xformers/_flash_attn/models/__pycache__/gpt_neox.cpython-310.pyc,, +xformers/_flash_attn/models/__pycache__/gptj.cpython-310.pyc,, +xformers/_flash_attn/models/__pycache__/llama.cpython-310.pyc,, +xformers/_flash_attn/models/__pycache__/opt.cpython-310.pyc,, 
+xformers/_flash_attn/models/__pycache__/vit.cpython-310.pyc,, +xformers/_flash_attn/models/baichuan.py,sha256=eFNWwoRQ02AIeQP0OoK8pNvYw0dqnHOshLigCQPkAEc,5730 +xformers/_flash_attn/models/bert.py,sha256=dMM6-Pj814pgQdsKkgkwg_grNZ7snM2juSgoUB14R7Q,33232 +xformers/_flash_attn/models/bigcode.py,sha256=mkYeItoJtmWVf2wKkUs5oXjwdbTdGSo5eHxi0-1maZ8,9383 +xformers/_flash_attn/models/btlm.py,sha256=d8YDjYTa2G1DutYu-YuVf15S_Dn6oKn8-HzERoersLA,4631 +xformers/_flash_attn/models/falcon.py,sha256=mA3wGv1a4zhbrUSlFNVVmTgVjiXc1sFTOi55eYpgSPo,6033 +xformers/_flash_attn/models/gpt.py,sha256=QGBMCw_osxD4VMMj1uC6TMlXlM5lIInxSUKmq5J5kSU,47669 +xformers/_flash_attn/models/gpt_neox.py,sha256=_704a9KQ2PcnID8uMV7yZ4ggjGlh1zZH5gszue6D1bI,5159 +xformers/_flash_attn/models/gptj.py,sha256=k2eqMNyMbU7CJVM_BHBjlKt0ByFz6ITSETqS1mJa89g,4436 +xformers/_flash_attn/models/llama.py,sha256=bDRI308iRpeJngZLrQlLTGYAmwYotqzUxnjBMirfn-k,16581 +xformers/_flash_attn/models/opt.py,sha256=L0ZIWKpSP44lcEbiVCzVT9un_5gFMAW6cvnS3KHcb-A,5164 +xformers/_flash_attn/models/vit.py,sha256=7i0WUI_jZvQ5TMoSKPPzf77ZcyMDfDJuQaINzXN_iQU,14074 +xformers/_flash_attn/modules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +xformers/_flash_attn/modules/__pycache__/__init__.cpython-310.pyc,, +xformers/_flash_attn/modules/__pycache__/block.cpython-310.pyc,, +xformers/_flash_attn/modules/__pycache__/embedding.cpython-310.pyc,, +xformers/_flash_attn/modules/__pycache__/mha.cpython-310.pyc,, +xformers/_flash_attn/modules/__pycache__/mlp.cpython-310.pyc,, +xformers/_flash_attn/modules/block.py,sha256=WLi7JKj9_Zpk89ppzC7WTIoykJJ7TLOJbUSZePNnW1E,17349 +xformers/_flash_attn/modules/embedding.py,sha256=RCVeeiomlGNkLeQD8G6Udvex-NDI_xKD45hXjgZ2lbQ,8693 +xformers/_flash_attn/modules/mha.py,sha256=V6Ynog9pb_G9UVxetRjXlmWGExZlxmJkYVwAExXqUEk,43297 +xformers/_flash_attn/modules/mlp.py,sha256=G6KPQagfKq1DRn7hQRJ3OHznFJLZHj_PiidZE_zcLgg,6033 
+xformers/_flash_attn/ops/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +xformers/_flash_attn/ops/__pycache__/__init__.cpython-310.pyc,, +xformers/_flash_attn/ops/__pycache__/activations.cpython-310.pyc,, +xformers/_flash_attn/ops/__pycache__/fused_dense.cpython-310.pyc,, +xformers/_flash_attn/ops/__pycache__/layer_norm.cpython-310.pyc,, +xformers/_flash_attn/ops/__pycache__/rms_norm.cpython-310.pyc,, +xformers/_flash_attn/ops/activations.py,sha256=t5lzNg1In8LP6bKeTnyeMizwqjv27JGbJ6ylPdGvZYg,3939 +xformers/_flash_attn/ops/fused_dense.py,sha256=ACJKqkIfxZibxI3nb5ycb3pXBKaL_CM63rUUyQYNAUE,27907 +xformers/_flash_attn/ops/layer_norm.py,sha256=zr7NXIm-2mtEynTp1CS0fbFGI2Mqdp41dY4AfDWF6EQ,22443 +xformers/_flash_attn/ops/rms_norm.py,sha256=XEnihcj0a4aSz4LO55m5iKGVn4HKTeKN8TIyHjuDgxI,3988 +xformers/_flash_attn/ops/triton/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 +xformers/_flash_attn/ops/triton/__pycache__/__init__.cpython-310.pyc,, +xformers/_flash_attn/ops/triton/__pycache__/cross_entropy.cpython-310.pyc,, +xformers/_flash_attn/ops/triton/__pycache__/k_activations.cpython-310.pyc,, +xformers/_flash_attn/ops/triton/__pycache__/layer_norm.cpython-310.pyc,, +xformers/_flash_attn/ops/triton/__pycache__/linear.cpython-310.pyc,, +xformers/_flash_attn/ops/triton/__pycache__/mlp.cpython-310.pyc,, +xformers/_flash_attn/ops/triton/__pycache__/rotary.cpython-310.pyc,, +xformers/_flash_attn/ops/triton/cross_entropy.py,sha256=hjSfLhv4cKt-N8hTfpgkGMFdxhs8B4II6VIkEtck8EM,12845 +xformers/_flash_attn/ops/triton/k_activations.py,sha256=-Z3vIyO4JkqBMipKsPvhzmxljtBdIhJCsl_M-_ESqBo,4034 +xformers/_flash_attn/ops/triton/layer_norm.py,sha256=rNJwuijsZ6sKDtKHlkbT0qDzbi6vetVjjibpy9YRHFQ,35715 +xformers/_flash_attn/ops/triton/linear.py,sha256=OtRvKz8xdpl-7v3q_ZTaS9fdBt9XrzMyapgRr50uBbM,20841 +xformers/_flash_attn/ops/triton/mlp.py,sha256=_5lbZJFZg_pXeXYITGt4V_6LkB_yddClB_jt-diCOdw,6068 
+xformers/_flash_attn/ops/triton/rotary.py,sha256=WH7tELBLZ23znuxnYUAzP7YWqwMXJmRgUQ8B64Vjdn4,8583 +xformers/_flash_attn/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +xformers/_flash_attn/utils/__pycache__/__init__.cpython-310.pyc,, +xformers/_flash_attn/utils/__pycache__/benchmark.cpython-310.pyc,, +xformers/_flash_attn/utils/__pycache__/distributed.cpython-310.pyc,, +xformers/_flash_attn/utils/__pycache__/generation.cpython-310.pyc,, +xformers/_flash_attn/utils/__pycache__/pretrained.cpython-310.pyc,, +xformers/_flash_attn/utils/benchmark.py,sha256=JDtzdVhFyMIQqs3edbcXdXnmDf-O7RVpmZmn2ZFCvI0,7369 +xformers/_flash_attn/utils/distributed.py,sha256=qhcybRXtslssuV9LYaQy37haPaPtklM4YUMDx9UvnnQ,5825 +xformers/_flash_attn/utils/generation.py,sha256=9IVPvkf_hlbsxCWgECUA03293qHxXzWDFCTAOdqbAVo,30694 +xformers/_flash_attn/utils/pretrained.py,sha256=VZ6qk90sBJA7M86gRzPsNc_CkQXkj5HyrJvwl0I355k,3246 +xformers/attn_bias_utils.py,sha256=2MD_FqAf7Xci5W7jbeu6E3IYxqj0-4q6aWSXA_qzhko,19178 +xformers/benchmarks/LRA/__init__.py,sha256=BBoPvMfJjZ0Fi25JCRS8sPYUfLbHneLncV3JeGDIGHg,198 +xformers/benchmarks/LRA/__pycache__/__init__.cpython-310.pyc,, +xformers/benchmarks/LRA/__pycache__/batch_fetch_results.cpython-310.pyc,, +xformers/benchmarks/LRA/__pycache__/batch_submit.cpython-310.pyc,, +xformers/benchmarks/LRA/__pycache__/run_grid_search.cpython-310.pyc,, +xformers/benchmarks/LRA/__pycache__/run_tasks.cpython-310.pyc,, +xformers/benchmarks/LRA/__pycache__/run_with_submitit.cpython-310.pyc,, +xformers/benchmarks/LRA/batch_fetch_results.py,sha256=QsTgzFU45VP636TPiewnVCKD57bOR_YkjZq2KjMLzqc,3508 +xformers/benchmarks/LRA/batch_submit.py,sha256=7BgzEgSCJNN4vaQ8m-hEY2x5DxSkaPzp_Jorr2fS5p4,1685 +xformers/benchmarks/LRA/code/__init__.py,sha256=BBoPvMfJjZ0Fi25JCRS8sPYUfLbHneLncV3JeGDIGHg,198 +xformers/benchmarks/LRA/code/__pycache__/__init__.cpython-310.pyc,, +xformers/benchmarks/LRA/code/__pycache__/dataset.cpython-310.pyc,, 
+xformers/benchmarks/LRA/code/__pycache__/model_wrapper.cpython-310.pyc,, +xformers/benchmarks/LRA/code/dataset.py,sha256=6iMOhcM8rY75Ub_t3fopBxGjo1zCWwSIRAC3eHqAsIs,1403 +xformers/benchmarks/LRA/code/model_wrapper.py,sha256=18YKbVAASeoaGZqhM-Ps3scvblLbVoImHWKJqY7CQjw,9777 +xformers/benchmarks/LRA/run_grid_search.py,sha256=yLhNnQv4HDbEdQVApCdJiZzFes9VeGZjPOafHAaZ5Ew,5327 +xformers/benchmarks/LRA/run_tasks.py,sha256=GZP6cUgE1ftpQrp2IOf-MWbYJFcokt9bsLA8Q6PNyIU,9294 +xformers/benchmarks/LRA/run_with_submitit.py,sha256=WUtQJLYWnVVcp6mbmBQuqiA3pdvayEpZzqEpW5FZRz0,4612 +xformers/benchmarks/__init__.py,sha256=BBoPvMfJjZ0Fi25JCRS8sPYUfLbHneLncV3JeGDIGHg,198 +xformers/benchmarks/__pycache__/__init__.cpython-310.pyc,, +xformers/benchmarks/__pycache__/benchmark_attn_decoding.cpython-310.pyc,, +xformers/benchmarks/__pycache__/benchmark_core.cpython-310.pyc,, +xformers/benchmarks/__pycache__/benchmark_indexing.cpython-310.pyc,, +xformers/benchmarks/__pycache__/benchmark_mem_eff_attention.cpython-310.pyc,, +xformers/benchmarks/__pycache__/benchmark_merge_attentions.cpython-310.pyc,, +xformers/benchmarks/__pycache__/benchmark_nystrom_utils.cpython-310.pyc,, +xformers/benchmarks/__pycache__/benchmark_revnet.cpython-310.pyc,, +xformers/benchmarks/__pycache__/benchmark_sddmm.cpython-310.pyc,, +xformers/benchmarks/__pycache__/benchmark_sequence_parallel_fused.cpython-310.pyc,, +xformers/benchmarks/__pycache__/benchmark_sp24.cpython-310.pyc,, +xformers/benchmarks/__pycache__/benchmark_swiglu.cpython-310.pyc,, +xformers/benchmarks/__pycache__/benchmark_tiled_matmul.cpython-310.pyc,, +xformers/benchmarks/__pycache__/utils.cpython-310.pyc,, +xformers/benchmarks/benchmark_attn_decoding.py,sha256=Jp45OCalf0Wfk8WZ1y8NBqyccup9lSnFs9R1Oq6Gjqg,12368 +xformers/benchmarks/benchmark_core.py,sha256=83x0wl7ouGqRgECQiRE3kzTXQwXHP3uWpQHvqxJWBlw,8626 +xformers/benchmarks/benchmark_indexing.py,sha256=W55qQHBuwlEp98X3XzoCrlHQg7NF00b9gc52_qDzhSE,5288 
+xformers/benchmarks/benchmark_mem_eff_attention.py,sha256=HG8mfTCy1oFOsnOOQZ8zOGrOWotRTFU0fVJ4d4_xn0M,10101 +xformers/benchmarks/benchmark_merge_attentions.py,sha256=_6iSEhJGoeljSrVbOdi-tncYHPQNE7lj1wrTQlubSLo,3042 +xformers/benchmarks/benchmark_nystrom_utils.py,sha256=mUML4twJT1LCoCwtvS_3isfcHDS4XTH18leB_kix-OY,3230 +xformers/benchmarks/benchmark_revnet.py,sha256=qG7SxXpoJ-bi5cajWukyft7qVprmVfqqtx9PRQqIdI8,2671 +xformers/benchmarks/benchmark_sddmm.py,sha256=Tvq1rgXpeaj7kGjCKn_6pX2QA7TOt_aNG-nLCYa9_Eo,3765 +xformers/benchmarks/benchmark_sequence_parallel_fused.py,sha256=-Ob3G3c0xTJs--bw5yZQ203UqY1maOtKM3K5ZCDB6tw,14454 +xformers/benchmarks/benchmark_sp24.py,sha256=xcp-cR8t7U5WGfvbDoZeDUoTLXD-w7vVQerfd2kybVw,4861 +xformers/benchmarks/benchmark_swiglu.py,sha256=UD6tmg4JaPYct6KLwqnE_7UsFewKY_GpgV8XZMa1hFM,4295 +xformers/benchmarks/benchmark_tiled_matmul.py,sha256=oJW_-b1AkXEN1J6h35DVDzvZIjvDFMYld1KWq8h7Twk,3446 +xformers/benchmarks/utils.py,sha256=PSLBq4P26wNZqcIoo78afIDoqTDpJsJsS6-Fdus5zgE,24507 +xformers/checkpoint.py,sha256=PUYtKs1QE-3ZKoBfWhIT-aFHMD_uYJq4ZBNePE-Ug6o,20337 +xformers/components/__init__.py,sha256=YYnOMdsb2l8qdNR09HFFTu-wf34-y6g2G_nFb7oxaac,1004 +xformers/components/__pycache__/__init__.cpython-310.pyc,, +xformers/components/__pycache__/input_projection.cpython-310.pyc,, +xformers/components/__pycache__/residual.cpython-310.pyc,, +xformers/components/attention/__init__.py,sha256=ssO2FRcK9wALgkMc_H0QUujFfZjIZtYLImKPmmLpC4s,3311 +xformers/components/attention/__pycache__/__init__.cpython-310.pyc,, +xformers/components/attention/__pycache__/_sputnik_sparse.cpython-310.pyc,, +xformers/components/attention/__pycache__/attention_mask.cpython-310.pyc,, +xformers/components/attention/__pycache__/attention_patterns.cpython-310.pyc,, +xformers/components/attention/__pycache__/base.cpython-310.pyc,, +xformers/components/attention/__pycache__/core.cpython-310.pyc,, +xformers/components/attention/__pycache__/fourier_mix.cpython-310.pyc,, 
+xformers/components/attention/__pycache__/scaled_dot_product.cpython-310.pyc,, +xformers/components/attention/__pycache__/sparsity_config.cpython-310.pyc,, +xformers/components/attention/__pycache__/utils.cpython-310.pyc,, +xformers/components/attention/_sputnik_sparse.py,sha256=bibk2S-Coatt45gO9fzLTaAgUMKXbRi5U0eR_-QAbnw,3164 +xformers/components/attention/attention_mask.py,sha256=Y0zyM5JDnFzfE0TGPx4BxEeQlqIPX-XnZpO_lskQn4w,4585 +xformers/components/attention/attention_patterns.py,sha256=mZ-s8R-DS6XrOP_-ExZZku7XWVKUOEVfUdHpB9D5kek,9945 +xformers/components/attention/base.py,sha256=X8qmru_ln4eSac68rnYZzfAF3pbQDz5l6UxfRE-pR6Y,3305 +xformers/components/attention/core.py,sha256=KqkGcrwVsZmlovA0yQV3MKPAbGIoKx2AkyBlfxM3xHE,7641 +xformers/components/attention/fourier_mix.py,sha256=AKMjFgmSBomER9A8c9mbuERjnLR9KAcMafawRJ0eCko,1184 +xformers/components/attention/scaled_dot_product.py,sha256=vUcRVzQT3ol6hIqrTcCudlaCru1W1ZcZgjaTIkl7Lr4,4504 +xformers/components/attention/sparsity_config.py,sha256=lZLGeKVcqnKVGKqkjE5Av2yGIHB5zZvU9IwrR3UrvZo,41608 +xformers/components/attention/utils.py,sha256=04aEVvDG9LjzD9NfsQ3Kj-kZSnMFCppCFrJx6NPoV8g,3803 +xformers/components/input_projection.py,sha256=7oH2EfhMsujvjpXxIHzg5A3gT5xA0vJ2trb2-pZoh3g,3121 +xformers/components/residual.py,sha256=jTAnAytB9dZoXv-xEw9NBL2NnSqo3C68q9jlOpfS04I,5693 +xformers/cpp_lib.json,sha256=zeS6TFc3Tny7dRXY7SMm26hn_wa_uJGSYrznaBMeFhs,396 +xformers/info.py,sha256=EzzBOTz7vp1OUPDlFT5yBSs3ItqAzMhfK975bv-A2FM,2672 +xformers/ops/__init__.py,sha256=3JYxy2MquvYUcdoZzSez-dTX257Gth7430QjW1xex7g,3502 +xformers/ops/__pycache__/__init__.cpython-310.pyc,, +xformers/ops/__pycache__/common.cpython-310.pyc,, +xformers/ops/__pycache__/differentiable_collectives.cpython-310.pyc,, +xformers/ops/__pycache__/indexing.cpython-310.pyc,, +xformers/ops/__pycache__/ipc.cpython-310.pyc,, +xformers/ops/__pycache__/modpar_layers.cpython-310.pyc,, +xformers/ops/__pycache__/rmsnorm.cpython-310.pyc,, 
+xformers/ops/__pycache__/rope_padded.cpython-310.pyc,, +xformers/ops/__pycache__/seqpar.cpython-310.pyc,, +xformers/ops/__pycache__/sequence_parallel_fused_ops.cpython-310.pyc,, +xformers/ops/__pycache__/sp24.cpython-310.pyc,, +xformers/ops/__pycache__/swiglu_op.cpython-310.pyc,, +xformers/ops/__pycache__/tiled_matmul.cpython-310.pyc,, +xformers/ops/__pycache__/unbind.cpython-310.pyc,, +xformers/ops/_triton/__init__.py,sha256=G7u-HQ1NEf-mlR3LN5qupDMVCHHA94H1S_aXoStfKOI,741 +xformers/ops/_triton/__pycache__/__init__.cpython-310.pyc,, +xformers/ops/_triton/__pycache__/k_index_select_cat.cpython-310.pyc,, +xformers/ops/_triton/__pycache__/k_scaled_index_add.cpython-310.pyc,, +xformers/ops/_triton/__pycache__/matmul_perf_model.cpython-310.pyc,, +xformers/ops/_triton/__pycache__/rmsnorm_kernels.cpython-310.pyc,, +xformers/ops/_triton/__pycache__/rope_padded_kernels.cpython-310.pyc,, +xformers/ops/_triton/__pycache__/tiled_matmul_kernels.cpython-310.pyc,, +xformers/ops/_triton/k_index_select_cat.py,sha256=_gFAuuUbKd_CH0Awkc_sspxE0lmd4Y-gLndLu3G3brA,6194 +xformers/ops/_triton/k_scaled_index_add.py,sha256=KCEgK-9s_A1jdEVvBn84GlA4py7ZBrQSuxWyYaJG_jc,12802 +xformers/ops/_triton/matmul_perf_model.py,sha256=FpuoFYqUsm0zCn0G6htd1kAri-GhShCuj_MZPvdHXp4,8385 +xformers/ops/_triton/rmsnorm_kernels.py,sha256=2aLv_WHvVebn2HMvLLfIxLcHIScgWsMtz9PtdjUZLDU,5124 +xformers/ops/_triton/rope_padded_kernels.py,sha256=L9QqbmfGYUPMMmVkKvN08OG28BmIDL3aCXsNbhkl4bw,7221 +xformers/ops/_triton/tiled_matmul_kernels.py,sha256=p1G-VMcJnthKc71Bc4cE7Hul66U5Dy_-ur2YnlJ2BC4,13832 +xformers/ops/common.py,sha256=bgeQP7DxTkWhj_xiM-GUZ7QBoxRzg-VY3NfEOns5hhs,1911 +xformers/ops/differentiable_collectives.py,sha256=moSee9BpQDlfXgEq_P3IqxYcF9NjJGi8jbge_cJ9yGI,5356 +xformers/ops/fmha/__init__.py,sha256=EtZAYIbQBkiPWPCWuKiQpe2f3oSG7uGX8JyDoF9jH8I,30491 +xformers/ops/fmha/__pycache__/__init__.cpython-310.pyc,, +xformers/ops/fmha/__pycache__/attn_bias.cpython-310.pyc,, 
+xformers/ops/fmha/__pycache__/ck.cpython-310.pyc,, +xformers/ops/fmha/__pycache__/ck_decoder.cpython-310.pyc,, +xformers/ops/fmha/__pycache__/ck_splitk.cpython-310.pyc,, +xformers/ops/fmha/__pycache__/common.cpython-310.pyc,, +xformers/ops/fmha/__pycache__/cutlass.cpython-310.pyc,, +xformers/ops/fmha/__pycache__/dispatch.cpython-310.pyc,, +xformers/ops/fmha/__pycache__/flash.cpython-310.pyc,, +xformers/ops/fmha/__pycache__/flash3.cpython-310.pyc,, +xformers/ops/fmha/__pycache__/torch_attention_compat.cpython-310.pyc,, +xformers/ops/fmha/__pycache__/triton_splitk.cpython-310.pyc,, +xformers/ops/fmha/_triton/__init__.py,sha256=BBoPvMfJjZ0Fi25JCRS8sPYUfLbHneLncV3JeGDIGHg,198 +xformers/ops/fmha/_triton/__pycache__/__init__.cpython-310.pyc,, +xformers/ops/fmha/_triton/__pycache__/splitk_kernels.cpython-310.pyc,, +xformers/ops/fmha/_triton/splitk_kernels.py,sha256=2aGCZ6UftNykO-XQBSxUZYce1MzQA0xhSGAE7jqH2AM,41033 +xformers/ops/fmha/attn_bias.py,sha256=gtiyxPr2cVmpcUGRo-8u3KSY8jsVVuQL5ASsyeS_SrI,65449 +xformers/ops/fmha/ck.py,sha256=LrHNuPTsF9DJZqFIKYPMw-4El_ORH3hCmVfmpZMcjvs,17742 +xformers/ops/fmha/ck_decoder.py,sha256=3Lm0DA4TYgBu7KmlKEntnaDSL21iv__d3-z-BH5RAB4,5092 +xformers/ops/fmha/ck_splitk.py,sha256=4_kVxgiKM0YBUsQaWV071I6y4kDNmNore47rAZ_i-oc,6411 +xformers/ops/fmha/common.py,sha256=pw-pcpErmjfHJxL6LgxLm2hWTtn6UbVkvD4JrTia9MI,18579 +xformers/ops/fmha/cutlass.py,sha256=2QRon36JqrXth0pAixPsslYRIKTpVPnkWzXQinCOBbw,17148 +xformers/ops/fmha/dispatch.py,sha256=CLhYpRPS0vQOolffKlm3OlpLM1EJwwSgnaAxS3ZZK7A,6186 +xformers/ops/fmha/flash.py,sha256=mXTBqK7YXBqhoB8eqW_jARbRBgtW2MYqX9Ovas0lguY,28496 +xformers/ops/fmha/flash3.py,sha256=MaQ0S-ZxaIrAS0TATyrEwBOU-SFYC6kWTgU4sOvpZB0,17829 +xformers/ops/fmha/torch_attention_compat.py,sha256=n02uHdad4M7xwO2k3lDTs40vbQ0WfKskZ-DSdy7G914,5745 +xformers/ops/fmha/triton_splitk.py,sha256=8bqMisrKRfncHFr8WkNRigQuSu-oFtGup9ru2fG5Orw,36605 +xformers/ops/indexing.py,sha256=uQDU1rqgh-huDvOdtMekgSQSMXVndrC0n3vn8CFHNrI,6927 
+xformers/ops/ipc.py,sha256=Q5YRa5G1LrAbm8syGZiSBzPvMqQEvR-gqML6t_18G-0,6649 +xformers/ops/modpar_layers.py,sha256=gOg2cAn3G-xhp1bQTM33dA1adQc5bmZp6v-jF4bh_sM,5703 +xformers/ops/rmsnorm.py,sha256=ofNCqraOJZBt_z1WoTMuW5HgfCr2wpJ_zkL_b0e-n2s,3358 +xformers/ops/rope_padded.py,sha256=bI6JCNHO0Rggp9oYW23KV4ONsoPqk4tGT3EGejqRZvY,12100 +xformers/ops/seqpar.py,sha256=dTNVh1DPAYJeOP-dmopGEuulQQ9WjLvo7BU0B7Y8zcU,12063 +xformers/ops/sequence_parallel_fused_ops.py,sha256=UVaJf0onPork4jw5LAVqa0-h-jryu0jpLdwevhQuSog,36973 +xformers/ops/sp24.py,sha256=MUPa8Klm1pqijH4Dm3CCRWJSDxOOEJkm2bjwHzG5UM4,27125 +xformers/ops/swiglu_op.py,sha256=mhb1wn7pDKlGs2BXzB7n4pr12djwik-rCjrxwRK8xEw,17327 +xformers/ops/tiled_matmul.py,sha256=mzGdPYDKIBwgwitOOo0R6ld_Q5wtm4mHawElx3bMPqY,12792 +xformers/ops/unbind.py,sha256=KWEif28duVWbzj14bL-O3jJTxMuTZPvuGqCkZQhsgYQ,3925 +xformers/profiler/__init__.py,sha256=4jTj_MGJ-HkCkHwkx6QneBm27dMMPxvNa6kLwro86bk,421 +xformers/profiler/__pycache__/__init__.cpython-310.pyc,, +xformers/profiler/__pycache__/api.cpython-310.pyc,, +xformers/profiler/__pycache__/device_limits.cpython-310.pyc,, +xformers/profiler/__pycache__/find_slowest.cpython-310.pyc,, +xformers/profiler/__pycache__/profile_analyzer.cpython-310.pyc,, +xformers/profiler/__pycache__/profiler.cpython-310.pyc,, +xformers/profiler/__pycache__/profiler_dcgm.cpython-310.pyc,, +xformers/profiler/__pycache__/profiler_dcgm_impl.cpython-310.pyc,, +xformers/profiler/api.py,sha256=t-Cp6VqSEBk7Nm0zChnpJSIAKCMuY7VoYn9mySna7xQ,2685 +xformers/profiler/device_limits.py,sha256=HzPTO6bHMk7HxHelSh3A0Bk8qyuxL3-SsFYGBXdjT6c,3588 +xformers/profiler/find_slowest.py,sha256=o5Kky0Lrdz5ea_5CMJE8NX-EFyaMylwBNVLREdU8GEc,5087 +xformers/profiler/profile_analyzer.py,sha256=v1TKzsE9lm5kkFoCLCXbmc3XMSG_7QD_J3QbeqmyYLc,8717 +xformers/profiler/profiler.py,sha256=sD_9eoeKttMBVfqhODw5LIzp4MYFNx8t7xC4qp5hAYw,12756 +xformers/profiler/profiler_dcgm.py,sha256=1NWMET7oVtUoBYhEFoQ-F17s1B8xmqcrL3Hjddry-H0,1153 
+xformers/profiler/profiler_dcgm_impl.py,sha256=umLRJQCYOIyYHwFsyXDk9eNmbs4DUmuvAwe6tUahN6k,8472 +xformers/sparse/__init__.py,sha256=NpDMLawDg0ra3mR7LpNnkua8KOpeY6h2AL4or88rGws,317 +xformers/sparse/__pycache__/__init__.cpython-310.pyc,, +xformers/sparse/__pycache__/_csr_ops.cpython-310.pyc,, +xformers/sparse/__pycache__/blocksparse_tensor.cpython-310.pyc,, +xformers/sparse/__pycache__/csr_tensor.cpython-310.pyc,, +xformers/sparse/__pycache__/utils.cpython-310.pyc,, +xformers/sparse/_csr_ops.py,sha256=p8bGD91Y52LuYieY97ZCtlOpYFNnnPRjXDuBoK5_Bp0,4873 +xformers/sparse/blocksparse_tensor.py,sha256=5f1iIqrK_R0cza1anOIJo4CkaMIUo1WbMlJ0LjPbd8s,8894 +xformers/sparse/csr_tensor.py,sha256=TZbVB4aP0WVG2Gr8gB3orkPbNWdjeGxktxYzHAvkYfc,14191 +xformers/sparse/utils.py,sha256=yxugnW0FLK7J4nc3xTPlRMcyVVQE7xwUcgt47kmjP-o,4274 +xformers/test.py,sha256=BBoPvMfJjZ0Fi25JCRS8sPYUfLbHneLncV3JeGDIGHg,198 +xformers/triton/__init__.py,sha256=BBoPvMfJjZ0Fi25JCRS8sPYUfLbHneLncV3JeGDIGHg,198 +xformers/triton/__pycache__/__init__.cpython-310.pyc,, +xformers/triton/__pycache__/importing.cpython-310.pyc,, +xformers/triton/__pycache__/vararg_kernel.cpython-310.pyc,, +xformers/triton/importing.py,sha256=-gHQG6er7JP4Z88mA5DJL5j6CT2mtdSFd989ZOWiWBk,908 +xformers/triton/vararg_kernel.py,sha256=O7_HmdejlNKIdG_bO2i8aHToqLNwrJuqNHOGqarwzu4,8956 +xformers/utils.py,sha256=eCX-EDLXtb0pOvQAeQVwC8LiMqeBiX2NEyb89jqEVas,5064 +xformers/version.py,sha256=n75bWVQoJa0gsi-54tbbkZFmIIXCoXDrss-RtaHUNos,42 diff --git a/venv/lib/python3.10/site-packages/xformers-0.0.29.post2.dist-info/WHEEL b/venv/lib/python3.10/site-packages/xformers-0.0.29.post2.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..c187dac15f0537f2f00b44ab9a7831ee9477fee1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/xformers-0.0.29.post2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.45.1) +Root-Is-Purelib: false +Tag: cp310-cp310-manylinux_2_28_x86_64 + diff --git 
a/venv/lib/python3.10/site-packages/xformers-0.0.29.post2.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/xformers-0.0.29.post2.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..56e3c822be24e7529619f4406fbfb2ad6dfae3fd --- /dev/null +++ b/venv/lib/python3.10/site-packages/xformers-0.0.29.post2.dist-info/top_level.txt @@ -0,0 +1 @@ +xformers